/**
 * Settings page: runs system/AI benchmarks and displays results.
 *
 * Data flow visible in this file:
 *  - Server-provided initial state arrives via `props.benchmark` (latest result,
 *    current status, current benchmark id) and seeds the TanStack queries.
 *  - `['benchmark', 'latest']` / `['benchmark', 'history']` queries wrap `api.*` calls.
 *  - `runBenchmark` mutation runs synchronously (second arg `true` to
 *    `api.runBenchmark`), so progress is *simulated* client-side by the staged
 *    timer effect below; real SSE progress (async mode) is also subscribed to
 *    on BROADCAST_CHANNELS.BENCHMARK_PROGRESS as a backup.
 *  - `submitResult` optionally persists the chosen builder tag first
 *    (non-anonymous only), then submits to the community repository; a 409
 *    from the API is mapped to a friendly "already submitted" message.
 *
 * NOTE(review): several hooks are missing type parameters as written —
 * `useState(null)` for `progress` (should be `BenchmarkProgressWithID | null`),
 * `useState(null)` for `submitError` (should be `string | null`), and the bare
 * `Record` in `getProgressPercent` (presumably `Record<BenchmarkStatus, number>`).
 * `onError: (error: any)` should narrow `unknown` instead. Not fixed here
 * because this extraction has lost the file's true line structure — fix at source.
 */
import { Head, Link, usePage } from '@inertiajs/react' import { useState, useEffect, useRef } from 'react' import SettingsLayout from '~/layouts/SettingsLayout' import { useQuery, useMutation, useQueryClient } from '@tanstack/react-query' import CircularGauge from '~/components/systeminfo/CircularGauge' import InfoCard from '~/components/systeminfo/InfoCard' import Alert from '~/components/Alert' import StyledButton from '~/components/StyledButton' import InfoTooltip from '~/components/InfoTooltip' import BuilderTagSelector from '~/components/BuilderTagSelector' import { IconRobot, IconChartBar, IconCpu, IconDatabase, IconServer, IconChevronDown, IconClock, } from '@tabler/icons-react' import { useTransmit } from 'react-adonis-transmit' import { BenchmarkProgress, BenchmarkStatus } from '../../../types/benchmark' import BenchmarkResult from '#models/benchmark_result' import api from '~/lib/api' import useServiceInstalledStatus from '~/hooks/useServiceInstalledStatus' import { SERVICE_NAMES } from '../../../constants/service_names' import { BROADCAST_CHANNELS } from '../../../constants/broadcast' type BenchmarkProgressWithID = BenchmarkProgress & { benchmark_id: string } export default function BenchmarkPage(props: { benchmark: { latestResult: BenchmarkResult | null status: BenchmarkStatus currentBenchmarkId: string | null } }) { const { aiAssistantName } = usePage<{ aiAssistantName: string }>().props const { subscribe } = useTransmit() const queryClient = useQueryClient() const aiInstalled = useServiceInstalledStatus(SERVICE_NAMES.OLLAMA) const [progress, setProgress] = useState(null) const [isRunning, setIsRunning] = useState(props.benchmark.status !== 'idle') const refetchLatestRef = useRef<(() => void) | null>(null) const [showDetails, setShowDetails] = useState(false) const [showHistory, setShowHistory] = useState(false) const [showAIRequiredAlert, setShowAIRequiredAlert] = useState(false) const [shareAnonymously, setShareAnonymously] = useState(false) const
// Builder tag the user will claim on the leaderboard; seeded from the latest
// saved result so a previously-chosen tag survives page loads.
[currentBuilderTag, setCurrentBuilderTag] = useState( props.benchmark.latestResult?.builder_tag || null ) // Fetch latest result const { data: latestResult, refetch: refetchLatest } = useQuery({ queryKey: ['benchmark', 'latest'], queryFn: async () => { const res = await api.getLatestBenchmarkResult() if (res && res.result) { return res.result } return null }, initialData: props.benchmark.latestResult, }) refetchLatestRef.current = refetchLatest // Fetch all benchmark results for history const { data: benchmarkHistory } = useQuery({ queryKey: ['benchmark', 'history'], queryFn: async () => { const res = await api.getBenchmarkResults() if (res && res.results && Array.isArray(res.results)) { return res.results } return [] }, }) // Run benchmark mutation (uses sync mode by default for simpler local dev) const runBenchmark = useMutation({ mutationFn: async (type: 'full' | 'system' | 'ai') => { setIsRunning(true) setProgress({ status: 'starting', progress: 5, message: 'Starting benchmark... This takes 2-5 minutes.', current_stage: 'Starting', benchmark_id: '', timestamp: new Date().toISOString(), }) // Use sync mode - runs inline without needing Redis/queue worker return await api.runBenchmark(type, true) }, onSuccess: (data) => { if (data?.success) { setProgress({ status: 'completed', progress: 100, message: 'Benchmark completed!', current_stage: 'Complete', benchmark_id: data.benchmark_id, timestamp: new Date().toISOString(), }) refetchLatest() } else { setProgress({ status: 'error', progress: 0, message: 'Benchmark failed', current_stage: 'Error', benchmark_id: '', timestamp: new Date().toISOString(), }) } setIsRunning(false) }, onError: (error) => { setProgress({ status: 'error', progress: 0, message: error.message || 'Benchmark failed', current_stage: 'Error', benchmark_id: '', timestamp: new Date().toISOString(), }) setIsRunning(false) }, }) // Update builder tag mutation const updateBuilderTag = useMutation({ mutationFn: async ({ benchmarkId, builderTag, }: {
// Mutation variables: `invalidate` lets submitResult persist the tag WITHOUT
// triggering a refetch mid-submission (it refetches itself on success).
benchmarkId: string builderTag: string invalidate?: boolean }) => { const res = await api.updateBuilderTag(benchmarkId, builderTag) if (!res || !res.success) { throw new Error(res?.error || 'Failed to update builder tag') } return res }, onSuccess: (_, variables) => { if (variables.invalidate) { refetchLatest() queryClient.invalidateQueries({ queryKey: ['benchmark', 'history'] }) } }, }) // Submit to repository mutation const [submitError, setSubmitError] = useState(null) const submitResult = useMutation({ mutationFn: async ({ benchmarkId, anonymous }: { benchmarkId: string; anonymous: boolean }) => { setSubmitError(null) // First, save the current builder tag to the benchmark (don't refetch yet) if (currentBuilderTag && !anonymous) { await updateBuilderTag.mutateAsync({ benchmarkId, builderTag: currentBuilderTag, invalidate: false, }) } const res = await api.submitBenchmark(benchmarkId, anonymous) if (!res || !res.success) { throw new Error(res?.error || 'Failed to submit benchmark') } return res }, onSuccess: () => { refetchLatest() queryClient.invalidateQueries({ queryKey: ['benchmark', 'history'] }) }, onError: (error: any) => { // Check if this is a 409 Conflict error (already submitted) if (error.status === 409) { setSubmitError('A benchmark for this system with the same or higher score has already been submitted.') } else { setSubmitError(error.message) } }, }) // Check if the latest result is a full benchmark with AI data (eligible for sharing) const canShareBenchmark = latestResult && latestResult.benchmark_type === 'full' && latestResult.ai_tokens_per_second !== null && latestResult.ai_tokens_per_second > 0 && !latestResult.submitted_to_repository // Handle Full Benchmark click with pre-flight check const handleFullBenchmarkClick = () => { if (!aiInstalled) { setShowAIRequiredAlert(true) return } setShowAIRequiredAlert(false) runBenchmark.mutate('full') } // Simulate progress during sync benchmark (since we don't get SSE updates) useEffect(() => { if
// Effect body: while `isRunning` and not already terminal, schedule one timer
// per stage (cumulative delays starting at 1s) to walk a canned stage list
// whose durations roughly mirror the real sync benchmark. All timers are
// cleared on cleanup. NOTE(review): `advanceStage` reads `isRunning` from this
// render's closure, so a mid-run change is only seen via the effect re-running
// (deps: [isRunning]) — confirm that is the intended cancellation path.
(!isRunning || progress?.status === 'completed' || progress?.status === 'error') return const stages: { status: BenchmarkStatus progress: number message: string label: string duration: number }[] = [ { status: 'detecting_hardware', progress: 10, message: 'Detecting system hardware...', label: 'Detecting Hardware', duration: 2000, }, { status: 'running_cpu', progress: 25, message: 'Running CPU benchmark (30s)...', label: 'CPU Benchmark', duration: 32000, }, { status: 'running_memory', progress: 40, message: 'Running memory benchmark...', label: 'Memory Benchmark', duration: 8000, }, { status: 'running_disk_read', progress: 55, message: 'Running disk read benchmark (30s)...', label: 'Disk Read Test', duration: 35000, }, { status: 'running_disk_write', progress: 70, message: 'Running disk write benchmark (30s)...', label: 'Disk Write Test', duration: 35000, }, { status: 'downloading_ai_model', progress: 80, message: 'Downloading AI benchmark model (first run only)...', label: 'Downloading AI Model', duration: 5000, }, { status: 'running_ai', progress: 85, message: 'Running AI inference benchmark...', label: 'AI Inference Test', duration: 15000, }, { status: 'calculating_score', progress: 95, message: 'Calculating NOMAD score...', label: 'Calculating Score', duration: 2000, }, ] let currentStage = 0 const advanceStage = () => { if (currentStage < stages.length && isRunning) { const stage = stages[currentStage] setProgress({ status: stage.status, progress: stage.progress, message: stage.message, current_stage: stage.label, benchmark_id: '', timestamp: new Date().toISOString(), }) currentStage++ } } // Start the first stage after a short delay const timers: NodeJS.Timeout[] = [] let elapsed = 1000 stages.forEach((stage) => { timers.push(setTimeout(() => advanceStage(), elapsed)) elapsed += stage.duration }) return () => { timers.forEach((t) => clearTimeout(t)) } }, [isRunning]) // Listen for benchmark progress via SSE (backup for async mode) useEffect(() => { const
// SSE subscription: terminal statuses stop the run and refetch the latest
// result through `refetchLatestRef` (a ref, so the subscription callback never
// goes stale and the effect needs only `subscribe` as a dependency).
unsubscribe = subscribe(BROADCAST_CHANNELS.BENCHMARK_PROGRESS, (data: BenchmarkProgressWithID) => { setProgress(data) if (data.status === 'completed' || data.status === 'error') { setIsRunning(false) refetchLatestRef.current?.() } }) return () => { unsubscribe() } // eslint-disable-next-line react-hooks/exhaustive-deps }, [subscribe]) const formatBytes = (bytes: number) => { const gb = bytes / (1024 * 1024 * 1024) return `${gb.toFixed(1)} GB` } const getScoreColor = (score: number) => { if (score >= 70) return 'text-green-600' if (score >= 40) return 'text-yellow-600' return 'text-red-600' } const getProgressPercent = () => { if (!progress) return 0 const stages: Record = { idle: 0, starting: 5, detecting_hardware: 10, running_cpu: 25, running_memory: 40, running_disk_read: 55, running_disk_write: 70, downloading_ai_model: 80, running_ai: 85, calculating_score: 95, completed: 100, error: 0, } return stages[progress.status] || 0 } // Calculate AI score from tokens per second (normalized to 0-100) // Reference: 30 tok/s = 50 score, 60 tok/s = 100 score const getAIScore = (tokensPerSecond: number | null): number => { if (!tokensPerSecond) return 0 const score = (tokensPerSecond / 60) * 100 return Math.min(100, Math.max(0, score)) } return (
// NOTE(review): the JSX below is garbled in this extraction — element tags and
// props have been stripped, leaving only text content and expression fragments.
// Recover the real markup from source control before editing anything past here.

System Benchmark

Measure your server's performance and compare with the NOMAD community

{/* Run Benchmark Section */}

Run Benchmark

{isRunning ? (
{progress?.current_stage || 'Running benchmark...'}

{progress?.message}

) : (
{progress?.status === 'error' && ( setProgress(null)} /> )} {showAIRequiredAlert && ( setShowAIRequiredAlert(false)} > Go to Apps to install {aiAssistantName} → )}

Run a benchmark to measure your system's CPU, memory, disk, and AI inference performance. The benchmark takes approximately 2-5 minutes to complete.

Run Full Benchmark runBenchmark.mutate('system')} disabled={runBenchmark.isPending} icon="IconCpu" > System Only runBenchmark.mutate('ai')} disabled={runBenchmark.isPending || !aiInstalled} icon="IconWand" title={ !aiInstalled ? `${aiAssistantName} must be installed to run AI benchmark` : undefined } > AI Only
{!aiInstalled && (

Note: {aiAssistantName} is not installed. Install it {' '} to run full benchmarks and share results with the community.

)}
)}
{/* Results Section */} {latestResult && ( <>

NOMAD Score

} />
{latestResult.nomad_score.toFixed(1)}

Your NOMAD Score is a weighted composite of all benchmark results.

{/* Share with Community - Only for full benchmarks with AI data */} {canShareBenchmark && (

Share with Community

Share your benchmark on the community leaderboard. Choose a Builder Tag to claim your spot, or share anonymously.

{/* Builder Tag Selector */}
{/* Anonymous checkbox */} submitResult.mutate({ benchmarkId: latestResult.benchmark_id, anonymous: shareAnonymously, }) } disabled={submitResult.isPending} icon="IconCloudUpload" > {submitResult.isPending ? 'Submitting...' : 'Share with Community'} {submitError && ( setSubmitError(null)} /> )}
)} {/* Show message for partial benchmarks */} {latestResult && !latestResult.submitted_to_repository && !canShareBenchmark && ( )} {latestResult.submitted_to_repository && ( View the leaderboard → )}

System Performance

} />
} />
} />
} />
{/* AI Performance Section */}

AI Performance

{latestResult.ai_tokens_per_second ? (
} />
{latestResult.ai_tokens_per_second.toFixed(1)}
Tokens per Second
{latestResult.ai_time_to_first_token?.toFixed(0) || 'N/A'} ms
Time to First Token
) : (

No AI Benchmark Data

Run a Full Benchmark or AI Only benchmark to measure AI inference performance.

)}

Hardware Information

} variant="elevated" data={[ { label: 'Model', value: latestResult.cpu_model }, { label: 'Cores', value: latestResult.cpu_cores }, { label: 'Threads', value: latestResult.cpu_threads }, ]} /> } variant="elevated" data={[ { label: 'RAM', value: formatBytes(latestResult.ram_bytes) }, { label: 'Disk Type', value: latestResult.disk_type.toUpperCase() }, { label: 'GPU', value: latestResult.gpu_model || 'Not detected' }, ]} />

Benchmark Details

{/* Summary row - always visible */} {/* Expanded details */} {showDetails && (
{/* Raw Scores */}

Raw Scores

CPU Score {(latestResult.cpu_score * 100).toFixed(1)}%
Memory Score {(latestResult.memory_score * 100).toFixed(1)}%
Disk Read Score {(latestResult.disk_read_score * 100).toFixed(1)}%
Disk Write Score {(latestResult.disk_write_score * 100).toFixed(1)}%
{latestResult.ai_tokens_per_second && ( <>
AI Tokens/sec {latestResult.ai_tokens_per_second.toFixed(1)}
AI Time to First Token {latestResult.ai_time_to_first_token?.toFixed(0) || 'N/A'} ms
)}
{/* Benchmark Info */}

Benchmark Info

Full Benchmark ID {latestResult.benchmark_id}
Benchmark Type {latestResult.benchmark_type}
Run Date {new Date( latestResult.created_at as unknown as string ).toLocaleString()}
Builder Tag {latestResult.builder_tag || 'Not set'}
{latestResult.ai_model_used && (
AI Model Used {latestResult.ai_model_used}
)}
Submitted to Repository {latestResult.submitted_to_repository ? 'Yes' : 'No'}
{latestResult.repository_id && (
Repository ID {latestResult.repository_id}
)}
)}
{/* Benchmark History */} {benchmarkHistory && benchmarkHistory.length > 1 && (

Benchmark History

{showHistory && (
{benchmarkHistory.map((result) => ( ))}
Date Type Score Builder Tag Shared
{new Date( result.created_at as unknown as string ).toLocaleDateString()} {result.benchmark_type} {result.nomad_score.toFixed(1)} {result.builder_tag || '—'} {result.submitted_to_repository ? ( ) : ( )}
)}
)} )} {!latestResult && !isRunning && ( )}
) }