// NOTE(review): this source dump is whitespace-mangled — the logic below was
// collapsed onto a handful of very long lines (hiding code behind inline `//`
// comments) and the JSX markup after `return (` appears stripped, leaving only
// text/expression fragments. The logic portion is re-broken here into one
// statement per line, token-for-token identical; the JSX residue is left as
// found. Verify against version control before relying on the JSX section.
import { Head, Link, usePage } from '@inertiajs/react'
import { useState, useEffect, useRef } from 'react'
import { useTranslation } from 'react-i18next'
import SettingsLayout from '~/layouts/SettingsLayout'
import { useQuery, useMutation, useQueryClient } from '@tanstack/react-query'
import CircularGauge from '~/components/systeminfo/CircularGauge'
import InfoCard from '~/components/systeminfo/InfoCard'
import Alert from '~/components/Alert'
import StyledButton from '~/components/StyledButton'
import InfoTooltip from '~/components/InfoTooltip'
import BuilderTagSelector from '~/components/BuilderTagSelector'
import {
  IconRobot,
  IconChartBar,
  IconCpu,
  IconDatabase,
  IconServer,
  IconChevronDown,
  IconClock,
} from '@tabler/icons-react'
import { useTransmit } from 'react-adonis-transmit'
import { BenchmarkProgress, BenchmarkStatus } from '../../../types/benchmark'
import BenchmarkResult from '#models/benchmark_result'
import api from '~/lib/api'
import useServiceInstalledStatus from '~/hooks/useServiceInstalledStatus'
import { SERVICE_NAMES } from '../../../constants/service_names'
import { BROADCAST_CHANNELS } from '../../../constants/broadcast'

// Progress event as broadcast over SSE, tagged with the run it belongs to.
type BenchmarkProgressWithID = BenchmarkProgress & { benchmark_id: string }

/**
 * Settings page that runs system/AI benchmarks, displays the latest result,
 * and lets the user share a full-benchmark result with the community repository.
 *
 * Props come from the server-side Inertia controller: the most recently stored
 * result, the current benchmark status, and the id of an in-flight run (if any).
 */
export default function BenchmarkPage(props: {
  benchmark: {
    latestResult: BenchmarkResult | null
    status: BenchmarkStatus
    currentBenchmarkId: string | null
  }
}) {
  const { t } = useTranslation()
  const { aiAssistantName } = usePage<{ aiAssistantName: string }>().props
  const { subscribe } = useTransmit()
  const queryClient = useQueryClient()
  // Whether the Ollama service is installed — gates the AI-dependent benchmarks.
  const aiInstalled = useServiceInstalledStatus(SERVICE_NAMES.OLLAMA)
  const [progress, setProgress] = useState(null)
  // Server reports a non-idle status when a run is already in flight.
  const [isRunning, setIsRunning] = useState(props.benchmark.status !== 'idle')
  // Ref lets the SSE effect call the freshest refetch without re-subscribing.
  const refetchLatestRef = useRef<(() => void) | null>(null)
  const [showDetails, setShowDetails] = useState(false)
  const [showHistory, setShowHistory] = useState(false)
  const [showAIRequiredAlert, setShowAIRequiredAlert] =
    useState(false)
  const [shareAnonymously, setShareAnonymously] = useState(false)
  // Builder tag defaults to whatever was saved on the latest stored result.
  const [currentBuilderTag, setCurrentBuilderTag] = useState(
    props.benchmark.latestResult?.builder_tag || null
  )

  // Fetch latest result
  const { data: latestResult, refetch: refetchLatest } = useQuery({
    queryKey: ['benchmark', 'latest'],
    queryFn: async () => {
      const res = await api.getLatestBenchmarkResult()
      if (res && res.result) {
        return res.result
      }
      return null
    },
    initialData: props.benchmark.latestResult,
  })
  // Keep the ref pointing at the current refetch for the SSE handler below.
  refetchLatestRef.current = refetchLatest

  // Fetch all benchmark results for history
  const { data: benchmarkHistory } = useQuery({
    queryKey: ['benchmark', 'history'],
    queryFn: async () => {
      const res = await api.getBenchmarkResults()
      if (res && res.results && Array.isArray(res.results)) {
        return res.results
      }
      return []
    },
  })

  // Run benchmark mutation (uses sync mode by default for simpler local dev)
  const runBenchmark = useMutation({
    mutationFn: async (type: 'full' | 'system' | 'ai') => {
      setIsRunning(true)
      // Seed an optimistic "starting" progress entry so the UI reacts at once.
      setProgress({
        status: 'starting',
        progress: 5,
        message: t('benchmark.progress.starting'),
        current_stage: t('benchmark.stages.starting'),
        benchmark_id: '',
        timestamp: new Date().toISOString(),
      })
      // Use sync mode - runs inline without needing Redis/queue worker
      return await api.runBenchmark(type, true)
    },
    onSuccess: (data) => {
      if (data?.success) {
        setProgress({
          status: 'completed',
          progress: 100,
          message: t('benchmark.progress.completed'),
          current_stage: t('benchmark.stages.complete'),
          benchmark_id: data.benchmark_id,
          timestamp: new Date().toISOString(),
        })
        refetchLatest()
      } else {
        // Request succeeded at transport level but the run itself failed.
        setProgress({
          status: 'error',
          progress: 0,
          message: t('benchmark.progress.failed'),
          current_stage: t('benchmark.stages.error'),
          benchmark_id: '',
          timestamp: new Date().toISOString(),
        })
      }
      setIsRunning(false)
    },
    onError: (error) => {
      setProgress({
        status: 'error',
        progress: 0,
        message: error.message || t('benchmark.progress.failed'),
        current_stage: t('benchmark.stages.error'),
        benchmark_id: '',
        timestamp: new Date().toISOString(),
      })
      setIsRunning(false)
    },
  })

  // Update builder tag mutation
  const updateBuilderTag = useMutation({
    mutationFn: async ({
      benchmarkId,
      builderTag,
    }: {
      benchmarkId: string
      builderTag: string
      invalidate?: boolean
    }) => {
      const res = await api.updateBuilderTag(benchmarkId, builderTag)
      if (!res || !res.success) {
        throw new Error(res?.error || 'Failed to update builder tag')
      }
      return res
    },
    onSuccess: (_, variables) => {
      // `invalidate` is false when invoked inside the submit flow, which
      // performs its own refetch/invalidation afterwards.
      if (variables.invalidate) {
        refetchLatest()
        queryClient.invalidateQueries({ queryKey: ['benchmark', 'history'] })
      }
    },
  })

  // Submit to repository mutation
  const [submitError, setSubmitError] = useState(null)
  const submitResult = useMutation({
    mutationFn: async ({ benchmarkId, anonymous }: { benchmarkId: string; anonymous: boolean }) => {
      setSubmitError(null)
      // First, save the current builder tag to the benchmark (don't refetch yet)
      if (currentBuilderTag && !anonymous) {
        await updateBuilderTag.mutateAsync({
          benchmarkId,
          builderTag: currentBuilderTag,
          invalidate: false,
        })
      }
      const res = await api.submitBenchmark(benchmarkId, anonymous)
      if (!res || !res.success) {
        throw new Error(res?.error || 'Failed to submit benchmark')
      }
      return res
    },
    onSuccess: () => {
      refetchLatest()
      queryClient.invalidateQueries({ queryKey: ['benchmark', 'history'] })
    },
    onError: (error: any) => {
      // Check if this is a 409 Conflict error (already submitted)
      if (error.status === 409) {
        setSubmitError(t('benchmark.alreadySubmitted'))
      } else {
        setSubmitError(error.message)
      }
    },
  })

  // Check if the latest result is a full benchmark with AI data (eligible for sharing)
  const canShareBenchmark =
    latestResult &&
    latestResult.benchmark_type === 'full' &&
    latestResult.ai_tokens_per_second !== null &&
    latestResult.ai_tokens_per_second > 0 &&
    !latestResult.submitted_to_repository

  // Handle Full Benchmark click with pre-flight check
  const handleFullBenchmarkClick = () => {
    if (!aiInstalled) {
      // Full benchmark needs the AI service; surface the alert instead of running.
      setShowAIRequiredAlert(true)
      return
    }
    setShowAIRequiredAlert(false)
    runBenchmark.mutate('full')
  }

  // Simulate progress during sync benchmark (since we don't get SSE updates)
  useEffect(() => {
    if (!isRunning || progress?.status === 'completed' || progress?.status === 'error') return
    // Each entry carries the progress % to show and a nominal duration before
    // the next stage appears (durations are estimates, not measurements).
    const stages: {
      status: BenchmarkStatus
      progress: number
      message: string
      label: string
      duration: number
    }[] = [
      {
        status: 'detecting_hardware',
        progress: 10,
        message: t('benchmark.progress.detectingHardware'),
        label: t('benchmark.stages.detectingHardware'),
        duration: 2000,
      },
      {
        status: 'running_cpu',
        progress: 25,
        message: t('benchmark.progress.runningCpu'),
        label: t('benchmark.stages.cpuBenchmark'),
        duration: 32000,
      },
      {
        status: 'running_memory',
        progress: 40,
        message: t('benchmark.progress.runningMemory'),
        label: t('benchmark.stages.memoryBenchmark'),
        duration: 8000,
      },
      {
        status: 'running_disk_read',
        progress: 55,
        message: t('benchmark.progress.runningDiskRead'),
        label: t('benchmark.stages.diskReadTest'),
        duration: 35000,
      },
      {
        status: 'running_disk_write',
        progress: 70,
        message: t('benchmark.progress.runningDiskWrite'),
        label: t('benchmark.stages.diskWriteTest'),
        duration: 35000,
      },
      {
        status: 'downloading_ai_model',
        progress: 80,
        message: t('benchmark.progress.downloadingAiModel'),
        label: t('benchmark.stages.downloadingAiModel'),
        duration: 5000,
      },
      {
        status: 'running_ai',
        progress: 85,
        message: t('benchmark.progress.runningAi'),
        label: t('benchmark.stages.aiInferenceTest'),
        duration: 15000,
      },
      {
        status: 'calculating_score',
        progress: 95,
        message: t('benchmark.progress.calculatingScore'),
        label: t('benchmark.stages.calculatingScore'),
        duration: 2000,
      },
    ]
    let currentStage = 0
    const advanceStage = () => {
      if (currentStage < stages.length && isRunning) {
        const stage = stages[currentStage]
        setProgress({
          status: stage.status,
          progress: stage.progress,
          message: stage.message,
          current_stage: stage.label,
          benchmark_id: '',
          timestamp: new Date().toISOString(),
        })
        currentStage++
      }
    }
    // Start the first stage after a short delay
    const timers: NodeJS.Timeout[] = []
    // One timer per stage: the first fires at 1s, each subsequent one after
    // the previous stage's nominal duration has elapsed.
    let elapsed = 1000
    stages.forEach((stage) => {
      timers.push(setTimeout(() => advanceStage(), elapsed))
      elapsed += stage.duration
    })
    // Cleanup cancels all pending stage timers when the run stops or unmounts.
    return () => {
      timers.forEach((t) => clearTimeout(t))
    }
  }, [isRunning])

  // Listen for benchmark progress via SSE (backup for async mode)
  useEffect(() => {
    const unsubscribe = subscribe(BROADCAST_CHANNELS.BENCHMARK_PROGRESS, (data: BenchmarkProgressWithID) => {
      setProgress(data)
      if (data.status === 'completed' || data.status === 'error') {
        setIsRunning(false)
        // Use the ref so this subscription never goes stale.
        refetchLatestRef.current?.()
      }
    })
    return () => {
      unsubscribe()
    }
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [subscribe])

  // Format a raw byte count as a one-decimal gigabyte string, e.g. "16.0 GB".
  const formatBytes = (bytes: number) => {
    const gb = bytes / (1024 * 1024 * 1024)
    return `${gb.toFixed(1)} GB`
  }

  // Map a 0-100 score onto a Tailwind text color (green/yellow/red).
  const getScoreColor = (score: number) => {
    if (score >= 70) return 'text-green-600'
    if (score >= 40) return 'text-yellow-600'
    return 'text-red-600'
  }

  // Translate the current progress status into a display percentage.
  const getProgressPercent = () => {
    if (!progress) return 0
    // NOTE(review): `Record` has lost its type arguments in this dump —
    // presumably Record<BenchmarkStatus, number>; confirm against VCS.
    const stages: Record = {
      idle: 0,
      starting: 5,
      detecting_hardware: 10,
      running_cpu: 25,
      running_memory: 40,
      running_disk_read: 55,
      running_disk_write: 70,
      downloading_ai_model: 80,
      running_ai: 85,
      calculating_score: 95,
      completed: 100,
      error: 0,
    }
    return stages[progress.status] || 0
  }

  // Calculate AI score from tokens per second (normalized to 0-100)
  // Reference: 30 tok/s = 50 score, 60 tok/s = 100 score
  const getAIScore = (tokensPerSecond: number | null): number => {
    if (!tokensPerSecond) return 0
    const score = (tokensPerSecond / 60) * 100
    return Math.min(100, Math.max(0, score))
  }

  return (
    // NOTE(review): the JSX below appears to have lost its element tags in
    // extraction — only text/expression/attribute fragments remain. Left
    // verbatim; restore from version control rather than from this dump.

{t('benchmark.heading')}

{t('benchmark.description')}

{/* Run Benchmark Section */}

{t('benchmark.runBenchmark')}

{isRunning ? (
{progress?.current_stage || t('benchmark.runningBenchmark')}

{progress?.message}

) : (
{progress?.status === 'error' && ( setProgress(null)} /> )} {showAIRequiredAlert && ( setShowAIRequiredAlert(false)} > {t('benchmark.goToApps', { name: aiAssistantName })} )}

{t('benchmark.benchmarkDescription')}

{t('benchmark.runFullBenchmark')} runBenchmark.mutate('system')} disabled={runBenchmark.isPending} icon="IconCpu" > {t('benchmark.systemOnly')} runBenchmark.mutate('ai')} disabled={runBenchmark.isPending || !aiInstalled} icon="IconWand" title={ !aiInstalled ? t('benchmark.aiOnlyTooltip', { name: aiAssistantName }) : undefined } > {t('benchmark.aiOnly')}
{!aiInstalled && (

Note: {t('benchmark.aiNotInstalledNote', { name: aiAssistantName })} {t('benchmark.installIt')} {' '} {t('benchmark.aiNotInstalledSuffix')}

)}
)}
{/* Results Section */} {latestResult && ( <>

{t('benchmark.nomadScore')}

} />
{latestResult.nomad_score.toFixed(1)}

{t('benchmark.nomadScoreDescription')}

{/* Share with Community - Only for full benchmarks with AI data */} {canShareBenchmark && (

{t('benchmark.shareWithCommunity')}

{t('benchmark.shareDescription')}

{/* Builder Tag Selector */}
{/* Anonymous checkbox */} submitResult.mutate({ benchmarkId: latestResult.benchmark_id, anonymous: shareAnonymously, }) } disabled={submitResult.isPending} icon="IconCloudUpload" > {submitResult.isPending ? t('benchmark.submitting') : t('benchmark.shareButton')} {submitError && ( setSubmitError(null)} /> )}
)} {/* Show message for partial benchmarks */} {latestResult && !latestResult.submitted_to_repository && !canShareBenchmark && ( )} {latestResult.submitted_to_repository && ( {t('benchmark.viewLeaderboard')} )}

{t('benchmark.systemPerformance')}

} />
} />
} />
} />
{/* AI Performance Section */}

{t('benchmark.aiPerformance')}

{latestResult.ai_tokens_per_second ? (
} />
{latestResult.ai_tokens_per_second.toFixed(1)}
{t('benchmark.tokensPerSecond')}
{latestResult.ai_time_to_first_token?.toFixed(0) || 'N/A'} ms
{t('benchmark.timeToFirstToken')}
) : (

{t('benchmark.noAIData')}

{t('benchmark.noAIDataMessage')}

)}

{t('benchmark.hardwareInformation')}

} variant="elevated" data={[ { label: t('benchmark.modelLabel'), value: latestResult.cpu_model }, { label: t('benchmark.cores'), value: latestResult.cpu_cores }, { label: t('benchmark.threads'), value: latestResult.cpu_threads }, ]} /> } variant="elevated" data={[ { label: t('benchmark.ram'), value: formatBytes(latestResult.ram_bytes) }, { label: t('benchmark.diskType'), value: latestResult.disk_type.toUpperCase() }, { label: t('benchmark.gpu'), value: latestResult.gpu_model || t('benchmark.notDetected') }, ]} />

{t('benchmark.benchmarkDetails')}

{/* Summary row - always visible */} {/* Expanded details */} {showDetails && (
{/* Raw Scores */}

{t('benchmark.rawScores')}

{t('benchmark.cpuScore')} {(latestResult.cpu_score * 100).toFixed(1)}%
{t('benchmark.memoryScore')} {(latestResult.memory_score * 100).toFixed(1)}%
{t('benchmark.diskReadScore')} {(latestResult.disk_read_score * 100).toFixed(1)}%
{t('benchmark.diskWriteScore')} {(latestResult.disk_write_score * 100).toFixed(1)}%
{latestResult.ai_tokens_per_second && ( <>
{t('benchmark.aiTokensSec')} {latestResult.ai_tokens_per_second.toFixed(1)}
{t('benchmark.aiTimeToFirstToken')} {latestResult.ai_time_to_first_token?.toFixed(0) || 'N/A'} ms
)}
{/* Benchmark Info */}

{t('benchmark.benchmarkInfo')}

{t('benchmark.fullBenchmarkId')} {latestResult.benchmark_id}
{t('benchmark.benchmarkType')} {latestResult.benchmark_type}
{t('benchmark.runDate')} {new Date( latestResult.created_at as unknown as string ).toLocaleString()}
{t('benchmark.builderTag')} {latestResult.builder_tag || t('benchmark.notSet')}
{latestResult.ai_model_used && (
{t('benchmark.aiModelUsed')} {latestResult.ai_model_used}
)}
{t('benchmark.submittedToRepository')} {latestResult.submitted_to_repository ? t('benchmark.yes') : t('benchmark.no')}
{latestResult.repository_id && (
{t('benchmark.repositoryId')} {latestResult.repository_id}
)}
)}
{/* Benchmark History */} {benchmarkHistory && benchmarkHistory.length > 1 && (

{t('benchmark.benchmarkHistory')}

{showHistory && (
{benchmarkHistory.map((result) => ( ))}
{t('benchmark.date')} {t('benchmark.type')} {t('benchmark.score')} {t('benchmark.builderTag')} {t('benchmark.shared')}
{new Date( result.created_at as unknown as string ).toLocaleDateString()} {result.benchmark_type} {result.nomad_score.toFixed(1)} {result.builder_tag || '—'} {result.submitted_to_repository ? ( ) : ( )}
)}
)} )} {!latestResult && !isRunning && ( )}
  )
}