/**
 * Update the builder tag for a benchmark result.
 *
 * Expects `benchmark_id` (required) and `builder_tag` (optional) in the
 * request input. An empty or absent `builder_tag` clears the stored tag.
 *
 * Responds 400 on a missing id or a malformed tag, 404 when the result
 * does not exist; otherwise persists the tag and echoes it back.
 */
async updateBuilderTag({ request, response }: HttpContext) {
  const benchmarkId = request.input('benchmark_id')
  const builderTag = request.input('builder_tag')

  if (!benchmarkId) {
    return response.status(400).send({
      success: false,
      error: 'benchmark_id is required',
    })
  }

  const result = await this.benchmarkService.getResultById(benchmarkId)
  if (!result) {
    return response.status(404).send({
      success: false,
      error: 'Benchmark result not found',
    })
  }

  // Validate builder tag format if provided.
  // A word may itself contain a hyphen (e.g. the "Off-Grid" entry in the
  // frontend ADJECTIVES list), so allow one optional extra hyphenated
  // segment. The previous pattern /^[A-Za-z]+-[A-Za-z]+-\d{4}$/ rejected
  // tags such as "Off-Grid-Wolf-1234" that the UI legitimately generates.
  if (builderTag) {
    const tagPattern = /^[A-Za-z]+(?:-[A-Za-z]+){1,2}-\d{4}$/
    if (!tagPattern.test(builderTag)) {
      return response.status(400).send({
        success: false,
        error: 'Invalid builder tag format. Expected: Word-Word-0000',
      })
    }
  }

  // Falsy builderTag (empty string / undefined) clears the tag.
  result.builder_tag = builderTag || null
  await result.save()

  return response.send({
    success: true,
    builder_tag: result.builder_tag,
  })
}
await this.getResultById(benchmarkId) : await this.getLatestResult() @@ -116,6 +122,15 @@ export class BenchmarkService { throw new Error('No benchmark result found to submit') } + // Only allow full benchmarks with AI data to be submitted to repository + if (result.benchmark_type !== 'full') { + throw new Error('Only full benchmarks can be shared with the community. Run a Full Benchmark to share your results.') + } + + if (!result.ai_tokens_per_second || result.ai_tokens_per_second <= 0) { + throw new Error('Benchmark must include AI performance data. Ensure AI Assistant is installed and run a Full Benchmark.') + } + if (result.submitted_to_repository) { throw new Error('Benchmark result has already been submitted') } @@ -136,13 +151,27 @@ export class BenchmarkService { nomad_score: result.nomad_score, nomad_version: SystemService.getAppVersion(), benchmark_version: '1.0.0', + builder_tag: anonymous ? null : result.builder_tag, } try { + // Generate HMAC signature for submission verification + const timestamp = Date.now().toString() + const payload = timestamp + JSON.stringify(submission) + const signature = createHmac('sha256', BENCHMARK_HMAC_SECRET) + .update(payload) + .digest('hex') + const response = await axios.post( 'https://benchmark.projectnomad.us/api/v1/submit', submission, - { timeout: 30000 } + { + timeout: 30000, + headers: { + 'X-NOMAD-Timestamp': timestamp, + 'X-NOMAD-Signature': signature, + }, + } ) if (response.data.success) { @@ -214,17 +243,42 @@ export class BenchmarkService { } } - // Get GPU model (prefer discrete GPU) + // Get GPU model (prefer discrete GPU with dedicated VRAM) let gpuModel: string | null = null if (graphics.controllers && graphics.controllers.length > 0) { - const discreteGpu = graphics.controllers.find( - (g) => !g.vendor?.toLowerCase().includes('intel') && - !g.vendor?.toLowerCase().includes('amd') || - (g.vram && g.vram > 0) - ) + // First, look for discrete GPUs (NVIDIA, AMD discrete, or any with significant VRAM) + 
const discreteGpu = graphics.controllers.find((g) => { + const vendor = g.vendor?.toLowerCase() || '' + const model = g.model?.toLowerCase() || '' + // NVIDIA GPUs are always discrete + if (vendor.includes('nvidia') || model.includes('geforce') || model.includes('rtx') || model.includes('quadro')) { + return true + } + // AMD discrete GPUs (Radeon, not integrated APU graphics) + if ((vendor.includes('amd') || vendor.includes('ati')) && + (model.includes('radeon') || model.includes('rx ') || model.includes('vega')) && + !model.includes('graphics')) { + return true + } + // Any GPU with dedicated VRAM > 512MB is likely discrete + if (g.vram && g.vram > 512) { + return true + } + return false + }) gpuModel = discreteGpu?.model || graphics.controllers[0]?.model || null } + // Fallback: Extract integrated GPU from CPU model name (common for AMD APUs) + // e.g., "AMD Ryzen AI 9 HX 370 w/ Radeon 890M" -> "Radeon 890M" + if (!gpuModel) { + const cpuFullName = `${cpu.manufacturer} ${cpu.brand}` + const radeonMatch = cpuFullName.match(/w\/\s*(Radeon\s+\d+\w*)/i) + if (radeonMatch) { + gpuModel = radeonMatch[1] + } + } + return { cpu_model: `${cpu.manufacturer} ${cpu.brand}`, cpu_cores: cpu.physicalCores, diff --git a/admin/database/migrations/1769324448_add_builder_tag_to_benchmark_results.ts b/admin/database/migrations/1769324448_add_builder_tag_to_benchmark_results.ts new file mode 100644 index 0000000..49555a1 --- /dev/null +++ b/admin/database/migrations/1769324448_add_builder_tag_to_benchmark_results.ts @@ -0,0 +1,17 @@ +import { BaseSchema } from '@adonisjs/lucid/schema' + +export default class extends BaseSchema { + protected tableName = 'benchmark_results' + + async up() { + this.schema.alterTable(this.tableName, (table) => { + table.string('builder_tag', 64).nullable() + }) + } + + async down() { + this.schema.alterTable(this.tableName, (table) => { + table.dropColumn('builder_tag') + }) + } +} diff --git a/admin/inertia/components/BuilderTagSelector.tsx 
b/admin/inertia/components/BuilderTagSelector.tsx new file mode 100644 index 0000000..a7b1ff8 --- /dev/null +++ b/admin/inertia/components/BuilderTagSelector.tsx @@ -0,0 +1,131 @@ +import { useState, useEffect } from 'react' +import { ArrowPathIcon } from '@heroicons/react/24/outline' +import { + ADJECTIVES, + NOUNS, + generateRandomNumber, + generateRandomBuilderTag, + parseBuilderTag, + buildBuilderTag, +} from '~/lib/builderTagWords' + +interface BuilderTagSelectorProps { + value: string | null + onChange: (tag: string) => void + disabled?: boolean +} + +export default function BuilderTagSelector({ + value, + onChange, + disabled = false, +}: BuilderTagSelectorProps) { + const [adjective, setAdjective] = useState(ADJECTIVES[0]) + const [noun, setNoun] = useState(NOUNS[0]) + const [number, setNumber] = useState(generateRandomNumber()) + + // Parse existing value on mount + useEffect(() => { + if (value) { + const parsed = parseBuilderTag(value) + if (parsed) { + setAdjective(parsed.adjective) + setNoun(parsed.noun) + setNumber(parsed.number) + } + } else { + // Generate a random tag for new users + const randomTag = generateRandomBuilderTag() + const parsed = parseBuilderTag(randomTag) + if (parsed) { + setAdjective(parsed.adjective) + setNoun(parsed.noun) + setNumber(parsed.number) + onChange(randomTag) + } + } + }, []) + + // Update parent when selections change + const updateTag = (newAdjective: string, newNoun: string, newNumber: string) => { + const tag = buildBuilderTag(newAdjective, newNoun, newNumber) + onChange(tag) + } + + const handleAdjectiveChange = (newAdjective: string) => { + setAdjective(newAdjective) + updateTag(newAdjective, noun, number) + } + + const handleNounChange = (newNoun: string) => { + setNoun(newNoun) + updateTag(adjective, newNoun, number) + } + + const handleRandomize = () => { + const newAdjective = ADJECTIVES[Math.floor(Math.random() * ADJECTIVES.length)] + const newNoun = NOUNS[Math.floor(Math.random() * NOUNS.length)] + const 
newNumber = generateRandomNumber() + setAdjective(newAdjective) + setNoun(newNoun) + setNumber(newNumber) + updateTag(newAdjective, newNoun, newNumber) + } + + const currentTag = buildBuilderTag(adjective, noun, number) + + return ( +
+
+ + + - + + + + - + + + {number} + + + +
+ +
+ Your Builder Tag: + {currentTag} +
+
+ ) +} diff --git a/admin/inertia/components/InfoTooltip.tsx b/admin/inertia/components/InfoTooltip.tsx new file mode 100644 index 0000000..136c0b0 --- /dev/null +++ b/admin/inertia/components/InfoTooltip.tsx @@ -0,0 +1,35 @@ +import { InformationCircleIcon } from '@heroicons/react/24/outline' +import { useState } from 'react' + +interface InfoTooltipProps { + text: string + className?: string +} + +export default function InfoTooltip({ text, className = '' }: InfoTooltipProps) { + const [isVisible, setIsVisible] = useState(false) + + return ( + + + {isVisible && ( +
+
+ {text} +
+
+
+ )} + + ) +} diff --git a/admin/inertia/components/systeminfo/CircularGauge.tsx b/admin/inertia/components/systeminfo/CircularGauge.tsx index 288d060..2be7c47 100644 --- a/admin/inertia/components/systeminfo/CircularGauge.tsx +++ b/admin/inertia/components/systeminfo/CircularGauge.tsx @@ -31,23 +31,24 @@ export default function CircularGauge({ const displayValue = animated ? animatedValue : value + // Size configs: container size must match SVG size (2 * (radius + strokeWidth)) const sizes = { sm: { - container: 'w-32 h-32', + container: 'w-28 h-28', // 112px = 2 * (48 + 8) strokeWidth: 8, radius: 48, fontSize: 'text-xl', labelSize: 'text-xs', }, md: { - container: 'w-40 h-40', + container: 'w-[140px] h-[140px]', // 140px = 2 * (60 + 10) strokeWidth: 10, radius: 60, fontSize: 'text-2xl', labelSize: 'text-sm', }, lg: { - container: 'w-60 h-60', + container: 'w-[244px] h-[244px]', // 244px = 2 * (110 + 12) strokeWidth: 12, radius: 110, fontSize: 'text-4xl', @@ -60,10 +61,11 @@ export default function CircularGauge({ const offset = circumference - (displayValue / 100) * circumference const getColor = () => { - if (value >= 90) return 'desert-red' - if (value >= 75) return 'desert-orange' - if (value >= 50) return 'desert-tan' - return 'desert-olive' + // For benchmarks: higher scores = better = green + if (value >= 75) return 'desert-green' + if (value >= 50) return 'desert-olive' + if (value >= 25) return 'desert-orange' + return 'desert-red' } const color = getColor() diff --git a/admin/inertia/lib/builderTagWords.ts b/admin/inertia/lib/builderTagWords.ts new file mode 100644 index 0000000..50ddf91 --- /dev/null +++ b/admin/inertia/lib/builderTagWords.ts @@ -0,0 +1,145 @@ +// Builder Tag word lists for generating unique, NOMAD-themed identifiers +// Format: [Adjective]-[Noun]-[4-digit number] + +export const ADJECTIVES = [ + 'Tactical', + 'Stealth', + 'Rogue', + 'Shadow', + 'Ghost', + 'Silent', + 'Covert', + 'Lone', + 'Nomad', + 'Digital', + 'Cyber', + 'Off-Grid', 
+ 'Remote', + 'Arctic', + 'Desert', + 'Mountain', + 'Urban', + 'Bunker', + 'Hidden', + 'Secure', + 'Armored', + 'Fortified', + 'Mobile', + 'Solar', + 'Nuclear', + 'Storm', + 'Thunder', + 'Iron', + 'Steel', + 'Titanium', + 'Carbon', + 'Quantum', + 'Neural', + 'Alpha', + 'Omega', + 'Delta', + 'Sigma', + 'Apex', + 'Prime', + 'Elite', + 'Midnight', + 'Dawn', + 'Dusk', + 'Feral', + 'Relic', + 'Analog', + 'Hardened', + 'Vigilant', + 'Outland', + 'Frontier', +] as const + +export const NOUNS = [ + 'Llama', + 'Wolf', + 'Bear', + 'Eagle', + 'Falcon', + 'Hawk', + 'Raven', + 'Fox', + 'Coyote', + 'Panther', + 'Cobra', + 'Viper', + 'Phoenix', + 'Dragon', + 'Sentinel', + 'Guardian', + 'Ranger', + 'Scout', + 'Survivor', + 'Prepper', + 'Nomad', + 'Wanderer', + 'Drifter', + 'Outpost', + 'Shelter', + 'Bunker', + 'Vault', + 'Cache', + 'Haven', + 'Fortress', + 'Citadel', + 'Node', + 'Hub', + 'Grid', + 'Network', + 'Signal', + 'Beacon', + 'Tower', + 'Server', + 'Cluster', + 'Array', + 'Matrix', + 'Core', + 'Nexus', + 'Archive', + 'Relay', + 'Silo', + 'Depot', + 'Bastion', + 'Homestead', +] as const + +export type Adjective = (typeof ADJECTIVES)[number] +export type Noun = (typeof NOUNS)[number] + +export function generateRandomNumber(): string { + return String(Math.floor(Math.random() * 10000)).padStart(4, '0') +} + +export function generateRandomBuilderTag(): string { + const adjective = ADJECTIVES[Math.floor(Math.random() * ADJECTIVES.length)] + const noun = NOUNS[Math.floor(Math.random() * NOUNS.length)] + const number = generateRandomNumber() + return `${adjective}-${noun}-${number}` +} + +export function parseBuilderTag(tag: string): { + adjective: Adjective + noun: Noun + number: string +} | null { + const match = tag.match(/^(.+)-(.+)-(\d{4})$/) + if (!match) return null + + const [, adjective, noun, number] = match + if (!ADJECTIVES.includes(adjective as Adjective)) return null + if (!NOUNS.includes(noun as Noun)) return null + + return { + adjective: adjective as Adjective, + 
noun: noun as Noun, + number, + } +} + +export function buildBuilderTag(adjective: string, noun: string, number: string): string { + return `${adjective}-${noun}-${number}` +} diff --git a/admin/inertia/pages/settings/benchmark.tsx b/admin/inertia/pages/settings/benchmark.tsx index 9a478ef..e58aec0 100644 --- a/admin/inertia/pages/settings/benchmark.tsx +++ b/admin/inertia/pages/settings/benchmark.tsx @@ -1,17 +1,20 @@ -import { Head } from '@inertiajs/react' +import { Head, Link } from '@inertiajs/react' import { useState, useEffect } from 'react' import SettingsLayout from '~/layouts/SettingsLayout' -import { useQuery, useMutation } from '@tanstack/react-query' +import { useQuery, useMutation, useQueryClient } from '@tanstack/react-query' import CircularGauge from '~/components/systeminfo/CircularGauge' import InfoCard from '~/components/systeminfo/InfoCard' import Alert from '~/components/Alert' import StyledButton from '~/components/StyledButton' +import InfoTooltip from '~/components/InfoTooltip' +import BuilderTagSelector from '~/components/BuilderTagSelector' import { ChartBarIcon, CpuChipIcon, CircleStackIcon, ServerIcon, ChevronDownIcon, + ClockIcon, } from '@heroicons/react/24/outline' import { IconRobot } from '@tabler/icons-react' import { useTransmit } from 'react-adonis-transmit' @@ -28,9 +31,32 @@ export default function BenchmarkPage(props: { } }) { const { subscribe } = useTransmit() + const queryClient = useQueryClient() const [progress, setProgress] = useState(null) const [isRunning, setIsRunning] = useState(props.benchmark.status !== 'idle') const [showDetails, setShowDetails] = useState(false) + const [showHistory, setShowHistory] = useState(false) + const [showAIRequiredAlert, setShowAIRequiredAlert] = useState(false) + const [shareAnonymously, setShareAnonymously] = useState(false) + const [currentBuilderTag, setCurrentBuilderTag] = useState( + props.benchmark.latestResult?.builder_tag || null + ) + + // Check if AI Assistant is installed + 
const { data: aiInstalled } = useQuery({ + queryKey: ['services', 'ai-installed'], + queryFn: async () => { + const res = await fetch('/api/system/services') + const data = await res.json() + const services = Array.isArray(data) ? data : (data.services || []) + const openWebUI = services.find((s: any) => + s.service_name === 'nomad_open_webui' || s.serviceName === 'nomad_open_webui' + ) + return openWebUI?.installed === true || openWebUI?.installed === 1 + }, + staleTime: 0, + refetchOnMount: true, + }) // Fetch latest result const { data: latestResult, refetch: refetchLatest } = useQuery({ @@ -43,6 +69,16 @@ export default function BenchmarkPage(props: { initialData: props.benchmark.latestResult, }) + // Fetch all benchmark results for history + const { data: benchmarkHistory } = useQuery({ + queryKey: ['benchmark', 'history'], + queryFn: async () => { + const res = await fetch('/api/benchmark/results') + const data = await res.json() + return data.results as BenchmarkResult[] + }, + }) + // Run benchmark mutation (uses sync mode by default for simpler local dev) const runBenchmark = useMutation({ mutationFn: async (type: 'full' | 'system' | 'ai') => { @@ -100,15 +136,45 @@ export default function BenchmarkPage(props: { }, }) + // Update builder tag mutation + const updateBuilderTag = useMutation({ + mutationFn: async ({ benchmarkId, builderTag }: { benchmarkId: string; builderTag: string }) => { + const res = await fetch('/api/benchmark/builder-tag', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ benchmark_id: benchmarkId, builder_tag: builderTag }), + }) + const data = await res.json() + if (!data.success) { + throw new Error(data.error || 'Failed to update builder tag') + } + return data + }, + onSuccess: () => { + refetchLatest() + queryClient.invalidateQueries({ queryKey: ['benchmark', 'history'] }) + }, + }) + // Submit to repository mutation const [submitError, setSubmitError] = useState(null) const 
submitResult = useMutation({ - mutationFn: async (benchmarkId?: string) => { + mutationFn: async ({ benchmarkId, anonymous }: { benchmarkId: string; anonymous: boolean }) => { setSubmitError(null) + + // First, save the current builder tag to the benchmark + if (currentBuilderTag && !anonymous) { + await fetch('/api/benchmark/builder-tag', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ benchmark_id: benchmarkId, builder_tag: currentBuilderTag }), + }) + } + const res = await fetch('/api/benchmark/submit', { method: 'POST', headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ benchmark_id: benchmarkId }), + body: JSON.stringify({ benchmark_id: benchmarkId, anonymous }), }) const data = await res.json() if (!data.success) { @@ -118,12 +184,30 @@ export default function BenchmarkPage(props: { }, onSuccess: () => { refetchLatest() + queryClient.invalidateQueries({ queryKey: ['benchmark', 'history'] }) }, onError: (error: Error) => { setSubmitError(error.message) }, }) + // Check if the latest result is a full benchmark with AI data (eligible for sharing) + const canShareBenchmark = latestResult && + latestResult.benchmark_type === 'full' && + latestResult.ai_tokens_per_second !== null && + latestResult.ai_tokens_per_second > 0 && + !latestResult.submitted_to_repository + + // Handle Full Benchmark click with pre-flight check + const handleFullBenchmarkClick = () => { + if (!aiInstalled) { + setShowAIRequiredAlert(true) + return + } + setShowAIRequiredAlert(false) + runBenchmark.mutate('full') + } + // Simulate progress during sync benchmark (since we don't get SSE updates) useEffect(() => { if (!isRunning || progress?.status === 'completed' || progress?.status === 'error') return @@ -269,13 +353,30 @@ export default function BenchmarkPage(props: { onDismiss={() => setProgress(null)} /> )} + {showAIRequiredAlert && ( + setShowAIRequiredAlert(false)} + > + + Go to Apps to install AI Assistant → + + + )}

Run a benchmark to measure your system's CPU, memory, disk, and AI inference performance. The benchmark takes approximately 2-5 minutes to complete.

runBenchmark.mutate('full')} + onClick={handleFullBenchmarkClick} disabled={runBenchmark.isPending} icon='PlayIcon' > @@ -292,12 +393,21 @@ export default function BenchmarkPage(props: { runBenchmark.mutate('ai')} - disabled={runBenchmark.isPending} + disabled={runBenchmark.isPending || !aiInstalled} icon='SparklesIcon' + title={!aiInstalled ? 'AI Assistant must be installed to run AI benchmark' : undefined} > AI Only
+ {!aiInstalled && ( +

+ Note: AI Assistant is not installed. + + Install it + to run full benchmarks and share results with the community. +

+ )}
)} @@ -331,13 +441,46 @@ export default function BenchmarkPage(props: {

Your NOMAD Score is a weighted composite of all benchmark results.

- {!latestResult.submitted_to_repository && ( -
+ + {/* Share with Community - Only for full benchmarks with AI data */} + {canShareBenchmark && ( +
+

Share with Community

- Share your benchmark score anonymously with the NOMAD community. Only your hardware specs and scores are sent — no identifying information. + Share your benchmark on the community leaderboard. Choose a Builder Tag to claim your spot, or share anonymously.

+ + {/* Builder Tag Selector */} +
+ + +
+ + {/* Anonymous checkbox */} + + submitResult.mutate(latestResult.benchmark_id)} + onClick={() => submitResult.mutate({ + benchmarkId: latestResult.benchmark_id, + anonymous: shareAnonymously + })} disabled={submitResult.isPending} icon='CloudArrowUpIcon' > @@ -355,6 +498,17 @@ export default function BenchmarkPage(props: { )}
)} + + {/* Show message for partial benchmarks */} + {latestResult && !latestResult.submitted_to_repository && !canShareBenchmark && ( + + )} + {latestResult.submitted_to_repository && ( {latestResult.ai_tokens_per_second.toFixed(1)}
-
Tokens per Second
+
+ Tokens per Second + +
@@ -459,7 +616,10 @@ export default function BenchmarkPage(props: {
{latestResult.ai_time_to_first_token?.toFixed(0) || 'N/A'} ms
-
Time to First Token
+
+ Time to First Token + +
@@ -597,6 +757,10 @@ export default function BenchmarkPage(props: { Run Date {new Date(latestResult.created_at as unknown as string).toLocaleString()} +
+ Builder Tag + {latestResult.builder_tag || 'Not set'} +
{latestResult.ai_model_used && (
AI Model Used @@ -620,6 +784,83 @@ export default function BenchmarkPage(props: { )}
+ + {/* Benchmark History */} + {benchmarkHistory && benchmarkHistory.length > 1 && ( +
+

+
+ Benchmark History +

+ +
+ + + {showHistory && ( +
+
+ + + + + + + + + + + + {benchmarkHistory.map((result) => ( + + + + + + + + ))} + +
DateTypeScoreBuilder TagShared
+ {new Date(result.created_at as unknown as string).toLocaleDateString()} + {result.benchmark_type} + + {result.nomad_score.toFixed(1)} + + + {result.builder_tag || '—'} + + {result.submitted_to_repository ? ( + + ) : ( + + )} +
+
+
+ )} +
+
+ )} )} diff --git a/admin/start/routes.ts b/admin/start/routes.ts index f3f4e43..87401d4 100644 --- a/admin/start/routes.ts +++ b/admin/start/routes.ts @@ -134,6 +134,7 @@ router router.get('/results/latest', [BenchmarkController, 'latest']) router.get('/results/:id', [BenchmarkController, 'show']) router.post('/submit', [BenchmarkController, 'submit']) + router.post('/builder-tag', [BenchmarkController, 'updateBuilderTag']) router.get('/comparison', [BenchmarkController, 'comparison']) router.get('/status', [BenchmarkController, 'status']) router.get('/settings', [BenchmarkController, 'settings']) diff --git a/admin/types/benchmark.ts b/admin/types/benchmark.ts index 35f406e..91e4db8 100644 --- a/admin/types/benchmark.ts +++ b/admin/types/benchmark.ts @@ -46,6 +46,7 @@ export type BenchmarkResultSlim = Pick< | 'nomad_score' | 'submitted_to_repository' | 'created_at' + | 'builder_tag' > & { cpu_model: string gpu_model: string | null @@ -113,6 +114,7 @@ export type RepositorySubmission = Pick< nomad_version: string benchmark_version: string ram_gb: number + builder_tag: string | null // null = anonymous submission } // Central repository response types