project-nomad/admin/app/controllers/benchmark_controller.ts
Chris Sherwood 755807f95e feat: Add system benchmark feature with NOMAD Score
Add comprehensive benchmarking capability to measure server performance:

Backend:
- BenchmarkService with CPU, memory, disk, and AI benchmarks using sysbench
- Database migrations for benchmark_results and benchmark_settings tables
- REST API endpoints for running benchmarks and retrieving results
- CLI commands: benchmark:run, benchmark:results, benchmark:submit
- BullMQ job for async benchmark execution with SSE progress updates
- Synchronous mode option (?sync=true) for simpler local dev setup

Frontend:
- Benchmark settings page with circular gauges for scores
- NOMAD Score display with weighted composite calculation
- System Performance section (CPU, Memory, Disk Read/Write)
- AI Performance section (tokens/sec, time to first token)
- Hardware Information display
- Expandable Benchmark Details section
- Progress simulation during sync benchmark execution

Easy Setup Integration:
- Added System Benchmark to Additional Tools section
- Built-in capability pattern for non-Docker features
- Click-to-navigate behavior for built-in tools

Fixes:
- Fixed Docker log multiplexing (enabled Tty: true) so container output parses correctly
- Consolidated disk benchmarks into single container execution

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-22 21:48:12 -08:00

231 lines
6.2 KiB
TypeScript

import { inject } from '@adonisjs/core'
import type { HttpContext } from '@adonisjs/core/http'
import { BenchmarkService } from '#services/benchmark_service'
import { runBenchmarkValidator, submitBenchmarkValidator } from '#validators/benchmark'
import { RunBenchmarkJob } from '#jobs/run_benchmark_job'
import { v4 as uuidv4 } from 'uuid'
import type { BenchmarkType } from '../../types/benchmark.js'
@inject()
export default class BenchmarkController {
  constructor(private benchmarkService: BenchmarkService) {}

  /**
   * Coerce an unknown thrown value into a readable message.
   *
   * `catch` bindings are `unknown` under strict TypeScript
   * (`useUnknownInCatchVariables`), so `.message` must not be accessed
   * without narrowing — a thrown non-Error would otherwise surface as
   * `undefined` or crash the handler.
   */
  private errorMessage(error: unknown): string {
    return error instanceof Error ? error.message : String(error)
  }

  /**
   * Send a 409 Conflict when a benchmark is already in progress.
   * Shared by `run`, `runSystem`, and `runAI`, which previously
   * duplicated this check verbatim.
   *
   * @param response - HTTP response used for the conflict reply
   * @param includeId - also report the currently-running benchmark id
   * @returns true when the request was rejected (response already sent)
   */
  private rejectIfBusy(response: HttpContext['response'], includeId = false): boolean {
    const status = this.benchmarkService.getStatus()
    if (status.status === 'idle') {
      return false
    }
    response.status(409).send({
      success: false,
      error: 'A benchmark is already running',
      ...(includeId ? { current_benchmark_id: status.benchmarkId } : {}),
    })
    return true
  }

  /**
   * Execute a benchmark in-process, mapping the requested type onto the
   * matching service call. Unknown types fall back to the full benchmark,
   * matching the original switch's default branch.
   */
  private async runSyncBenchmark(benchmarkType: BenchmarkType) {
    switch (benchmarkType) {
      case 'system':
        return this.benchmarkService.runSystemBenchmarks()
      case 'ai':
        return this.benchmarkService.runAIBenchmark()
      case 'full':
      default:
        return this.benchmarkService.runFullBenchmark()
    }
  }

  /**
   * Start a benchmark run (async via job queue, or sync if specified).
   *
   * Accepts `?sync=true` to execute in-process — useful for local dev
   * without Redis. Responds 409 when a benchmark is already running,
   * 201 when a job was dispatched, 500 when a sync run fails.
   */
  async run({ request, response }: HttpContext) {
    const payload = await request.validateUsing(runBenchmarkValidator)
    const benchmarkType: BenchmarkType = payload.benchmark_type || 'full'
    // Accept both the query-string form ('true') and a boolean body value.
    const runSync = request.input('sync') === 'true' || request.input('sync') === true

    if (this.rejectIfBusy(response, true)) {
      return
    }

    // Run synchronously if requested (useful for local dev without Redis)
    if (runSync) {
      try {
        const result = await this.runSyncBenchmark(benchmarkType)
        return response.send({
          success: true,
          benchmark_id: result.benchmark_id,
          nomad_score: result.nomad_score,
          result,
        })
      } catch (error) {
        return response.status(500).send({
          success: false,
          error: this.errorMessage(error),
        })
      }
    }

    // Generate benchmark ID and dispatch job (async)
    const benchmarkId = uuidv4()
    const { job, created } = await RunBenchmarkJob.dispatch({
      benchmark_id: benchmarkId,
      benchmark_type: benchmarkType,
      include_ai: benchmarkType === 'full' || benchmarkType === 'ai',
    })
    return response.status(201).send({
      success: true,
      // Fall back to the benchmark id when the queue did not return a job id.
      job_id: job?.id || benchmarkId,
      benchmark_id: benchmarkId,
      message: created ? `${benchmarkType} benchmark started` : 'Benchmark job already exists',
    })
  }

  /**
   * Run a system-only benchmark (CPU, memory, disk) via the job queue.
   */
  async runSystem({ response }: HttpContext) {
    if (this.rejectIfBusy(response)) {
      return
    }
    const benchmarkId = uuidv4()
    await RunBenchmarkJob.dispatch({
      benchmark_id: benchmarkId,
      benchmark_type: 'system',
      include_ai: false,
    })
    return response.status(201).send({
      success: true,
      benchmark_id: benchmarkId,
      message: 'System benchmark started',
    })
  }

  /**
   * Run an AI-only benchmark via the job queue.
   */
  async runAI({ response }: HttpContext) {
    if (this.rejectIfBusy(response)) {
      return
    }
    const benchmarkId = uuidv4()
    await RunBenchmarkJob.dispatch({
      benchmark_id: benchmarkId,
      benchmark_type: 'ai',
      include_ai: true,
    })
    return response.status(201).send({
      success: true,
      benchmark_id: benchmarkId,
      message: 'AI benchmark started',
    })
  }

  /**
   * Get all benchmark results with a total count.
   */
  async results({}: HttpContext) {
    const results = await this.benchmarkService.getAllResults()
    return {
      results,
      total: results.length,
    }
  }

  /**
   * Get the latest benchmark result, or `{ result: null }` when none exist.
   */
  async latest({}: HttpContext) {
    const result = await this.benchmarkService.getLatestResult()
    return { result: result ?? null }
  }

  /**
   * Get a specific benchmark result by ID. Responds 404 when not found.
   */
  async show({ params, response }: HttpContext) {
    const result = await this.benchmarkService.getResultById(params.id)
    if (!result) {
      return response.status(404).send({
        error: 'Benchmark result not found',
      })
    }
    return { result }
  }

  /**
   * Submit benchmark results to the central repository.
   * Responds 400 when the submission fails.
   */
  async submit({ request, response }: HttpContext) {
    const payload = await request.validateUsing(submitBenchmarkValidator)
    try {
      const submitResult = await this.benchmarkService.submitToRepository(payload.benchmark_id)
      return response.send({
        success: true,
        repository_id: submitResult.repository_id,
        percentile: submitResult.percentile,
      })
    } catch (error) {
      return response.status(400).send({
        success: false,
        error: this.errorMessage(error),
      })
    }
  }

  /**
   * Get comparison stats from the central repository.
   */
  async comparison({}: HttpContext) {
    const stats = await this.benchmarkService.getComparisonStats()
    return { stats }
  }

  /**
   * Get the current benchmark status (idle/running plus benchmark id).
   */
  async status({}: HttpContext) {
    return this.benchmarkService.getStatus()
  }

  /**
   * Get benchmark settings.
   *
   * The model is imported lazily — presumably to avoid a load-order or
   * circular-import issue at boot; TODO confirm before inlining it.
   */
  async settings({}: HttpContext) {
    const { default: BenchmarkSetting } = await import('#models/benchmark_setting')
    return await BenchmarkSetting.getAllSettings()
  }

  /**
   * Update benchmark settings. Only `allow_anonymous_submission` is
   * currently recognized; it is persisted as the string 'true'/'false'.
   * Always responds with the full, refreshed settings map.
   */
  async updateSettings({ request, response }: HttpContext) {
    const { default: BenchmarkSetting } = await import('#models/benchmark_setting')
    const body = request.body()
    if (body.allow_anonymous_submission !== undefined) {
      await BenchmarkSetting.setValue(
        'allow_anonymous_submission',
        body.allow_anonymous_submission ? 'true' : 'false'
      )
    }
    return response.send({
      success: true,
      settings: await BenchmarkSetting.getAllSettings(),
    })
  }
}