diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index c0a662d..ec28b8d 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -12,3 +12,6 @@ contact_links: - name: 🤝 Contributing Guide url: https://github.com/Crosstalk-Solutions/project-nomad/blob/main/CONTRIBUTING.md about: Learn how to contribute to Project N.O.M.A.D. + - name: 📅 Roadmap + url: https://roadmap.projectnomad.us + about: See our public roadmap, vote on features, and suggest new ones \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index 9a86a68..4fd8296 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -6,13 +6,17 @@ body: - type: markdown attributes: value: | - Thanks for your interest in improving Project N.O.M.A.D.! + Thanks for your interest in improving Project N.O.M.A.D.! Before you submit a feature request, consider checking our [roadmap](https://roadmap.projectnomad.us) to see if it's already planned or in progress. You're welcome to suggest new ideas there if you don't plan on opening PRs yourself. + **Please note:** Feature requests are not guaranteed to be implemented. All requests are evaluated based on alignment with the project's goals, feasibility, and community demand. **Before submitting:** - - Search existing feature requests to avoid duplicates + - Search existing feature requests and our [roadmap](https://roadmap.projectnomad.us) to avoid duplicates - Consider if this aligns with N.O.M.A.D.'s mission: offline-first knowledge and education + - Consider the technical feasibility of the feature: N.O.M.A.D. 
is designed to be containerized and run on a wide range of hardware, so features that require heavy resources (aside from GPU-intensive tasks) or complex host configurations may be less likely to be implemented + - Consider the scope of the feature: Small, focused enhancements that can be implemented incrementally are more likely to be implemented than large, broad features that would require significant development effort or have an unclear path forward + - If you're able to contribute code, testing, or documentation, that significantly increases the chances of your feature being implemented - type: dropdown id: feature-category diff --git a/.github/workflows/build-disk-collector.yml b/.github/workflows/build-disk-collector.yml new file mode 100644 index 0000000..7649ba5 --- /dev/null +++ b/.github/workflows/build-disk-collector.yml @@ -0,0 +1,51 @@ +name: Build Disk Collector Image + +on: + workflow_dispatch: + inputs: + version: + description: 'Semantic version to label the Docker image under (no "v" prefix, e.g. "1.2.3")' + required: true + type: string + tag_latest: + description: 'Also tag this image as :latest?' 
+ required: false + type: boolean + default: false + +jobs: + check_authorization: + name: Check authorization to publish new Docker image + runs-on: ubuntu-latest + outputs: + isAuthorized: ${{ steps.check-auth.outputs.is_authorized }} + steps: + - name: check-auth + id: check-auth + run: echo "is_authorized=${{ contains(secrets.DEPLOYMENT_AUTHORIZED_USERS, github.triggering_actor) }}" >> $GITHUB_OUTPUT + build: + name: Build disk-collector image + needs: check_authorization + if: needs.check_authorization.outputs.isAuthorized == 'true' + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Log in to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Build and push + uses: docker/build-push-action@v5 + with: + context: install/sidecar-disk-collector + push: true + tags: | + ghcr.io/crosstalk-solutions/project-nomad-disk-collector:${{ inputs.version }} + ghcr.io/crosstalk-solutions/project-nomad-disk-collector:v${{ inputs.version }} + ${{ inputs.tag_latest && 'ghcr.io/crosstalk-solutions/project-nomad-disk-collector:latest' || '' }} diff --git a/.github/workflows/docker.yml b/.github/workflows/build-primary-image.yml similarity index 98% rename from .github/workflows/docker.yml rename to .github/workflows/build-primary-image.yml index 1cebccf..daf0e54 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/build-primary-image.yml @@ -1,4 +1,4 @@ -name: Build Docker Image +name: Build Primary Docker Image on: workflow_dispatch: diff --git a/.github/workflows/build-sidecar-updater.yml b/.github/workflows/build-sidecar-updater.yml new file mode 100644 index 0000000..822bc28 --- /dev/null +++ b/.github/workflows/build-sidecar-updater.yml @@ -0,0 +1,51 @@ +name: Build Sidecar Updater Image + +on: + workflow_dispatch: + inputs: + version: + description: 
'Semantic version to label the Docker image under (no "v" prefix, e.g. "1.2.3")' + required: true + type: string + tag_latest: + description: 'Also tag this image as :latest?' + required: false + type: boolean + default: false + +jobs: + check_authorization: + name: Check authorization to publish new Docker image + runs-on: ubuntu-latest + outputs: + isAuthorized: ${{ steps.check-auth.outputs.is_authorized }} + steps: + - name: check-auth + id: check-auth + run: echo "is_authorized=${{ contains(secrets.DEPLOYMENT_AUTHORIZED_USERS, github.triggering_actor) }}" >> $GITHUB_OUTPUT + build: + name: Build sidecar-updater image + needs: check_authorization + if: needs.check_authorization.outputs.isAuthorized == 'true' + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Log in to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Build and push + uses: docker/build-push-action@v5 + with: + context: install/sidecar-updater + push: true + tags: | + ghcr.io/crosstalk-solutions/project-nomad-sidecar-updater:${{ inputs.version }} + ghcr.io/crosstalk-solutions/project-nomad-sidecar-updater:v${{ inputs.version }} + ${{ inputs.tag_latest && 'ghcr.io/crosstalk-solutions/project-nomad-sidecar-updater:latest' || '' }} diff --git a/Dockerfile b/Dockerfile index 27f3aed..c91f9ac 100644 --- a/Dockerfile +++ b/Dockerfile @@ -45,7 +45,14 @@ COPY --from=production-deps /app/node_modules /app/node_modules COPY --from=build /app/build /app # Copy root package.json for version info COPY package.json /app/version.json + +# Copy docs and README for access within the container COPY admin/docs /app/docs COPY README.md /app/README.md + +# Copy entrypoint script and ensure it's executable +COPY install/entrypoint.sh /usr/local/bin/entrypoint.sh +RUN chmod +x /usr/local/bin/entrypoint.sh + 
EXPOSE 8080 -CMD ["node", "./bin/server.js"] \ No newline at end of file +ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] \ No newline at end of file diff --git a/README.md b/README.md index 89c2e17..bdca4c8 100644 --- a/README.md +++ b/README.md @@ -21,13 +21,16 @@ Project N.O.M.A.D. can be installed on any Debian-based operating system (we rec *Note: sudo/root privileges are required to run the install script* -#### Quick Install +#### Quick Install (Debian-based OS Only) ```bash sudo apt-get update && sudo apt-get install -y curl && curl -fsSL https://raw.githubusercontent.com/Crosstalk-Solutions/project-nomad/refs/heads/main/install/install_nomad.sh -o install_nomad.sh && sudo bash install_nomad.sh ``` Project N.O.M.A.D. is now installed on your device! Open a browser and navigate to `http://localhost:8080` (or `http://DEVICE_IP:8080`) to start exploring! +### Advanced Installation +For more control over the installation process, copy and paste the [Docker Compose template](https://raw.githubusercontent.com/Crosstalk-Solutions/project-nomad/refs/heads/main/install/management_compose.yaml) into a `docker-compose.yml` file and customize it to your liking (be sure to replace any placeholders with your actual values). Then, run `docker compose up -d` to start the Command Center and its dependencies. Note: this method is recommended for advanced users only, as it requires familiarity with Docker and manual configuration before starting. + ## How It Works N.O.M.A.D. is a management UI ("Command Center") and API that orchestrates a collection of containerized tools and resources via [Docker](https://www.docker.com/). It handles installation, configuration, and updates for everything — so you don't have to. 
@@ -80,7 +83,7 @@ To run LLM's and other included AI tools: - OS: Debian-based (Ubuntu recommended) - Stable internet connection (required during install only) -**For detailed build recommendations at three price points ($200–$800+), see the [Hardware Guide](https://www.projectnomad.us/hardware).** +**For detailed build recommendations at three price points ($150–$1,000+), see the [Hardware Guide](https://www.projectnomad.us/hardware).** Again, Project N.O.M.A.D. itself is quite lightweight - it's the tools and resources you choose to install with N.O.M.A.D. that will determine the specs required for your unique deployment diff --git a/admin/app/controllers/downloads_controller.ts b/admin/app/controllers/downloads_controller.ts index bd58790..023806b 100644 --- a/admin/app/controllers/downloads_controller.ts +++ b/admin/app/controllers/downloads_controller.ts @@ -15,4 +15,9 @@ export default class DownloadsController { const payload = await request.validateUsing(downloadJobsByFiletypeSchema) return this.downloadService.listDownloadJobs(payload.params.filetype) } + + async removeJob({ params }: HttpContext) { + await this.downloadService.removeFailedJob(params.jobId) + return { success: true } + } } diff --git a/admin/app/controllers/maps_controller.ts b/admin/app/controllers/maps_controller.ts index 8290d45..54f0e8f 100644 --- a/admin/app/controllers/maps_controller.ts +++ b/admin/app/controllers/maps_controller.ts @@ -83,7 +83,7 @@ export default class MapsController { }) } - const styles = await this.mapService.generateStylesJSON(request.host()) + const styles = await this.mapService.generateStylesJSON(request.host(), request.protocol()) return response.json(styles) } diff --git a/admin/app/controllers/settings_controller.ts b/admin/app/controllers/settings_controller.ts index c0a312f..e90370d 100644 --- a/admin/app/controllers/settings_controller.ts +++ b/admin/app/controllers/settings_controller.ts @@ -39,6 +39,10 @@ export default class SettingsController { 
return inertia.render('settings/legal'); } + async support({ inertia }: HttpContext) { + return inertia.render('settings/support'); + } + async maps({ inertia }: HttpContext) { const baseAssetsCheck = await this.mapService.ensureBaseAssets(); const regionFiles = await this.mapService.listRegions(); diff --git a/admin/app/controllers/system_controller.ts b/admin/app/controllers/system_controller.ts index cdcde7f..0c3e1ad 100644 --- a/admin/app/controllers/system_controller.ts +++ b/admin/app/controllers/system_controller.ts @@ -113,6 +113,11 @@ export default class SystemController { return await this.systemService.subscribeToReleaseNotes(reqData.email); } + async getDebugInfo({}: HttpContext) { + const debugInfo = await this.systemService.getDebugInfo() + return { debugInfo } + } + async checkServiceUpdates({ response }: HttpContext) { await CheckServiceUpdatesJob.dispatch() response.send({ success: true, message: 'Service update check dispatched' }) diff --git a/admin/app/jobs/embed_file_job.ts b/admin/app/jobs/embed_file_job.ts index 0c59b32..0c0a12f 100644 --- a/admin/app/jobs/embed_file_job.ts +++ b/admin/app/jobs/embed_file_job.ts @@ -1,4 +1,4 @@ -import { Job } from 'bullmq' +import { Job, UnrecoverableError } from 'bullmq' import { QueueService } from '#services/queue_service' import { EmbedJobWithProgress } from '../../types/rag.js' import { RagService } from '#services/rag_service' @@ -42,7 +42,15 @@ export class EmbedFileJob { const ragService = new RagService(dockerService, ollamaService) try { - // Check if Ollama and Qdrant services are ready + // Check if Ollama and Qdrant services are installed and ready + // Use UnrecoverableError for "not installed" so BullMQ won't retry — + // retrying 30x when the service doesn't exist just wastes Redis connections + const ollamaUrl = await dockerService.getServiceURL('nomad_ollama') + if (!ollamaUrl) { + logger.warn('[EmbedFileJob] Ollama is not installed. 
Skipping embedding for: %s', fileName) + throw new UnrecoverableError('Ollama service is not installed. Install AI Assistant to enable file embeddings.') + } + const existingModels = await ollamaService.getModels() if (!existingModels) { logger.warn('[EmbedFileJob] Ollama service not ready yet. Will retry...') @@ -51,8 +59,8 @@ export class EmbedFileJob { const qdrantUrl = await dockerService.getServiceURL('nomad_qdrant') if (!qdrantUrl) { - logger.warn('[EmbedFileJob] Qdrant service not ready yet. Will retry...') - throw new Error('Qdrant service not ready yet') + logger.warn('[EmbedFileJob] Qdrant is not installed. Skipping embedding for: %s', fileName) + throw new UnrecoverableError('Qdrant service is not installed. Install AI Assistant to enable file embeddings.') } logger.info(`[EmbedFileJob] Services ready. Processing file: ${fileName}`) diff --git a/admin/app/jobs/run_download_job.ts b/admin/app/jobs/run_download_job.ts index 3cc09ad..c7f672e 100644 --- a/admin/app/jobs/run_download_job.ts +++ b/admin/app/jobs/run_download_job.ts @@ -82,14 +82,17 @@ export class RunDownloadJob { const zimService = new ZimService(dockerService) await zimService.downloadRemoteSuccessCallback([url], true) - // Dispatch an embedding job for the downloaded ZIM file - try { - await EmbedFileJob.dispatch({ - fileName: url.split('/').pop() || '', - filePath: filepath, - }) - } catch (error) { - console.error(`[RunDownloadJob] Error dispatching EmbedFileJob for URL ${url}:`, error) + // Only dispatch embedding job if AI Assistant (Ollama) is installed + const ollamaUrl = await dockerService.getServiceURL('nomad_ollama') + if (ollamaUrl) { + try { + await EmbedFileJob.dispatch({ + fileName: url.split('/').pop() || '', + filePath: filepath, + }) + } catch (error) { + console.error(`[RunDownloadJob] Error dispatching EmbedFileJob for URL ${url}:`, error) + } } } else if (filetype === 'map') { const mapsService = new MapService() diff --git a/admin/app/services/docker_service.ts 
b/admin/app/services/docker_service.ts index 8eafc64..5d94f54 100644 --- a/admin/app/services/docker_service.ts +++ b/admin/app/services/docker_service.ts @@ -691,6 +691,7 @@ export class DockerService { const runtimes = dockerInfo.Runtimes || {} if ('nvidia' in runtimes) { logger.info('[DockerService] NVIDIA container runtime detected via Docker API') + await this._persistGPUType('nvidia') return { type: 'nvidia' } } } catch (error) { @@ -722,12 +723,26 @@ export class DockerService { ) if (amdCheck.trim()) { logger.info('[DockerService] AMD GPU detected via lspci') + await this._persistGPUType('amd') return { type: 'amd' } } } catch (error) { // lspci not available, continue } + // Last resort: check if we previously detected a GPU and it's likely still present. + // This handles cases where live detection fails transiently (e.g., Docker daemon + // hiccup, runtime temporarily unavailable) but the hardware hasn't changed. + try { + const savedType = await KVStore.getValue('gpu.type') + if (savedType === 'nvidia' || savedType === 'amd') { + logger.info(`[DockerService] No GPU detected live, but KV store has '${savedType}' from previous detection. Using saved value.`) + return { type: savedType as 'nvidia' | 'amd' } + } + } catch { + // KV store not available, continue + } + logger.info('[DockerService] No GPU detected') return { type: 'none' } } catch (error) { @@ -736,6 +751,15 @@ export class DockerService { } } + private async _persistGPUType(type: 'nvidia' | 'amd'): Promise { + try { + await KVStore.setValue('gpu.type', type) + logger.info(`[DockerService] Persisted GPU type '${type}' to KV store`) + } catch (error) { + logger.warn(`[DockerService] Failed to persist GPU type: ${error.message}`) + } + } + /** * Discover AMD GPU DRI devices dynamically. * Returns an array of device configurations for Docker. 
@@ -853,6 +877,45 @@ export class DockerService { this._broadcast(serviceName, 'update-creating', `Creating updated container...`) const hostConfig = inspectData.HostConfig || {} + + // Re-run GPU detection for Ollama so updates always reflect the current GPU environment. + // This handles cases where the NVIDIA Container Toolkit was installed after the initial + // Ollama setup, and ensures DeviceRequests are always built fresh rather than relying on + // round-tripping the Docker inspect format back into the create API. + let updatedDeviceRequests: any[] | undefined = undefined + if (serviceName === SERVICE_NAMES.OLLAMA) { + const gpuResult = await this._detectGPUType() + + if (gpuResult.type === 'nvidia') { + this._broadcast( + serviceName, + 'update-gpu-config', + `NVIDIA container runtime detected. Configuring updated container with GPU support...` + ) + updatedDeviceRequests = [ + { + Driver: 'nvidia', + Count: -1, + Capabilities: [['gpu']], + }, + ] + } else if (gpuResult.type === 'amd') { + this._broadcast( + serviceName, + 'update-gpu-config', + `AMD GPU detected. ROCm GPU acceleration is not yet supported — using CPU-only configuration.` + ) + } else if (gpuResult.toolkitMissing) { + this._broadcast( + serviceName, + 'update-gpu-config', + `NVIDIA GPU detected but NVIDIA Container Toolkit is not installed. Using CPU-only configuration. Install the toolkit and reinstall AI Assistant for GPU acceleration: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html` + ) + } else { + this._broadcast(serviceName, 'update-gpu-config', `No GPU detected. 
Using CPU-only configuration.`) + } + } + const newContainerConfig: any = { Image: newImage, name: serviceName, @@ -865,7 +928,7 @@ export class DockerService { Binds: hostConfig.Binds || undefined, PortBindings: hostConfig.PortBindings || undefined, RestartPolicy: hostConfig.RestartPolicy || undefined, - DeviceRequests: hostConfig.DeviceRequests || undefined, + DeviceRequests: serviceName === SERVICE_NAMES.OLLAMA ? updatedDeviceRequests : (hostConfig.DeviceRequests || undefined), Devices: hostConfig.Devices || undefined, }, NetworkingConfig: inspectData.NetworkSettings?.Networks diff --git a/admin/app/services/download_service.ts b/admin/app/services/download_service.ts index b5db238..a2b7faf 100644 --- a/admin/app/services/download_service.ts +++ b/admin/app/services/download_service.ts @@ -12,7 +12,7 @@ export class DownloadService { async listDownloadJobs(filetype?: string): Promise { // Get regular file download jobs (zim, map, etc.) const queue = this.queueService.getQueue(RunDownloadJob.queue) - const fileJobs = await queue.getJobs(['waiting', 'active', 'delayed']) + const fileJobs = await queue.getJobs(['waiting', 'active', 'delayed', 'failed']) const fileDownloads = fileJobs.map((job) => ({ jobId: job.id!.toString(), @@ -20,11 +20,13 @@ export class DownloadService { progress: parseInt(job.progress.toString(), 10), filepath: normalize(job.data.filepath), filetype: job.data.filetype, + status: (job.failedReason ? 
'failed' : 'active') as 'active' | 'failed', + failedReason: job.failedReason || undefined, })) // Get Ollama model download jobs const modelQueue = this.queueService.getQueue(DownloadModelJob.queue) - const modelJobs = await modelQueue.getJobs(['waiting', 'active', 'delayed']) + const modelJobs = await modelQueue.getJobs(['waiting', 'active', 'delayed', 'failed']) const modelDownloads = modelJobs.map((job) => ({ jobId: job.id!.toString(), @@ -32,6 +34,8 @@ export class DownloadService { progress: parseInt(job.progress.toString(), 10), filepath: job.data.modelName || 'Unknown Model', // Use model name as filepath filetype: 'model', + status: (job.failedReason ? 'failed' : 'active') as 'active' | 'failed', + failedReason: job.failedReason || undefined, })) const allDownloads = [...fileDownloads, ...modelDownloads] @@ -39,7 +43,22 @@ export class DownloadService { // Filter by filetype if specified const filtered = allDownloads.filter((job) => !filetype || job.filetype === filetype) - // Sort so actively downloading items (progress > 0) appear first, then by progress descending - return filtered.sort((a, b) => b.progress - a.progress) + // Sort: active downloads first (by progress desc), then failed at the bottom + return filtered.sort((a, b) => { + if (a.status === 'failed' && b.status !== 'failed') return 1 + if (a.status !== 'failed' && b.status === 'failed') return -1 + return b.progress - a.progress + }) + } + + async removeFailedJob(jobId: string): Promise { + for (const queueName of [RunDownloadJob.queue, DownloadModelJob.queue]) { + const queue = this.queueService.getQueue(queueName) + const job = await queue.getJob(jobId) + if (job) { + await job.remove() + return + } + } } } diff --git a/admin/app/services/map_service.ts b/admin/app/services/map_service.ts index 6f7cbfd..beb74b2 100644 --- a/admin/app/services/map_service.ts +++ b/admin/app/services/map_service.ts @@ -260,7 +260,7 @@ export class MapService implements IMapService { } } - async 
generateStylesJSON(host: string | null = null): Promise { + async generateStylesJSON(host: string | null = null, protocol: string = 'http'): Promise { if (!(await this.checkBaseAssetsExist())) { throw new Error('Base map assets are missing from storage/maps') } @@ -281,8 +281,8 @@ export class MapService implements IMapService { * e.g. user is accessing from "example.com", but we would by default generate "localhost:8080/..." so maps would * fail to load. */ - const sources = this.generateSourcesArray(host, regions) - const baseUrl = this.getPublicFileBaseUrl(host, this.basemapsAssetsDir) + const sources = this.generateSourcesArray(host, regions, protocol) + const baseUrl = this.getPublicFileBaseUrl(host, this.basemapsAssetsDir, protocol) const styles = await this.generateStylesFile( rawStyles, @@ -342,9 +342,9 @@ export class MapService implements IMapService { return await listDirectoryContentsRecursive(this.baseDirPath) } - private generateSourcesArray(host: string | null, regions: FileEntry[]): BaseStylesFile['sources'][] { + private generateSourcesArray(host: string | null, regions: FileEntry[], protocol: string = 'http'): BaseStylesFile['sources'][] { const sources: BaseStylesFile['sources'][] = [] - const baseUrl = this.getPublicFileBaseUrl(host, 'pmtiles') + const baseUrl = this.getPublicFileBaseUrl(host, 'pmtiles', protocol) for (const region of regions) { if (region.type === 'file' && region.name.endsWith('.pmtiles')) { @@ -433,7 +433,7 @@ export class MapService implements IMapService { /* * Gets the appropriate public URL for a map asset depending on environment */ - private getPublicFileBaseUrl(specifiedHost: string | null, childPath: string): string { + private getPublicFileBaseUrl(specifiedHost: string | null, childPath: string, protocol: string = 'http'): string { function getHost() { try { const localUrlRaw = env.get('URL') @@ -447,7 +447,7 @@ export class MapService implements IMapService { } const host = specifiedHost || getHost() - const 
withProtocol = host.startsWith('http') ? host : `http://${host}` + const withProtocol = host.startsWith('http') ? host : `${protocol}://${host}` const baseUrlPath = process.env.NODE_ENV === 'production' ? childPath : urlJoin(this.mapStoragePath, childPath) diff --git a/admin/app/services/system_service.ts b/admin/app/services/system_service.ts index 396ff30..84157af 100644 --- a/admin/app/services/system_service.ts +++ b/admin/app/services/system_service.ts @@ -410,6 +410,117 @@ export class SystemService { } } + async getDebugInfo(): Promise { + const appVersion = SystemService.getAppVersion() + const environment = process.env.NODE_ENV || 'unknown' + + const [systemInfo, services, internetStatus, versionCheck] = await Promise.all([ + this.getSystemInfo(), + this.getServices({ installedOnly: false }), + this.getInternetStatus().catch(() => null), + this.checkLatestVersion().catch(() => null), + ]) + + const lines: string[] = [ + 'Project NOMAD Debug Info', + '========================', + `App Version: ${appVersion}`, + `Environment: ${environment}`, + ] + + if (systemInfo) { + const { cpu, mem, os, disk, fsSize, uptime, graphics } = systemInfo + + lines.push('') + lines.push('System:') + if (os.distro) lines.push(` OS: ${os.distro}`) + if (os.hostname) lines.push(` Hostname: ${os.hostname}`) + if (os.kernel) lines.push(` Kernel: ${os.kernel}`) + if (os.arch) lines.push(` Architecture: ${os.arch}`) + if (uptime?.uptime) lines.push(` Uptime: ${this._formatUptime(uptime.uptime)}`) + + lines.push('') + lines.push('Hardware:') + if (cpu.brand) { + lines.push(` CPU: ${cpu.brand} (${cpu.cores} cores)`) + } + if (mem.total) { + const total = this._formatBytes(mem.total) + const used = this._formatBytes(mem.total - (mem.available || 0)) + const available = this._formatBytes(mem.available || 0) + lines.push(` RAM: ${total} total, ${used} used, ${available} available`) + } + if (graphics.controllers && graphics.controllers.length > 0) { + for (const gpu of 
graphics.controllers) { + const vram = gpu.vram ? ` (${gpu.vram} MB VRAM)` : '' + lines.push(` GPU: ${gpu.model}${vram}`) + } + } else { + lines.push(' GPU: None detected') + } + + // Disk info — try disk array first, fall back to fsSize + const diskEntries = disk.filter((d) => d.totalSize > 0) + if (diskEntries.length > 0) { + for (const d of diskEntries) { + const size = this._formatBytes(d.totalSize) + const type = d.tran?.toUpperCase() || (d.rota ? 'HDD' : 'SSD') + lines.push(` Disk: ${size}, ${Math.round(d.percentUsed)}% used, ${type}`) + } + } else if (fsSize.length > 0) { + const realFs = fsSize.filter((f) => f.fs.startsWith('/dev/')) + const seen = new Set() + for (const f of realFs) { + if (seen.has(f.size)) continue + seen.add(f.size) + lines.push(` Disk: ${this._formatBytes(f.size)}, ${Math.round(f.use)}% used`) + } + } + } + + const installed = services.filter((s) => s.installed) + lines.push('') + if (installed.length > 0) { + lines.push('Installed Services:') + for (const svc of installed) { + lines.push(` ${svc.friendly_name} (${svc.service_name}): ${svc.status}`) + } + } else { + lines.push('Installed Services: None') + } + + if (internetStatus !== null) { + lines.push('') + lines.push(`Internet Status: ${internetStatus ? 'Online' : 'Offline'}`) + } + + if (versionCheck?.success) { + const updateMsg = versionCheck.updateAvailable + ? 
`Yes (${versionCheck.latestVersion} available)` + : `No (${versionCheck.currentVersion} is latest)` + lines.push(`Update Available: ${updateMsg}`) + } + + return lines.join('\n') + } + + private _formatUptime(seconds: number): string { + const days = Math.floor(seconds / 86400) + const hours = Math.floor((seconds % 86400) / 3600) + const minutes = Math.floor((seconds % 3600) / 60) + if (days > 0) return `${days}d ${hours}h ${minutes}m` + if (hours > 0) return `${hours}h ${minutes}m` + return `${minutes}m` + } + + private _formatBytes(bytes: number, decimals = 1): string { + if (bytes === 0) return '0 Bytes' + const k = 1024 + const sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB'] + const i = Math.floor(Math.log(bytes) / Math.log(k)) + return parseFloat((bytes / Math.pow(k, i)).toFixed(decimals)) + ' ' + sizes[i] + } + async updateSetting(key: KVStoreKey, value: any): Promise { if ((value === '' || value === undefined || value === null) && KV_STORE_SCHEMA[key] === 'string') { await KVStore.clearValue(key) @@ -468,10 +579,21 @@ export class SystemService { return [] } + // Deduplicate: same device path mounted in multiple places (Docker bind-mounts) + // Keep the entry with the largest size — that's the real partition + const deduped = new Map() + for (const entry of fsSize) { + const existing = deduped.get(entry.fs) + if (!existing || entry.size > existing.size) { + deduped.set(entry.fs, entry) + } + } + const dedupedFsSize = Array.from(deduped.values()) + return diskLayout.blockdevices .filter((disk) => disk.type === 'disk') // Only physical disks .map((disk) => { - const filesystems = getAllFilesystems(disk, fsSize) + const filesystems = getAllFilesystems(disk, dedupedFsSize) // Across all partitions const totalUsed = filesystems.reduce((sum, p) => sum + (p.used || 0), 0) diff --git a/admin/app/utils/downloads.ts b/admin/app/utils/downloads.ts index 7c36378..1c26a74 100644 --- a/admin/app/utils/downloads.ts +++ b/admin/app/utils/downloads.ts @@ -88,10 +88,29 @@ export 
async function doResumableDownload({ let lastProgressTime = Date.now() let lastDownloadedBytes = startByte + // Stall detection: if no data arrives for 5 minutes, abort the download + const STALL_TIMEOUT_MS = 5 * 60 * 1000 + let stallTimer: ReturnType | null = null + + const clearStallTimer = () => { + if (stallTimer) { + clearTimeout(stallTimer) + stallTimer = null + } + } + + const resetStallTimer = () => { + clearStallTimer() + stallTimer = setTimeout(() => { + cleanup(new Error('Download stalled - no data received for 5 minutes')) + }, STALL_TIMEOUT_MS) + } + // Progress tracking stream to monitor data flow const progressStream = new Transform({ transform(chunk: Buffer, _: any, callback: Function) { downloadedBytes += chunk.length + resetStallTimer() // Update progress tracking const now = Date.now() @@ -118,6 +137,7 @@ export async function doResumableDownload({ // Handle errors and cleanup const cleanup = (error?: Error) => { + clearStallTimer() progressStream.destroy() response.data.destroy() writeStream.destroy() @@ -136,6 +156,7 @@ export async function doResumableDownload({ }) writeStream.on('finish', async () => { + clearStallTimer() if (onProgress) { onProgress({ downloadedBytes, @@ -151,7 +172,8 @@ export async function doResumableDownload({ resolve(filepath) }) - // Pipe: response -> progressStream -> writeStream + // Start stall timer and pipe: response -> progressStream -> writeStream + resetStallTimer() response.data.pipe(progressStream).pipe(writeStream) }) } diff --git a/admin/app/utils/fs.ts b/admin/app/utils/fs.ts index 7cc3ba8..59bd5c5 100644 --- a/admin/app/utils/fs.ts +++ b/admin/app/utils/fs.ts @@ -138,14 +138,13 @@ export function matchesDevice(fsPath: string, deviceName: string): boolean { // Remove /dev/ and /dev/mapper/ prefixes const normalized = fsPath.replace('/dev/mapper/', '').replace('/dev/', '') - // Direct match + // Direct match (covers /dev/sda1 ↔ sda1, /dev/nvme0n1p1 ↔ nvme0n1p1) if (normalized === deviceName) { return true } 
- // LVM volumes use dashes instead of slashes - // e.g., ubuntu--vg-ubuntu--lv matches the device name - if (fsPath.includes(deviceName)) { + // LVM/device-mapper: e.g., /dev/mapper/ubuntu--vg-ubuntu--lv contains "ubuntu--lv" + if (fsPath.startsWith('/dev/mapper/') && fsPath.includes(deviceName)) { return true } diff --git a/admin/commands/queue/work.ts b/admin/commands/queue/work.ts index e39fdbf..453268d 100644 --- a/admin/commands/queue/work.ts +++ b/admin/commands/queue/work.ts @@ -65,8 +65,23 @@ export default class QueueWork extends BaseCommand { } ) - worker.on('failed', (job, err) => { + worker.on('failed', async (job, err) => { this.logger.error(`[${queueName}] Job failed: ${job?.id}, Error: ${err.message}`) + + // If this was a Wikipedia download, mark it as failed in the DB + if (job?.data?.filetype === 'zim' && job?.data?.url?.includes('wikipedia_en_')) { + try { + const { DockerService } = await import('#services/docker_service') + const { ZimService } = await import('#services/zim_service') + const dockerService = new DockerService() + const zimService = new ZimService(dockerService) + await zimService.onWikipediaDownloadComplete(job.data.url, false) + } catch (e: any) { + this.logger.error( + `[${queueName}] Failed to update Wikipedia status: ${e.message}` + ) + } + } }) worker.on('completed', (job) => { diff --git a/admin/config/logger.ts b/admin/config/logger.ts index 59aa141..981e167 100644 --- a/admin/config/logger.ts +++ b/admin/config/logger.ts @@ -18,7 +18,7 @@ const loggerConfig = defineConfig({ targets: targets() .pushIf(!app.inProduction, targets.pretty()) - .pushIf(app.inProduction, targets.file({ destination: "/app/storage/logs/admin.log" })) + .pushIf(app.inProduction, targets.file({ destination: "/app/storage/logs/admin.log", mkdir: true })) .toArray(), }, }, diff --git a/admin/config/transmit.ts b/admin/config/transmit.ts index f8862d7..43f1d42 100644 --- a/admin/config/transmit.ts +++ b/admin/config/transmit.ts @@ -3,7 +3,7 @@ import 
{ defineConfig } from '@adonisjs/transmit' import { redis } from '@adonisjs/transmit/transports' export default defineConfig({ - pingInterval: false, + pingInterval: '30s', transport: { driver: redis({ host: env.get('REDIS_HOST'), diff --git a/admin/constants/kv_store.ts b/admin/constants/kv_store.ts index 7cae751..69872ff 100644 --- a/admin/constants/kv_store.ts +++ b/admin/constants/kv_store.ts @@ -1,3 +1,3 @@ import { KVStoreKey } from "../types/kv_store.js"; -export const SETTINGS_KEYS: KVStoreKey[] = ['chat.suggestionsEnabled', 'chat.lastModel', 'ui.hasVisitedEasySetup', 'system.earlyAccess', 'ai.assistantCustomName']; \ No newline at end of file +export const SETTINGS_KEYS: KVStoreKey[] = ['chat.suggestionsEnabled', 'chat.lastModel', 'ui.hasVisitedEasySetup', 'ui.theme', 'system.earlyAccess', 'ai.assistantCustomName']; \ No newline at end of file diff --git a/admin/docs/faq.md b/admin/docs/faq.md index 50c75b3..aa8aac1 100644 --- a/admin/docs/faq.md +++ b/admin/docs/faq.md @@ -13,12 +13,12 @@ No — that's the whole point. Once your content is downloaded, everything works ### What hardware do I need? N.O.M.A.D. is designed for capable hardware, especially if you want to use the AI features. Recommended: -- Modern multi-core CPU +- Modern multi-core CPU (AMD Ryzen 7 with Radeon graphics is the community sweet spot) - 16GB+ RAM (32GB+ for best AI performance) -- SSD storage (size depends on content — 500GB minimum, 2TB+ recommended) +- SSD storage (size depends on content — 500GB minimum, 1TB+ recommended) - NVIDIA or AMD GPU recommended for faster AI responses -**For detailed build recommendations at three price points ($200–$800+), see the [Hardware Guide](https://www.projectnomad.us/hardware).** +**For detailed build recommendations at three price points ($150–$1,000+), see the [Hardware Guide](https://www.projectnomad.us/hardware).** ### How much storage do I need? 
It depends on what you download: diff --git a/admin/docs/release-notes.md b/admin/docs/release-notes.md index 74583e6..5f74a57 100644 --- a/admin/docs/release-notes.md +++ b/admin/docs/release-notes.md @@ -1,5 +1,38 @@ # Release Notes +## Version 1.30.0 - March 20, 2026 + +### Features +- **Night Ops**: Added our most requested feature — a dark mode theme for the Command Center interface! Activate it from the footer and enjoy the sleek new look during your late-night missions. Thanks @chriscrosstalk for the contribution! +- **Debug Info**: Added a new "Debug Info" modal accessible from the footer that provides detailed system and application information for troubleshooting and support. Thanks @chriscrosstalk for the contribution! +- **Support the Project**: Added a new "Support the Project" page in settings with links to community resources, donation options, and ways to contribute. +- **Install**: The main Nomad image is now fully self-contained and directly usable with Docker Compose, allowing for more flexible and customizable installations without relying on external scripts. The image remains fully backwards compatible with existing installations, and the install script has been updated to reflect the simpler deployment process. + +### Bug Fixes +- **Settings**: Storage usage display now prefers real block devices over tmpfs. Thanks @Bortlesboat for the fix! +- **Settings**: Fixed an issue where device matching and mount entry deduplication logic could cause incorrect storage usage reporting and missing devices in storage displays. +- **Maps**: The Maps page now respects the request protocol (http vs https) to ensure map tiles load correctly. Thanks @davidgross for the bug report! +- **Knowledge Base**: Fixed an issue where file embedding jobs could cause a retry storm if the Ollama service was unavailable. Thanks @skyam25 for the bug report!
+- **Curated Collections**: Fixed some broken links in the curated collections definitions (maps and ZIM files) that were causing some resources to fail to download. +- **Easy Setup**: Fixed an issue where the "Start Here" badge would persist even after visiting the Easy Setup Wizard for the first time. Thanks @chriscrosstalk for the fix! +- **UI**: Fixed an issue where the loading spinner could look strange in certain use cases. +- **System Updates**: Fixed an issue where the update banner would persist even after the system was updated successfully. Thanks @chriscrosstalk for the fix! +- **Performance**: Various small memory leak fixes and performance improvements across the UI to ensure a smoother experience. + +### Improvements +- **Ollama**: Improved GPU detection logic to ensure the latest GPU config is always passed to the Ollama container on update +- **Ollama**: The detected GPU type is now persisted in the database for more reliable configuration and troubleshooting across updates and restarts. Thanks @chriscrosstalk for the contribution! +- **Downloads**: Users can now dismiss failed download notifications to reduce clutter in the UI. Thanks @chriscrosstalk for the contribution! +- **Logging**: Changed the default log level to "info" to reduce noise and focus on important messages. Thanks @traxeon for the suggestion! +- **Logging**: Nomad's internal logger now creates its own log directory on startup if it doesn't already exist to prevent errors on fresh installs where the logs directory hasn't been created yet. +- **Dozzle**: Dozzle shell access and container actions are now disabled by default. Thanks @traxeon for the recommendation! +- **MySQL & Redis**: Removed port exposure to host by default for improved security. Ports can still be exposed manually if needed. Thanks @traxeon for the recommendation!
+- **Dependencies**: Various dependency updates to close security vulnerabilities and improve stability +- **Utility Scripts**: Added a check for the expected Docker Compose version (v2) in all utility scripts to provide clearer error messages and guidance if the environment is not set up correctly. +- **Utility Scripts**: Added an additional warning to the installation script to inform about potential overwriting of existing customized configurations and the importance of backing up data before running the installation script again. +- **Documentation**: Updated installation instructions to reflect the new option for manual deployment via Docker Compose without the install script. + + ## Version 1.29.0 - March 11, 2026 ### Features diff --git a/admin/docs/security-audit-v1.md b/admin/docs/security-audit-v1.md deleted file mode 100644 index 9638df0..0000000 --- a/admin/docs/security-audit-v1.md +++ /dev/null @@ -1,281 +0,0 @@ -# Project NOMAD Security Audit Report - -**Date:** 2026-03-08 -**Version audited:** v1.28.0 (main branch) -**Auditor:** Claude Code (automated + manual review) -**Target:** Pre-launch security review - ---- - -## Executive Summary - -Project NOMAD's codebase is **reasonably clean for a LAN appliance**, with no critical authentication bypasses or remote code execution vulnerabilities. However, there are **4 findings that should be fixed before public launch** — all are straightforward path traversal and SSRF issues with known fix patterns already used elsewhere in the codebase. 
- -| Severity | Count | Summary | -|----------|-------|---------| -| **HIGH** | 4 | Path traversal (3), SSRF (1) | -| **MEDIUM** | 5 | Dozzle shell, unvalidated settings read, content update URL injection, verbose errors, no rate limiting | -| **LOW** | 5 | CSRF disabled, CORS wildcard, debug logging, npm dep CVEs, hardcoded HMAC | -| **INFO** | 2 | No auth by design, Docker socket exposure by design | - ---- - -## Scans Performed - -| Scan | Tool | Result | -|------|------|--------| -| Dependency audit | `npm audit` | 2 CVEs (1 high, 1 moderate) | -| Secret scan | Manual grep (passwords, keys, tokens, certs) | Clean — all secrets from env vars | -| SAST | Semgrep (security-audit, OWASP, nodejs rulesets) | 0 findings (AdonisJS not in rulesets) | -| Docker config review | Manual review of compose, Dockerfiles, scripts | 2 actionable findings | -| Code review | Manual review of services, controllers, validators | 4 path traversal + 1 SSRF | -| API endpoint audit | Manual review of all 60+ routes | Attack surface documented | -| DAST (OWASP ZAP) | Skipped — Docker Desktop not running | Recommended as follow-up | - ---- - -## FIX BEFORE LAUNCH - -### 1. Path Traversal — ZIM File Delete (HIGH) - -**File:** `admin/app/services/zim_service.ts:329-342` -**Endpoint:** `DELETE /api/zim/:filename` - -The `filename` parameter flows into `path.join()` with no directory containment check. An attacker can delete `.zim` files outside the storage directory: - -``` -DELETE /api/zim/..%2F..%2Fsome-file.zim -``` - -**Fix:** Resolve the full path and verify it starts with the expected storage directory: - -```typescript -async delete(file: string): Promise { - let fileName = file - if (!fileName.endsWith('.zim')) { - fileName += '.zim' - } - - const basePath = join(process.cwd(), ZIM_STORAGE_PATH) - const fullPath = resolve(basePath, fileName) - - // Prevent path traversal - if (!fullPath.startsWith(basePath)) { - throw new Error('Invalid filename') - } - - // ... 
rest of delete logic -} -``` - -This pattern is already used correctly in `rag_service.ts:deleteFileBySource()`. - ---- - -### 2. Path Traversal — Map File Delete (HIGH) - -**File:** `admin/app/services/map_service.ts` (delete method) -**Endpoint:** `DELETE /api/maps/:filename` - -Identical pattern to the ZIM delete. Same fix — resolve path, verify `startsWith(basePath)`. - ---- - -### 3. Path Traversal — Documentation Read (HIGH) - -**File:** `admin/app/services/docs_service.ts:61-83` -**Endpoint:** `GET /docs/:slug` - -The `slug` parameter flows into `path.join(this.docsPath, filename)` with no containment check. An attacker can read arbitrary `.md` files on the filesystem: - -``` -GET /docs/..%2F..%2F..%2Fetc%2Fpasswd -``` - -Limited by the mandatory `.md` extension, but could still read sensitive markdown files outside the docs directory (like CLAUDE.md, README.md, etc.). - -**Fix:** - -```typescript -const basePath = this.docsPath -const fullPath = path.resolve(basePath, filename) - -if (!fullPath.startsWith(path.resolve(basePath))) { - throw new Error('Invalid document slug') -} -``` - ---- - -### 4. SSRF — Download Endpoints (HIGH) - -**File:** `admin/app/validators/common.ts` -**Endpoints:** `POST /api/zim/download-remote`, `POST /api/maps/download-remote`, `POST /api/maps/download-base-assets`, `POST /api/maps/download-remote-preflight` - -The download endpoints accept user-supplied URLs and the server fetches from them. Without validation, an attacker on the LAN (or via CSRF since `shield.ts` disables CSRF protection) could make NOMAD fetch from co-located services: -- `http://localhost:3306` (MySQL) -- `http://localhost:6379` (Redis) -- `http://169.254.169.254/` (cloud metadata — if NOMAD is ever cloud-hosted) - -**Fix:** Added `assertNotPrivateUrl()` that blocks loopback and link-local addresses before any download is initiated. Called in all download controllers. 
- -**Scope note:** RFC1918 private addresses (10.x, 172.16-31.x, 192.168.x) are intentionally **allowed** because NOMAD is a LAN appliance and users may host content mirrors on their local network. The `require_tld: false` VineJS option is preserved so URLs like `http://my-nas:8080/file.zim` remain valid. - -```typescript -const blockedPatterns = [ - /^localhost$/, - /^127\.\d+\.\d+\.\d+$/, - /^0\.0\.0\.0$/, - /^169\.254\.\d+\.\d+$/, // Link-local / cloud metadata - /^\[::1\]$/, - /^\[?fe80:/i, // IPv6 link-local -] -``` - ---- - -## FIX AFTER LAUNCH (Medium Priority) - -### 5. Dozzle Web Shell Access (MEDIUM) - -**File:** `install/management_compose.yaml:56` - -```yaml -- DOZZLE_ENABLE_SHELL=true -``` - -Dozzle on port 9999 is bound to all interfaces with shell access enabled. Anyone on the LAN can open a web shell into containers, including `nomad_admin` which has the Docker socket mounted. This creates a path from "LAN access" → "container shell" → "Docker socket" → "host root." - -**Fix:** Set `DOZZLE_ENABLE_SHELL=false`. Log viewing and container restart functionality are preserved. - ---- - -### 6. Unvalidated Settings Key Read (MEDIUM) - -**File:** `admin/app/controllers/settings_controller.ts` -**Endpoint:** `GET /api/system/settings?key=...` - -The `updateSetting` endpoint validates the key against an enum, but `getSetting` accepts any arbitrary key string. Currently harmless since the KV store only contains settings data, but could leak sensitive info if new keys are added. - -**Fix:** Apply the same enum validation to the read endpoint. - ---- - -### 7. Content Update URL Injection (MEDIUM) - -**File:** `admin/app/validators/common.ts:72-88` -**Endpoint:** `POST /api/content-updates/apply` - -The `download_url` comes directly from the client request body. An attacker can supply any URL and NOMAD will download from it. The URL should be looked up server-side from the content manifest instead. 
- -**Fix:** Validate `download_url` against the cached manifest, or apply the same loopback/link-local protections as finding #4 (already applied in this PR). - ---- - -### 8. Verbose Error Messages (MEDIUM) - -**Files:** `rag_controller.ts`, `docker_service.ts`, `system_update_service.ts` - -Several controllers return raw `error.message` in API responses, potentially leaking internal paths, stack details, or Docker error messages to the client. - -**Fix:** Return generic error messages in production. Log the details server-side. - ---- - -### 9. No Rate Limiting (MEDIUM) - -Zero rate limiting across all 60+ endpoints. While acceptable for a LAN appliance, some endpoints are particularly abusable: -- `POST /api/benchmark/run` — spins up Docker containers for CPU/memory/disk stress tests -- `POST /api/rag/upload` — file uploads (20MB limit per bodyparser config) -- `POST /api/system/services/affect` — can stop/start any service repeatedly - -**Fix:** Consider basic rate limiting on the benchmark and service control endpoints (e.g., 1 benchmark per minute, service actions throttled to prevent rapid cycling). - ---- - -## LOW PRIORITY / ACCEPTED RISK - -### 10. CSRF Protection Disabled (LOW) - -**File:** `admin/config/shield.ts` - -CSRF is disabled, meaning any website a LAN user visits could fire requests at NOMAD's API. This amplifies findings 1-4 — path traversal and SSRF could be triggered by a malicious webpage, not just direct LAN access. - -**Assessment:** Acceptable for a LAN appliance with no auth system. Enabling CSRF would require significant auth/session infrastructure changes. - -### 11. CORS Wildcard with Credentials (LOW) - -**File:** `admin/config/cors.ts` - -`origin: ['*']` with `credentials: true`. Standard for LAN appliances. - -### 12. npm Dependency CVEs (LOW) - -``` -tar <=7.5.9 HIGH Hardlink Path Traversal via Drive-Relative Linkpath -ajv <6.14.0 MODERATE ReDoS when using $data option -``` - -Both fixable via `npm audit fix`. 
Low practical risk since these are build/dev dependencies not directly exposed to user input. - -**Fix:** Run `npm audit fix` and commit the updated lockfile. - -### 13. Hardcoded HMAC Secret (LOW) - -**File:** `admin/app/services/benchmark_service.ts:35` - -The benchmark HMAC secret `'nomad-benchmark-v1-2026'` is hardcoded in open-source code. Anyone can forge leaderboard submissions. - -**Assessment:** Accepted risk. The leaderboard has compensating controls (rate limiting, plausibility validation, hardware fingerprint dedup). The secret stops casual abuse, not determined attackers. - -### 14. Production Debug Logging (LOW) - -**File:** `install/management_compose.yaml:22` - -```yaml -LOG_LEVEL=debug -``` - -Debug logging in production can expose internal state in log files. - -**Fix:** Change to `LOG_LEVEL=info` for production compose template. - ---- - -## INFORMATIONAL (By Design) - -### No Authentication - -All 60+ API endpoints are unauthenticated. This is by design — NOMAD is a LAN appliance and the network boundary is the access control. Issue #73 tracks the edge case of public IP interfaces. - -### Docker Socket Exposure - -The `nomad_admin` container mounts `/var/run/docker.sock`. This is necessary for NOMAD's core functionality (managing Docker containers). The socket is not exposed to the network — only the admin container can use it. 
- ---- - -## Recommendations Summary - -| Priority | Action | Effort | -|----------|--------|--------| -| **Before launch** | Fix 3 path traversals (ZIM delete, Map delete, Docs read) | ~30 min | -| **Before launch** | Add SSRF protection to download URL validators | ~1 hour | -| **Soon after** | Disable Dozzle shell access | 1 line change | -| **Soon after** | Validate settings key on read endpoint | ~15 min | -| **Soon after** | Sanitize error messages in responses | ~30 min | -| **Nice to have** | Run `npm audit fix` | 5 min | -| **Nice to have** | Change production log level to info | 1 line change | -| **Follow-up** | OWASP ZAP dynamic scan against NOMAD3 | ~1 hour | - ---- - -## What Went Right - -- **No hardcoded secrets** — all credentials properly use environment variables -- **No command injection** — Docker operations use the Docker API (dockerode), not shell commands -- **No SQL injection** — all database queries use AdonisJS Lucid ORM with parameterized queries -- **No eval/Function** — no dynamic code execution anywhere -- **RAG service already has the correct fix pattern** — `deleteFileBySource()` uses `resolve()` + `startsWith()` for path containment -- **Install script generates strong random passwords** — uses `/dev/urandom` for APP_KEY and DB passwords -- **No privileged containers** — GPU passthrough uses DeviceRequests, not --privileged -- **Health checks don't leak data** — internal-only calls diff --git a/admin/inertia/app/app.tsx b/admin/inertia/app/app.tsx index 2eabe10..b71ab64 100644 --- a/admin/inertia/app/app.tsx +++ b/admin/inertia/app/app.tsx @@ -11,6 +11,7 @@ import { generateUUID } from '~/lib/util' import { QueryClient, QueryClientProvider } from '@tanstack/react-query' import { ReactQueryDevtools } from '@tanstack/react-query-devtools' import NotificationsProvider from '~/providers/NotificationProvider' +import { ThemeProvider } from '~/providers/ThemeProvider' import { UsePageProps } from '../../types/system' const appName = 
import.meta.env.VITE_APP_NAME || 'Project N.O.M.A.D.' @@ -38,14 +39,16 @@ createInertiaApp({ const showDevtools = ['development', 'staging'].includes(environment) createRoot(el).render( - - - - - {showDevtools && } - - - + + + + + + {showDevtools && } + + + + ) }, diff --git a/admin/inertia/components/ActiveDownloads.tsx b/admin/inertia/components/ActiveDownloads.tsx index 5eb30f4..9661f22 100644 --- a/admin/inertia/components/ActiveDownloads.tsx +++ b/admin/inertia/components/ActiveDownloads.tsx @@ -2,6 +2,8 @@ import useDownloads, { useDownloadsProps } from '~/hooks/useDownloads' import HorizontalBarChart from './HorizontalBarChart' import { extractFileName } from '~/lib/util' import StyledSectionHeader from './StyledSectionHeader' +import { IconAlertTriangle, IconX } from '@tabler/icons-react' +import api from '~/lib/api' interface ActiveDownloadProps { filetype?: useDownloadsProps['filetype'] @@ -9,7 +11,12 @@ interface ActiveDownloadProps { } const ActiveDownloads = ({ filetype, withHeader = false }: ActiveDownloadProps) => { - const { data: downloads } = useDownloads({ filetype }) + const { data: downloads, invalidate } = useDownloads({ filetype }) + + const handleDismiss = async (jobId: string) => { + await api.removeDownloadJob(jobId) + invalidate() + } return ( <> @@ -17,22 +24,50 @@ const ActiveDownloads = ({ filetype, withHeader = false }: ActiveDownloadProps)
{downloads && downloads.length > 0 ? ( downloads.map((download) => ( -
- +
+ {download.status === 'failed' ? ( +
+ +
+

+ {extractFileName(download.filepath) || download.url} +

+

+ Download failed{download.failedReason ? `: ${download.failedReason}` : ''} +

+
+ +
+ ) : ( + + )}
)) ) : ( -

No active downloads

+

No active downloads

)}
diff --git a/admin/inertia/components/ActiveEmbedJobs.tsx b/admin/inertia/components/ActiveEmbedJobs.tsx index 5e6914e..9da78bc 100644 --- a/admin/inertia/components/ActiveEmbedJobs.tsx +++ b/admin/inertia/components/ActiveEmbedJobs.tsx @@ -35,7 +35,7 @@ const ActiveEmbedJobs = ({ withHeader = false }: ActiveEmbedJobsProps) => {
)) ) : ( -

No files are currently being processed

+

No files are currently being processed

)} diff --git a/admin/inertia/components/ActiveModelDownloads.tsx b/admin/inertia/components/ActiveModelDownloads.tsx index 1727fe5..d1d0b85 100644 --- a/admin/inertia/components/ActiveModelDownloads.tsx +++ b/admin/inertia/components/ActiveModelDownloads.tsx @@ -33,7 +33,7 @@ const ActiveModelDownloads = ({ withHeader = false }: ActiveModelDownloadsProps) )) ) : ( -

No active model downloads

+

No active model downloads

)} diff --git a/admin/inertia/components/Alert.tsx b/admin/inertia/components/Alert.tsx index ceff2a0..40fca57 100644 --- a/admin/inertia/components/Alert.tsx +++ b/admin/inertia/components/Alert.tsx @@ -43,7 +43,7 @@ export default function Alert({ } const getIconColor = () => { - if (variant === 'solid') return 'text-desert-white' + if (variant === 'solid') return 'text-white' switch (type) { case 'warning': return 'text-desert-orange' @@ -81,15 +81,15 @@ export default function Alert({ case 'solid': variantStyles.push( type === 'warning' - ? 'bg-desert-orange text-desert-white border border-desert-orange-dark' + ? 'bg-desert-orange text-white border border-desert-orange-dark' : type === 'error' - ? 'bg-desert-red text-desert-white border border-desert-red-dark' + ? 'bg-desert-red text-white border border-desert-red-dark' : type === 'success' - ? 'bg-desert-olive text-desert-white border border-desert-olive-dark' + ? 'bg-desert-olive text-white border border-desert-olive-dark' : type === 'info' - ? 'bg-desert-green text-desert-white border border-desert-green-dark' + ? 'bg-desert-green text-white border border-desert-green-dark' : type === 'info-inverted' - ? 'bg-desert-tan text-desert-white border border-desert-tan-dark' + ? 
'bg-desert-tan text-white border border-desert-tan-dark' : '' ) return classNames(baseStyles, 'shadow-lg', ...variantStyles) @@ -112,7 +112,7 @@ export default function Alert({ } const getTitleColor = () => { - if (variant === 'solid') return 'text-desert-white' + if (variant === 'solid') return 'text-white' switch (type) { case 'warning': @@ -131,7 +131,7 @@ export default function Alert({ } const getMessageColor = () => { - if (variant === 'solid') return 'text-desert-white text-opacity-90' + if (variant === 'solid') return 'text-white text-opacity-90' switch (type) { case 'warning': @@ -149,7 +149,7 @@ export default function Alert({ const getCloseButtonStyles = () => { if (variant === 'solid') { - return 'text-desert-white hover:text-desert-white hover:bg-black hover:bg-opacity-20' + return 'text-white hover:text-white hover:bg-black hover:bg-opacity-20' } switch (type) { diff --git a/admin/inertia/components/BouncingDots.tsx b/admin/inertia/components/BouncingDots.tsx index e01c3cc..64027f0 100644 --- a/admin/inertia/components/BouncingDots.tsx +++ b/admin/inertia/components/BouncingDots.tsx @@ -9,18 +9,18 @@ interface BouncingDotsProps { export default function BouncingDots({ text, containerClassName, textClassName }: BouncingDotsProps) { return (
- {text} + {text} diff --git a/admin/inertia/components/DebugInfoModal.tsx b/admin/inertia/components/DebugInfoModal.tsx new file mode 100644 index 0000000..63029cb --- /dev/null +++ b/admin/inertia/components/DebugInfoModal.tsx @@ -0,0 +1,103 @@ +import { useEffect, useState } from 'react' +import { IconBug, IconCopy, IconCheck } from '@tabler/icons-react' +import StyledModal from './StyledModal' +import api from '~/lib/api' + +interface DebugInfoModalProps { + open: boolean + onClose: () => void +} + +export default function DebugInfoModal({ open, onClose }: DebugInfoModalProps) { + const [debugText, setDebugText] = useState('') + const [loading, setLoading] = useState(false) + const [copied, setCopied] = useState(false) + + useEffect(() => { + if (!open) return + + setLoading(true) + setCopied(false) + + api.getDebugInfo().then((text) => { + if (text) { + const browserLine = `Browser: ${navigator.userAgent}` + setDebugText(text + '\n' + browserLine) + } else { + setDebugText('Failed to load debug info. Please try again.') + } + setLoading(false) + }).catch(() => { + setDebugText('Failed to load debug info. Please try again.') + setLoading(false) + }) + }, [open]) + + const handleCopy = async () => { + try { + await navigator.clipboard.writeText(debugText) + } catch { + // Fallback for older browsers + const textarea = document.querySelector('#debug-info-text') + if (textarea) { + textarea.select() + document.execCommand('copy') + } + } + setCopied(true) + setTimeout(() => setCopied(false), 2000) + } + + return ( + } + cancelText="Close" + onCancel={onClose} + > +

+ This is non-sensitive system info you can share when reporting issues. + No passwords, IPs, or API keys are included. +

+ +