diff --git a/.github/workflows/build-admin-on-pr.yml b/.github/workflows/build-admin-on-pr.yml new file mode 100644 index 0000000..3afa65f --- /dev/null +++ b/.github/workflows/build-admin-on-pr.yml @@ -0,0 +1,26 @@ +name: Build Admin + +on: pull_request + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v6 + + - name: Set up Node.js + uses: actions/setup-node@v6 + with: + node-version: '24' + cache: 'npm' + cache-dependency-path: admin/package-lock.json + + - name: Install dependencies + run: npm ci + working-directory: ./admin + + - name: Run build + run: npm run build + working-directory: ./admin \ No newline at end of file diff --git a/.github/workflows/build-disk-collector.yml b/.github/workflows/build-disk-collector.yml index 27085fd..0f0fe68 100644 --- a/.github/workflows/build-disk-collector.yml +++ b/.github/workflows/build-disk-collector.yml @@ -46,11 +46,11 @@ jobs: suffix: arm64 steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Log in to GitHub Container Registry uses: docker/login-action@v2 with: diff --git a/.github/workflows/build-primary-image.yml b/.github/workflows/build-primary-image.yml index 55f5721..ee45d75 100644 --- a/.github/workflows/build-primary-image.yml +++ b/.github/workflows/build-primary-image.yml @@ -46,11 +46,11 @@ jobs: suffix: arm64 steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Log in to GitHub Container Registry uses: docker/login-action@v2 with: diff --git a/.github/workflows/build-sidecar-updater.yml b/.github/workflows/build-sidecar-updater.yml index 5ad28e8..898c0f7 100644 --- a/.github/workflows/build-sidecar-updater.yml +++ b/.github/workflows/build-sidecar-updater.yml @@ -46,11 +46,11 @@ jobs: suffix: arm64 steps: - name: Checkout code - uses: 
actions/checkout@v4 + uses: actions/checkout@v6 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Log in to GitHub Container Registry uses: docker/login-action@v2 with: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 54150a3..90bd71c 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -22,7 +22,7 @@ jobs: newVersion: ${{ steps.semver.outputs.new_release_version }} steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 0 persist-credentials: false diff --git a/.github/workflows/validate-collection-urls.yml b/.github/workflows/validate-collection-urls.yml new file mode 100644 index 0000000..32ac30d --- /dev/null +++ b/.github/workflows/validate-collection-urls.yml @@ -0,0 +1,58 @@ +name: Validate Collection URLs + +on: + push: + paths: + - 'collections/**.json' + pull_request: + paths: + - 'collections/**.json' + +jobs: + validate-urls: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + + - name: Extract and validate URLs + run: | + FAILED=0 + CHECKED=0 + FAILED_URLS="" + + # Recursively extract all non-null string URLs from every JSON file in collections/ + URLS=$(find collections -name '*.json' -exec jq -r '.. | .url? | select(type == "string")' {} + | sort -u) + + while IFS= read -r url; do + [ -z "$url" ] && continue + CHECKED=$((CHECKED + 1)) + printf "Checking: %s ... " "$url" + + # Use Range: bytes=0-0 to avoid downloading the full file. + # --max-filesize 1 aborts early if the server ignores the Range header + # and returns 200 with the full body. The HTTP status is still captured. 
+ HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" \ + --range 0-0 \ + --max-filesize 1 \ + --max-time 30 \ + --location \ + "$url") + + if [ "$HTTP_CODE" = "200" ] || [ "$HTTP_CODE" = "206" ]; then + echo "OK ($HTTP_CODE)" + else + echo "FAILED ($HTTP_CODE)" + FAILED=$((FAILED + 1)) + FAILED_URLS="$FAILED_URLS\n - $url (HTTP $HTTP_CODE)" + fi + done <<< "$URLS" + + echo "" + echo "Checked $CHECKED URLs, $FAILED failed." + + if [ "$FAILED" -gt 0 ]; then + echo "" + echo "Broken URLs:" + printf "%b\n" "$FAILED_URLS" + exit 1 + fi diff --git a/Dockerfile b/Dockerfile index 27f3aed..c91f9ac 100644 --- a/Dockerfile +++ b/Dockerfile @@ -45,7 +45,14 @@ COPY --from=production-deps /app/node_modules /app/node_modules COPY --from=build /app/build /app # Copy root package.json for version info COPY package.json /app/version.json + +# Copy docs and README for access within the container COPY admin/docs /app/docs COPY README.md /app/README.md + +# Copy entrypoint script and ensure it's executable +COPY install/entrypoint.sh /usr/local/bin/entrypoint.sh +RUN chmod +x /usr/local/bin/entrypoint.sh + EXPOSE 8080 -CMD ["node", "./bin/server.js"] \ No newline at end of file +ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] \ No newline at end of file diff --git a/FAQ.md b/FAQ.md new file mode 100644 index 0000000..7b2860d --- /dev/null +++ b/FAQ.md @@ -0,0 +1,96 @@ +# Frequently Asked Questions (FAQ) + +Find answers to some of the most common questions about Project N.O.M.A.D. + +## Can I customize the port(s) that NOMAD uses? + +Yes, you can customize the ports that NOMAD's core services (Command Center, MySQL, Redis) use. Please refer to the [Advanced Installation](README.md#advanced-installation) section of the README for more details on how to do this. + +Note: As of 3/24/2026, only the core services defined in the `docker-compose.yml` file currently support port customization - the installable applications (e.g. Ollama, Kiwix, etc.) 
do not yet support this, but we have multiple PR's in the works to add this feature for all installable applications in a future release. + +## Can I customize the storage location for NOMAD's data? + +Yes, you can customize the storage location for NOMAD's content by modifying the `docker-compose.yml` file to adjust the appropriate bind mounts to point to your desired storage location on your host machine. Please refer to the [Advanced Installation](README.md#advanced-installation) section of the README for more details on how to do this. + +## Can I run NOMAD on MAC, WSL2, or a non-Debian-based Distro? + +See [Why does NOMAD require a Debian-based OS?](#why-does-nomad-require-a-debian-based-os) + +## Why does NOMAD require a Debian-based OS? + +Project N.O.M.A.D. is currently designed to run on Debian-based Linux distributions (with Ubuntu being the recommended distro) because our installation scripts and Docker configurations are optimized for this environment. While it's technically possible to run the Docker containers on other operating systems that support Docker, we have not tested or optimized the installation process for non-Debian-based systems, so we cannot guarantee a smooth experience on those platforms at this time. + +Support for other operating systems will come in the future, but because our development resources are limited as a free and open-source project, we needed to prioritize our efforts and focus on a narrower set of supported platforms for the initial release. We chose Debian-based Linux as our starting point because it's widely used, easy to spin up, and provides a stable environment for running Docker containers. + +Community members have provided guides for running N.O.M.A.D. on other platforms (e.g. WSL2, Mac, etc.) in our Discord community and [Github Discussions](https://github.com/Crosstalk-Solutions/project-nomad/discussions), so if you're interested in running N.O.M.A.D. 
on a non-Debian-based system, we recommend checking there for any available resources or guides. However, keep in mind that if you choose to run N.O.M.A.D. on a non-Debian-based system, you may encounter issues that we won't be able to provide support for, and you may need to have a higher level of technical expertise to troubleshoot and resolve any problems that arise. + +## Can I run NOMAD on a Raspberry Pi or other ARM-based device? +Project N.O.M.A.D. is currently designed to run on x86-64 architecture, and we have not yet tested or optimized it for ARM-based devices like the Raspberry Pi (and have not published any official images for ARM architecture). + +Support for ARM-based devices is on our roadmap, but our initial focus was on x86-64 hardware due to its widespread use and compatibility with a wide range of applications. + +Community members have forked and published their own ARM-compatible images and installation guides for running N.O.M.A.D. on Raspberry Pi and other ARM-based devices in our Discord community and [Github Discussions](https://github.com/Crosstalk-Solutions/project-nomad/discussions), but these are not officially supported by the core development team, and we cannot guarantee their functionality or provide support for any issues that arise when using these community-created resources. + +## What are the hardware requirements for running NOMAD? + +Project N.O.M.A.D. itself is quite lightweight and can run on even modest x86-64 hardware, but the tools and resources you choose to install with N.O.M.A.D. will determine the specs required for your unique deployment. Please see the [Hardware Guide](https://www.projectnomad.us/hardware) for detailed build recommendations at various price points. + +## Does NOMAD support languages other than English? + +As of March 2026, Project N.O.M.A.D.'s UI is only available in English, and the majority of the tools and resources available through N.O.M.A.D. are also primarily in English. 
However, we have multi-language support on our roadmap for a future release, and we are actively working on adding support for additional languages both in the UI and in the available tools/resources. If you're interested in contributing to this effort, please check out our [CONTRIBUTING.md](CONTRIBUTING.md) file for guidelines on how to get involved. + +## What technologies is NOMAD built with? + +Project N.O.M.A.D. is built using a combination of technologies, including: +- **Docker:** for containerization of the Command Center and its dependencies +- **Node.js & TypeScript:** for the backend of the Command Center, particularly the [AdonisJS](https://adonisjs.com/) framework +- **React:** for the frontend of the Command Center, utilizing [Vite](https://vitejs.dev/) and [Inertia.js](https://inertiajs.com/) under the hood +- **MySQL:** for the Command Center's database +- **Redis:** for various caching, background jobs, "cron" tasks, and other internal processes within the Command Center + +NOMAD makes use of the Docker-outside-of-Docker ("DooD") pattern, which allows the Command Center to manage and orchestrate other Docker containers on the host machine without needing to run Docker itself inside a container. This approach provides better performance and compatibility with a wider range of host environments while still allowing for powerful container management capabilities through the Command Center's UI. + +## Can I run NOMAD if I have existing Docker containers on my machine? +Yes, you can safely run Project N.O.M.A.D. on a machine that already has existing Docker containers. NOMAD is designed to coexist with other Docker containers and will not interfere with them as long as there are no port conflicts or resource constraints. + +All of NOMAD's containers are prefixed with `nomad_` in their names, so they can be easily identified and managed separately from any other containers you may have running. 
Just make sure to review the ports that NOMAD's core services (Command Center, MySQL, Redis) use during installation and adjust them if necessary to avoid conflicts with your existing containers. + +## Why does NOMAD require access to the Docker socket? + +See [What technologies is NOMAD built with?](#what-technologies-is-nomad-built-with) + +## Do I have to install the AI features in NOMAD? + +No, the AI features in NOMAD (Ollama, Qdrant, custom RAG pipeline, etc.) are all optional and not required to use the core functionality of NOMAD. + +## Is NOMAD actually free? Are there any hidden costs? +Yes, Project N.O.M.A.D. is completely free and open-source software licensed under the Apache License 2.0. There are no hidden costs or fees associated with using NOMAD itself, and we don't have any plans to introduce "premium" features or paid tiers. + +Aside from the cost of the hardware you choose to run it on, there are no costs associated with using NOMAD. + +## Do you sell hardware or pre-built devices with NOMAD pre-installed? + +No, we do not sell hardware or pre-built devices with NOMAD pre-installed at this time. Project N.O.M.A.D. is a free and open-source software project, and we provide detailed installation instructions and hardware recommendations for users to set up their own NOMAD instances on compatible hardware of their choice. The tradeoff to this DIY approach is some additional setup time and technical know-how required on the user's end, but it also allows for greater flexibility and customization in terms of hardware selection and configuration to best suit each user's unique needs, budget, and preferences. + +## How quickly are issues resolved when reported? + +We strive to address and resolve issues as quickly as possible, but please keep in mind that Project N.O.M.A.D. is a free and open-source project maintained by a small team of volunteers. 
We prioritize issues based on their severity, impact on users, and the resources required to resolve them. Critical issues that affect a large number of users are typically addressed more quickly, while less severe issues may take longer to resolve. Aside from the development efforts needed to address the issue, we do our best to conduct thorough testing and validation to ensure that any fix we implement doesn't introduce new issues or regressions, which also adds to the time it takes to resolve an issue. + +We also encourage community involvement in troubleshooting and resolving issues, so if you encounter a problem, please consider checking our Discord community and Github Discussions for potential solutions or workarounds while we work on an official fix. + +## How often are new features added or updates released? + +We aim to release updates and new features on a regular basis, but the exact timing can vary based on the complexity of the features being developed, the resources available to our volunteer development team, and the feedback and needs of our community. We typically release smaller "patch" versions more frequently to address bugs and make minor improvements, while larger feature releases may take more time to develop and test before they're ready for release. + +## I opened a PR to contribute a new feature or fix a bug. How long does it usually take for PRs to be reviewed and merged? +We appreciate all contributions to the project and strive to review and merge pull requests (PRs) as quickly as possible. The time it takes for a PR to be reviewed and merged can vary based on several factors, including the complexity of the changes, the current workload of our maintainers, and the need for any additional testing or revisions. + +Because NOMAD is still a young project, some PRs (particularly those for new features) may take longer to review and merge as we prioritize building out the core functionality and ensuring stability before adding new features. 
However, we do our best to provide timely feedback on all PRs and keep contributors informed about the status of their contributions. + +## I have a question that isn't answered here. Where can I ask for help? + +If you have a question that isn't answered in this FAQ, please feel free to ask for help in our Discord community (https://discord.com/invite/crosstalksolutions) or on our Github Discussions page (https://github.com/Crosstalk-Solutions/project-nomad/discussions). + +## I have a suggestion for a new feature or improvement. How can I share it? + +We welcome and encourage suggestions for new features and improvements! We highly encourage sharing your ideas (or upvoting existing suggestions) on our public roadmap at https://roadmap.projectnomad.us, where we track new feature requests. This is the best way to ensure that your suggestion is seen by the development team and the community, and it also allows other community members to upvote and show support for your idea, which can help prioritize it for future development. \ No newline at end of file diff --git a/README.md b/README.md index 9de76ee..8057a8a 100644 --- a/README.md +++ b/README.md @@ -21,13 +21,16 @@ Project N.O.M.A.D. can be installed on any Debian-based operating system (we rec *Note: sudo/root privileges are required to run the install script* -#### Quick Install +### Quick Install (Debian-based OS Only) ```bash sudo apt-get update && sudo apt-get install -y curl && curl -fsSL https://raw.githubusercontent.com/Crosstalk-Solutions/project-nomad/refs/heads/main/install/install_nomad.sh -o install_nomad.sh && sudo bash install_nomad.sh ``` Project N.O.M.A.D. is now installed on your device! Open a browser and navigate to `http://localhost:8080` (or `http://DEVICE_IP:8080`) to start exploring! 
+### Advanced Installation +For more control over the installation process, copy and paste the [Docker Compose template](https://raw.githubusercontent.com/Crosstalk-Solutions/project-nomad/refs/heads/main/install/management_compose.yaml) into a `docker-compose.yml` file and customize it to your liking (be sure to replace any placeholders with your actual values). Then, run `docker compose up -d` to start the Command Center and its dependencies. Note: this method is recommended for advanced users only, as it requires familiarity with Docker and manual configuration before starting. + ## How It Works N.O.M.A.D. is a management UI ("Command Center") and API that orchestrates a collection of containerized tools and resources via [Docker](https://www.docker.com/). It handles installation, configuration, and updates for everything — so you don't have to. @@ -82,10 +85,13 @@ To run LLM's and other included AI tools: - OS: Debian-based (Ubuntu recommended) - Stable internet connection (required during install only) -**For detailed build recommendations at three price points ($200–$800+), see the [Hardware Guide](https://www.projectnomad.us/hardware).** +**For detailed build recommendations at three price points ($150–$1,000+), see the [Hardware Guide](https://www.projectnomad.us/hardware).** Again, Project N.O.M.A.D. itself is quite lightweight - it's the tools and resources you choose to install with N.O.M.A.D. that will determine the specs required for your unique deployment +## Frequently Asked Questions (FAQ) +For answers to common questions about Project N.O.M.A.D., please see our [FAQ](FAQ.md) page. + ## About Internet Usage & Privacy Project N.O.M.A.D. is designed for offline usage. An internet connection is only required during the initial installation (to download dependencies) and if you (the user) decide to download additional tools and resources at a later time. Otherwise, N.O.M.A.D. does not require an internet connection and has ZERO built-in telemetry. 
@@ -94,49 +100,20 @@ To test internet connectivity, N.O.M.A.D. attempts to make a request to Cloudfla ## About Security By design, Project N.O.M.A.D. is intended to be open and available without hurdles - it includes no authentication. If you decide to connect your device to a local network after install (e.g. for allowing other devices to access it's resources), you can block/open ports to control which services are exposed. -**Will authentication be added in the future?** Maybe. It's not currently a priority, but if there's enough demand for it, we may consider building in an optional authentication layer in a future release to support uses cases where multiple users need access to the same instance but with different permission levels (e.g. family use with parental controls, classroom use with teacher/admin accounts, etc.). For now, we recommend using network-level controls to manage access if you're planning to expose your N.O.M.A.D. instance to other devices on a local network. N.O.M.A.D. is not designed to be exposed directly to the internet, and we strongly advise against doing so unless you really know what you're doing, have taken appropriate security measures, and understand the risks involved. +**Will authentication be added in the future?** Maybe. It's not currently a priority, but if there's enough demand for it, we may consider building in an optional authentication layer in a future release to support use cases where multiple users need access to the same instance but with different permission levels (e.g. family use with parental controls, classroom use with teacher/admin accounts, etc.). We have a suggestion for this on our public roadmap, so if this is something you'd like to see, please upvote it here: https://roadmap.projectnomad.us/posts/1/user-authentication-please-build-in-user-auth-with-admin-user-roles + +For now, we recommend using network-level controls to manage access if you're planning to expose your N.O.M.A.D. 
instance to other devices on a local network. N.O.M.A.D. is not designed to be exposed directly to the internet, and we strongly advise against doing so unless you really know what you're doing, have taken appropriate security measures, and understand the risks involved. ## Contributing -Contributions are welcome and appreciated! Please read this section fully to understand how to contribute to the project. - -### General Guidelines - -- **Open an issue first**: Before starting work on a new feature or bug fix, please open an issue to discuss your proposed changes. This helps ensure that your contribution aligns with the project's goals and avoids duplicate work. Title the issue clearly and provide a detailed description of the problem or feature you want to work on. -- **Fork the repository**: Click the "Fork" button at the top right of the repository page to create a copy of the project under your GitHub account. -- **Create a new branch**: In your forked repository, create a new branch for your work. Use a descriptive name for the branch that reflects the purpose of your changes (e.g., `fix/issue-123` or `feature/add-new-tool`). -- **Make your changes**: Implement your changes in the new branch. Follow the existing code style and conventions used in the project. Be sure to test your changes locally to ensure they work as expected. -- **Add Release Notes**: If your changes include new features, bug fixes, or improvements, please see the "Release Notes" section below to properly document your contribution for the next release. -- **Conventional Commits**: When committing your changes, please use conventional commit messages to provide clear and consistent commit history. The format is `(): `, where: - - `type` is the type of change (e.g., `feat` for new features, `fix` for bug fixes, `docs` for documentation changes, etc.) - - `scope` is an optional area of the codebase that your change affects (e.g., `api`, `ui`, `docs`, etc.) 
- - `description` is a brief summary of the change -- **Submit a pull request**: Once your changes are ready, submit a pull request to the main repository. Provide a clear description of your changes and reference any related issues. The project maintainers will review your pull request and may provide feedback or request changes before it can be merged. -- **Be responsive to feedback**: If the maintainers request changes or provide feedback on your pull request, please respond in a timely manner. Stale pull requests may be closed if there is no activity for an extended period. -- **Follow the project's code of conduct**: Please adhere to the project's code of conduct when interacting with maintainers and other contributors. Be respectful and considerate in your communications. -- **No guarantee of acceptance**: The project is community-driven, and all contributions are appreciated, but acceptance is not guaranteed. The maintainers will evaluate each contribution based on its quality, relevance, and alignment with the project's goals. -- **Thank you for contributing to Project N.O.M.A.D.!** Your efforts help make this project better for everyone. - -### Versioning -This project uses semantic versioning. The version is managed in the root `package.json` -and automatically updated by semantic-release. For simplicity's sake, the "project-nomad" image -uses the same version defined there instead of the version in `admin/package.json` (stays at 0.0.0), as it's the only published image derived from the code. - -### Release Notes -Human-readable release notes live in [`admin/docs/release-notes.md`](admin/docs/release-notes.md) and are displayed in the Command Center's built-in documentation. 
- -When working on changes, add a summary to the `## Unreleased` section at the top of that file under the appropriate heading: - -- **Features** — new user-facing capabilities -- **Bug Fixes** — corrections to existing behavior -- **Improvements** — enhancements, refactors, docs, or dependency updates - -Use the format `- **Area**: Description` to stay consistent with existing entries. When a release is triggered, CI automatically stamps the version and date, commits the update, and pushes the content to the GitHub release. +Contributions are welcome and appreciated! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines on how to contribute to the project. ## Community & Resources - **Website:** [www.projectnomad.us](https://www.projectnomad.us) - Learn more about the project - **Discord:** [Join the Community](https://discord.com/invite/crosstalksolutions) - Get help, share your builds, and connect with other NOMAD users - **Benchmark Leaderboard:** [benchmark.projectnomad.us](https://benchmark.projectnomad.us) - See how your hardware stacks up against other NOMAD builds +- **Troubleshooting Guide:** [TROUBLESHOOTING.md](TROUBLESHOOTING.md) - Find solutions to common issues +- **FAQ:** [FAQ.md](FAQ.md) - Find answers to frequently asked questions ## License diff --git a/admin/app/controllers/downloads_controller.ts b/admin/app/controllers/downloads_controller.ts index bd58790..023806b 100644 --- a/admin/app/controllers/downloads_controller.ts +++ b/admin/app/controllers/downloads_controller.ts @@ -15,4 +15,9 @@ export default class DownloadsController { const payload = await request.validateUsing(downloadJobsByFiletypeSchema) return this.downloadService.listDownloadJobs(payload.params.filetype) } + + async removeJob({ params }: HttpContext) { + await this.downloadService.removeFailedJob(params.jobId) + return { success: true } + } } diff --git a/admin/app/controllers/maps_controller.ts b/admin/app/controllers/maps_controller.ts index 8290d45..54f0e8f 
100644 --- a/admin/app/controllers/maps_controller.ts +++ b/admin/app/controllers/maps_controller.ts @@ -83,7 +83,7 @@ export default class MapsController { }) } - const styles = await this.mapService.generateStylesJSON(request.host()) + const styles = await this.mapService.generateStylesJSON(request.host(), request.protocol()) return response.json(styles) } diff --git a/admin/app/controllers/settings_controller.ts b/admin/app/controllers/settings_controller.ts index c0a312f..e90370d 100644 --- a/admin/app/controllers/settings_controller.ts +++ b/admin/app/controllers/settings_controller.ts @@ -39,6 +39,10 @@ export default class SettingsController { return inertia.render('settings/legal'); } + async support({ inertia }: HttpContext) { + return inertia.render('settings/support'); + } + async maps({ inertia }: HttpContext) { const baseAssetsCheck = await this.mapService.ensureBaseAssets(); const regionFiles = await this.mapService.listRegions(); diff --git a/admin/app/controllers/system_controller.ts b/admin/app/controllers/system_controller.ts index cdcde7f..0c3e1ad 100644 --- a/admin/app/controllers/system_controller.ts +++ b/admin/app/controllers/system_controller.ts @@ -113,6 +113,11 @@ export default class SystemController { return await this.systemService.subscribeToReleaseNotes(reqData.email); } + async getDebugInfo({}: HttpContext) { + const debugInfo = await this.systemService.getDebugInfo() + return { debugInfo } + } + async checkServiceUpdates({ response }: HttpContext) { await CheckServiceUpdatesJob.dispatch() response.send({ success: true, message: 'Service update check dispatched' }) diff --git a/admin/app/jobs/embed_file_job.ts b/admin/app/jobs/embed_file_job.ts index 0c59b32..0c0a12f 100644 --- a/admin/app/jobs/embed_file_job.ts +++ b/admin/app/jobs/embed_file_job.ts @@ -1,4 +1,4 @@ -import { Job } from 'bullmq' +import { Job, UnrecoverableError } from 'bullmq' import { QueueService } from '#services/queue_service' import { EmbedJobWithProgress } 
from '../../types/rag.js' import { RagService } from '#services/rag_service' @@ -42,7 +42,15 @@ export class EmbedFileJob { const ragService = new RagService(dockerService, ollamaService) try { - // Check if Ollama and Qdrant services are ready + // Check if Ollama and Qdrant services are installed and ready + // Use UnrecoverableError for "not installed" so BullMQ won't retry — + // retrying 30x when the service doesn't exist just wastes Redis connections + const ollamaUrl = await dockerService.getServiceURL('nomad_ollama') + if (!ollamaUrl) { + logger.warn('[EmbedFileJob] Ollama is not installed. Skipping embedding for: %s', fileName) + throw new UnrecoverableError('Ollama service is not installed. Install AI Assistant to enable file embeddings.') + } + const existingModels = await ollamaService.getModels() if (!existingModels) { logger.warn('[EmbedFileJob] Ollama service not ready yet. Will retry...') @@ -51,8 +59,8 @@ export class EmbedFileJob { const qdrantUrl = await dockerService.getServiceURL('nomad_qdrant') if (!qdrantUrl) { - logger.warn('[EmbedFileJob] Qdrant service not ready yet. Will retry...') - throw new Error('Qdrant service not ready yet') + logger.warn('[EmbedFileJob] Qdrant is not installed. Skipping embedding for: %s', fileName) + throw new UnrecoverableError('Qdrant service is not installed. Install AI Assistant to enable file embeddings.') } logger.info(`[EmbedFileJob] Services ready. 
Processing file: ${fileName}`) diff --git a/admin/app/jobs/run_download_job.ts b/admin/app/jobs/run_download_job.ts index 3cc09ad..c7f672e 100644 --- a/admin/app/jobs/run_download_job.ts +++ b/admin/app/jobs/run_download_job.ts @@ -82,14 +82,17 @@ export class RunDownloadJob { const zimService = new ZimService(dockerService) await zimService.downloadRemoteSuccessCallback([url], true) - // Dispatch an embedding job for the downloaded ZIM file - try { - await EmbedFileJob.dispatch({ - fileName: url.split('/').pop() || '', - filePath: filepath, - }) - } catch (error) { - console.error(`[RunDownloadJob] Error dispatching EmbedFileJob for URL ${url}:`, error) + // Only dispatch embedding job if AI Assistant (Ollama) is installed + const ollamaUrl = await dockerService.getServiceURL('nomad_ollama') + if (ollamaUrl) { + try { + await EmbedFileJob.dispatch({ + fileName: url.split('/').pop() || '', + filePath: filepath, + }) + } catch (error) { + console.error(`[RunDownloadJob] Error dispatching EmbedFileJob for URL ${url}:`, error) + } } } else if (filetype === 'map') { const mapsService = new MapService() diff --git a/admin/app/services/docker_service.ts b/admin/app/services/docker_service.ts index 8eafc64..5d94f54 100644 --- a/admin/app/services/docker_service.ts +++ b/admin/app/services/docker_service.ts @@ -691,6 +691,7 @@ export class DockerService { const runtimes = dockerInfo.Runtimes || {} if ('nvidia' in runtimes) { logger.info('[DockerService] NVIDIA container runtime detected via Docker API') + await this._persistGPUType('nvidia') return { type: 'nvidia' } } } catch (error) { @@ -722,12 +723,26 @@ export class DockerService { ) if (amdCheck.trim()) { logger.info('[DockerService] AMD GPU detected via lspci') + await this._persistGPUType('amd') return { type: 'amd' } } } catch (error) { // lspci not available, continue } + // Last resort: check if we previously detected a GPU and it's likely still present. 
+ // This handles cases where live detection fails transiently (e.g., Docker daemon + // hiccup, runtime temporarily unavailable) but the hardware hasn't changed. + try { + const savedType = await KVStore.getValue('gpu.type') + if (savedType === 'nvidia' || savedType === 'amd') { + logger.info(`[DockerService] No GPU detected live, but KV store has '${savedType}' from previous detection. Using saved value.`) + return { type: savedType as 'nvidia' | 'amd' } + } + } catch { + // KV store not available, continue + } + logger.info('[DockerService] No GPU detected') return { type: 'none' } } catch (error) { @@ -736,6 +751,15 @@ export class DockerService { } } + private async _persistGPUType(type: 'nvidia' | 'amd'): Promise { + try { + await KVStore.setValue('gpu.type', type) + logger.info(`[DockerService] Persisted GPU type '${type}' to KV store`) + } catch (error) { + logger.warn(`[DockerService] Failed to persist GPU type: ${error.message}`) + } + } + /** * Discover AMD GPU DRI devices dynamically. * Returns an array of device configurations for Docker. @@ -853,6 +877,45 @@ export class DockerService { this._broadcast(serviceName, 'update-creating', `Creating updated container...`) const hostConfig = inspectData.HostConfig || {} + + // Re-run GPU detection for Ollama so updates always reflect the current GPU environment. + // This handles cases where the NVIDIA Container Toolkit was installed after the initial + // Ollama setup, and ensures DeviceRequests are always built fresh rather than relying on + // round-tripping the Docker inspect format back into the create API. + let updatedDeviceRequests: any[] | undefined = undefined + if (serviceName === SERVICE_NAMES.OLLAMA) { + const gpuResult = await this._detectGPUType() + + if (gpuResult.type === 'nvidia') { + this._broadcast( + serviceName, + 'update-gpu-config', + `NVIDIA container runtime detected. 
Configuring updated container with GPU support...` + ) + updatedDeviceRequests = [ + { + Driver: 'nvidia', + Count: -1, + Capabilities: [['gpu']], + }, + ] + } else if (gpuResult.type === 'amd') { + this._broadcast( + serviceName, + 'update-gpu-config', + `AMD GPU detected. ROCm GPU acceleration is not yet supported — using CPU-only configuration.` + ) + } else if (gpuResult.toolkitMissing) { + this._broadcast( + serviceName, + 'update-gpu-config', + `NVIDIA GPU detected but NVIDIA Container Toolkit is not installed. Using CPU-only configuration. Install the toolkit and reinstall AI Assistant for GPU acceleration: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html` + ) + } else { + this._broadcast(serviceName, 'update-gpu-config', `No GPU detected. Using CPU-only configuration.`) + } + } + const newContainerConfig: any = { Image: newImage, name: serviceName, @@ -865,7 +928,7 @@ export class DockerService { Binds: hostConfig.Binds || undefined, PortBindings: hostConfig.PortBindings || undefined, RestartPolicy: hostConfig.RestartPolicy || undefined, - DeviceRequests: hostConfig.DeviceRequests || undefined, + DeviceRequests: serviceName === SERVICE_NAMES.OLLAMA ? updatedDeviceRequests : (hostConfig.DeviceRequests || undefined), Devices: hostConfig.Devices || undefined, }, NetworkingConfig: inspectData.NetworkSettings?.Networks diff --git a/admin/app/services/download_service.ts b/admin/app/services/download_service.ts index b5db238..a2b7faf 100644 --- a/admin/app/services/download_service.ts +++ b/admin/app/services/download_service.ts @@ -12,7 +12,7 @@ export class DownloadService { async listDownloadJobs(filetype?: string): Promise { // Get regular file download jobs (zim, map, etc.) 
const queue = this.queueService.getQueue(RunDownloadJob.queue) - const fileJobs = await queue.getJobs(['waiting', 'active', 'delayed']) + const fileJobs = await queue.getJobs(['waiting', 'active', 'delayed', 'failed']) const fileDownloads = fileJobs.map((job) => ({ jobId: job.id!.toString(), @@ -20,11 +20,13 @@ export class DownloadService { progress: parseInt(job.progress.toString(), 10), filepath: normalize(job.data.filepath), filetype: job.data.filetype, + status: (job.failedReason ? 'failed' : 'active') as 'active' | 'failed', + failedReason: job.failedReason || undefined, })) // Get Ollama model download jobs const modelQueue = this.queueService.getQueue(DownloadModelJob.queue) - const modelJobs = await modelQueue.getJobs(['waiting', 'active', 'delayed']) + const modelJobs = await modelQueue.getJobs(['waiting', 'active', 'delayed', 'failed']) const modelDownloads = modelJobs.map((job) => ({ jobId: job.id!.toString(), @@ -32,6 +34,8 @@ export class DownloadService { progress: parseInt(job.progress.toString(), 10), filepath: job.data.modelName || 'Unknown Model', // Use model name as filepath filetype: 'model', + status: (job.failedReason ? 
'failed' : 'active') as 'active' | 'failed', + failedReason: job.failedReason || undefined, })) const allDownloads = [...fileDownloads, ...modelDownloads] @@ -39,7 +43,22 @@ export class DownloadService { // Filter by filetype if specified const filtered = allDownloads.filter((job) => !filetype || job.filetype === filetype) - // Sort so actively downloading items (progress > 0) appear first, then by progress descending - return filtered.sort((a, b) => b.progress - a.progress) + // Sort: active downloads first (by progress desc), then failed at the bottom + return filtered.sort((a, b) => { + if (a.status === 'failed' && b.status !== 'failed') return 1 + if (a.status !== 'failed' && b.status === 'failed') return -1 + return b.progress - a.progress + }) + } + + async removeFailedJob(jobId: string): Promise { + for (const queueName of [RunDownloadJob.queue, DownloadModelJob.queue]) { + const queue = this.queueService.getQueue(queueName) + const job = await queue.getJob(jobId) + if (job) { + await job.remove() + return + } + } } } diff --git a/admin/app/services/map_service.ts b/admin/app/services/map_service.ts index 6f7cbfd..beb74b2 100644 --- a/admin/app/services/map_service.ts +++ b/admin/app/services/map_service.ts @@ -260,7 +260,7 @@ export class MapService implements IMapService { } } - async generateStylesJSON(host: string | null = null): Promise { + async generateStylesJSON(host: string | null = null, protocol: string = 'http'): Promise { if (!(await this.checkBaseAssetsExist())) { throw new Error('Base map assets are missing from storage/maps') } @@ -281,8 +281,8 @@ export class MapService implements IMapService { * e.g. user is accessing from "example.com", but we would by default generate "localhost:8080/..." so maps would * fail to load. 
*/ - const sources = this.generateSourcesArray(host, regions) - const baseUrl = this.getPublicFileBaseUrl(host, this.basemapsAssetsDir) + const sources = this.generateSourcesArray(host, regions, protocol) + const baseUrl = this.getPublicFileBaseUrl(host, this.basemapsAssetsDir, protocol) const styles = await this.generateStylesFile( rawStyles, @@ -342,9 +342,9 @@ export class MapService implements IMapService { return await listDirectoryContentsRecursive(this.baseDirPath) } - private generateSourcesArray(host: string | null, regions: FileEntry[]): BaseStylesFile['sources'][] { + private generateSourcesArray(host: string | null, regions: FileEntry[], protocol: string = 'http'): BaseStylesFile['sources'][] { const sources: BaseStylesFile['sources'][] = [] - const baseUrl = this.getPublicFileBaseUrl(host, 'pmtiles') + const baseUrl = this.getPublicFileBaseUrl(host, 'pmtiles', protocol) for (const region of regions) { if (region.type === 'file' && region.name.endsWith('.pmtiles')) { @@ -433,7 +433,7 @@ export class MapService implements IMapService { /* * Gets the appropriate public URL for a map asset depending on environment */ - private getPublicFileBaseUrl(specifiedHost: string | null, childPath: string): string { + private getPublicFileBaseUrl(specifiedHost: string | null, childPath: string, protocol: string = 'http'): string { function getHost() { try { const localUrlRaw = env.get('URL') @@ -447,7 +447,7 @@ export class MapService implements IMapService { } const host = specifiedHost || getHost() - const withProtocol = host.startsWith('http') ? host : `http://${host}` + const withProtocol = host.startsWith('http') ? host : `${protocol}://${host}` const baseUrlPath = process.env.NODE_ENV === 'production' ? 
childPath : urlJoin(this.mapStoragePath, childPath) diff --git a/admin/app/services/system_service.ts b/admin/app/services/system_service.ts index 396ff30..84157af 100644 --- a/admin/app/services/system_service.ts +++ b/admin/app/services/system_service.ts @@ -410,6 +410,117 @@ export class SystemService { } } + async getDebugInfo(): Promise { + const appVersion = SystemService.getAppVersion() + const environment = process.env.NODE_ENV || 'unknown' + + const [systemInfo, services, internetStatus, versionCheck] = await Promise.all([ + this.getSystemInfo(), + this.getServices({ installedOnly: false }), + this.getInternetStatus().catch(() => null), + this.checkLatestVersion().catch(() => null), + ]) + + const lines: string[] = [ + 'Project NOMAD Debug Info', + '========================', + `App Version: ${appVersion}`, + `Environment: ${environment}`, + ] + + if (systemInfo) { + const { cpu, mem, os, disk, fsSize, uptime, graphics } = systemInfo + + lines.push('') + lines.push('System:') + if (os.distro) lines.push(` OS: ${os.distro}`) + if (os.hostname) lines.push(` Hostname: ${os.hostname}`) + if (os.kernel) lines.push(` Kernel: ${os.kernel}`) + if (os.arch) lines.push(` Architecture: ${os.arch}`) + if (uptime?.uptime) lines.push(` Uptime: ${this._formatUptime(uptime.uptime)}`) + + lines.push('') + lines.push('Hardware:') + if (cpu.brand) { + lines.push(` CPU: ${cpu.brand} (${cpu.cores} cores)`) + } + if (mem.total) { + const total = this._formatBytes(mem.total) + const used = this._formatBytes(mem.total - (mem.available || 0)) + const available = this._formatBytes(mem.available || 0) + lines.push(` RAM: ${total} total, ${used} used, ${available} available`) + } + if (graphics.controllers && graphics.controllers.length > 0) { + for (const gpu of graphics.controllers) { + const vram = gpu.vram ? 
` (${gpu.vram} MB VRAM)` : '' + lines.push(` GPU: ${gpu.model}${vram}`) + } + } else { + lines.push(' GPU: None detected') + } + + // Disk info — try disk array first, fall back to fsSize + const diskEntries = disk.filter((d) => d.totalSize > 0) + if (diskEntries.length > 0) { + for (const d of diskEntries) { + const size = this._formatBytes(d.totalSize) + const type = d.tran?.toUpperCase() || (d.rota ? 'HDD' : 'SSD') + lines.push(` Disk: ${size}, ${Math.round(d.percentUsed)}% used, ${type}`) + } + } else if (fsSize.length > 0) { + const realFs = fsSize.filter((f) => f.fs.startsWith('/dev/')) + const seen = new Set() + for (const f of realFs) { + if (seen.has(f.size)) continue + seen.add(f.size) + lines.push(` Disk: ${this._formatBytes(f.size)}, ${Math.round(f.use)}% used`) + } + } + } + + const installed = services.filter((s) => s.installed) + lines.push('') + if (installed.length > 0) { + lines.push('Installed Services:') + for (const svc of installed) { + lines.push(` ${svc.friendly_name} (${svc.service_name}): ${svc.status}`) + } + } else { + lines.push('Installed Services: None') + } + + if (internetStatus !== null) { + lines.push('') + lines.push(`Internet Status: ${internetStatus ? 'Online' : 'Offline'}`) + } + + if (versionCheck?.success) { + const updateMsg = versionCheck.updateAvailable + ? 
`Yes (${versionCheck.latestVersion} available)` + : `No (${versionCheck.currentVersion} is latest)` + lines.push(`Update Available: ${updateMsg}`) + } + + return lines.join('\n') + } + + private _formatUptime(seconds: number): string { + const days = Math.floor(seconds / 86400) + const hours = Math.floor((seconds % 86400) / 3600) + const minutes = Math.floor((seconds % 3600) / 60) + if (days > 0) return `${days}d ${hours}h ${minutes}m` + if (hours > 0) return `${hours}h ${minutes}m` + return `${minutes}m` + } + + private _formatBytes(bytes: number, decimals = 1): string { + if (bytes === 0) return '0 Bytes' + const k = 1024 + const sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB'] + const i = Math.floor(Math.log(bytes) / Math.log(k)) + return parseFloat((bytes / Math.pow(k, i)).toFixed(decimals)) + ' ' + sizes[i] + } + async updateSetting(key: KVStoreKey, value: any): Promise { if ((value === '' || value === undefined || value === null) && KV_STORE_SCHEMA[key] === 'string') { await KVStore.clearValue(key) @@ -468,10 +579,21 @@ export class SystemService { return [] } + // Deduplicate: same device path mounted in multiple places (Docker bind-mounts) + // Keep the entry with the largest size — that's the real partition + const deduped = new Map() + for (const entry of fsSize) { + const existing = deduped.get(entry.fs) + if (!existing || entry.size > existing.size) { + deduped.set(entry.fs, entry) + } + } + const dedupedFsSize = Array.from(deduped.values()) + return diskLayout.blockdevices .filter((disk) => disk.type === 'disk') // Only physical disks .map((disk) => { - const filesystems = getAllFilesystems(disk, fsSize) + const filesystems = getAllFilesystems(disk, dedupedFsSize) // Across all partitions const totalUsed = filesystems.reduce((sum, p) => sum + (p.used || 0), 0) diff --git a/admin/app/utils/downloads.ts b/admin/app/utils/downloads.ts index 7c36378..1c26a74 100644 --- a/admin/app/utils/downloads.ts +++ b/admin/app/utils/downloads.ts @@ -88,10 +88,29 @@ export 
async function doResumableDownload({ let lastProgressTime = Date.now() let lastDownloadedBytes = startByte + // Stall detection: if no data arrives for 5 minutes, abort the download + const STALL_TIMEOUT_MS = 5 * 60 * 1000 + let stallTimer: ReturnType | null = null + + const clearStallTimer = () => { + if (stallTimer) { + clearTimeout(stallTimer) + stallTimer = null + } + } + + const resetStallTimer = () => { + clearStallTimer() + stallTimer = setTimeout(() => { + cleanup(new Error('Download stalled - no data received for 5 minutes')) + }, STALL_TIMEOUT_MS) + } + // Progress tracking stream to monitor data flow const progressStream = new Transform({ transform(chunk: Buffer, _: any, callback: Function) { downloadedBytes += chunk.length + resetStallTimer() // Update progress tracking const now = Date.now() @@ -118,6 +137,7 @@ export async function doResumableDownload({ // Handle errors and cleanup const cleanup = (error?: Error) => { + clearStallTimer() progressStream.destroy() response.data.destroy() writeStream.destroy() @@ -136,6 +156,7 @@ export async function doResumableDownload({ }) writeStream.on('finish', async () => { + clearStallTimer() if (onProgress) { onProgress({ downloadedBytes, @@ -151,7 +172,8 @@ export async function doResumableDownload({ resolve(filepath) }) - // Pipe: response -> progressStream -> writeStream + // Start stall timer and pipe: response -> progressStream -> writeStream + resetStallTimer() response.data.pipe(progressStream).pipe(writeStream) }) } diff --git a/admin/app/utils/fs.ts b/admin/app/utils/fs.ts index 7cc3ba8..59bd5c5 100644 --- a/admin/app/utils/fs.ts +++ b/admin/app/utils/fs.ts @@ -138,14 +138,13 @@ export function matchesDevice(fsPath: string, deviceName: string): boolean { // Remove /dev/ and /dev/mapper/ prefixes const normalized = fsPath.replace('/dev/mapper/', '').replace('/dev/', '') - // Direct match + // Direct match (covers /dev/sda1 ↔ sda1, /dev/nvme0n1p1 ↔ nvme0n1p1) if (normalized === deviceName) { return true } 
- // LVM volumes use dashes instead of slashes - // e.g., ubuntu--vg-ubuntu--lv matches the device name - if (fsPath.includes(deviceName)) { + // LVM/device-mapper: e.g., /dev/mapper/ubuntu--vg-ubuntu--lv contains "ubuntu--lv" + if (fsPath.startsWith('/dev/mapper/') && fsPath.includes(deviceName)) { return true } diff --git a/admin/commands/queue/work.ts b/admin/commands/queue/work.ts index e39fdbf..453268d 100644 --- a/admin/commands/queue/work.ts +++ b/admin/commands/queue/work.ts @@ -65,8 +65,23 @@ export default class QueueWork extends BaseCommand { } ) - worker.on('failed', (job, err) => { + worker.on('failed', async (job, err) => { this.logger.error(`[${queueName}] Job failed: ${job?.id}, Error: ${err.message}`) + + // If this was a Wikipedia download, mark it as failed in the DB + if (job?.data?.filetype === 'zim' && job?.data?.url?.includes('wikipedia_en_')) { + try { + const { DockerService } = await import('#services/docker_service') + const { ZimService } = await import('#services/zim_service') + const dockerService = new DockerService() + const zimService = new ZimService(dockerService) + await zimService.onWikipediaDownloadComplete(job.data.url, false) + } catch (e: any) { + this.logger.error( + `[${queueName}] Failed to update Wikipedia status: ${e.message}` + ) + } + } }) worker.on('completed', (job) => { diff --git a/admin/config/logger.ts b/admin/config/logger.ts index 59aa141..981e167 100644 --- a/admin/config/logger.ts +++ b/admin/config/logger.ts @@ -18,7 +18,7 @@ const loggerConfig = defineConfig({ targets: targets() .pushIf(!app.inProduction, targets.pretty()) - .pushIf(app.inProduction, targets.file({ destination: "/app/storage/logs/admin.log" })) + .pushIf(app.inProduction, targets.file({ destination: "/app/storage/logs/admin.log", mkdir: true })) .toArray(), }, }, diff --git a/admin/config/transmit.ts b/admin/config/transmit.ts index f8862d7..43f1d42 100644 --- a/admin/config/transmit.ts +++ b/admin/config/transmit.ts @@ -3,7 +3,7 @@ import 
{ defineConfig } from '@adonisjs/transmit' import { redis } from '@adonisjs/transmit/transports' export default defineConfig({ - pingInterval: false, + pingInterval: '30s', transport: { driver: redis({ host: env.get('REDIS_HOST'), diff --git a/admin/constants/kv_store.ts b/admin/constants/kv_store.ts index 7cae751..69872ff 100644 --- a/admin/constants/kv_store.ts +++ b/admin/constants/kv_store.ts @@ -1,3 +1,3 @@ import { KVStoreKey } from "../types/kv_store.js"; -export const SETTINGS_KEYS: KVStoreKey[] = ['chat.suggestionsEnabled', 'chat.lastModel', 'ui.hasVisitedEasySetup', 'system.earlyAccess', 'ai.assistantCustomName']; \ No newline at end of file +export const SETTINGS_KEYS: KVStoreKey[] = ['chat.suggestionsEnabled', 'chat.lastModel', 'ui.hasVisitedEasySetup', 'ui.theme', 'system.earlyAccess', 'ai.assistantCustomName']; \ No newline at end of file diff --git a/admin/docs/faq.md b/admin/docs/faq.md index 50c75b3..aa8aac1 100644 --- a/admin/docs/faq.md +++ b/admin/docs/faq.md @@ -13,12 +13,12 @@ No — that's the whole point. Once your content is downloaded, everything works ### What hardware do I need? N.O.M.A.D. is designed for capable hardware, especially if you want to use the AI features. Recommended: -- Modern multi-core CPU +- Modern multi-core CPU (AMD Ryzen 7 with Radeon graphics is the community sweet spot) - 16GB+ RAM (32GB+ for best AI performance) -- SSD storage (size depends on content — 500GB minimum, 2TB+ recommended) +- SSD storage (size depends on content — 500GB minimum, 1TB+ recommended) - NVIDIA or AMD GPU recommended for faster AI responses -**For detailed build recommendations at three price points ($200–$800+), see the [Hardware Guide](https://www.projectnomad.us/hardware).** +**For detailed build recommendations at three price points ($150–$1,000+), see the [Hardware Guide](https://www.projectnomad.us/hardware).** ### How much storage do I need? 
It depends on what you download: diff --git a/admin/docs/release-notes.md b/admin/docs/release-notes.md index 74583e6..5f74a57 100644 --- a/admin/docs/release-notes.md +++ b/admin/docs/release-notes.md @@ -1,5 +1,38 @@ # Release Notes +## Version 1.30.0 - March 20, 2026 + +### Features +- **Night Ops**: Added our most requested feature — a dark mode theme for the Command Center interface! Activate it from the footer and enjoy the sleek new look during your late-night missions. Thanks @chriscrosstalk for the contribution! +- **Debug Info**: Added a new "Debug Info" modal accessible from the footer that provides detailed system and application information for troubleshooting and support. Thanks @chriscrosstalk for the contribution! +- **Support the Project**: Added a new "Support the Project" page in settings with links to community resources, donation options, and ways to contribute. +- **Install**: The main Nomad image is now fully self-contained and directly usable with Docker Compose, allowing for more flexible and customizable installations without relying on external scripts. The image remains fully backwards compatible with existing installations, and the install script has been updated to reflect the simpler deployment process. + +### Bug Fixes +- **Settings**: Storage usage display now prefers real block devices over tempfs. Thanks @Bortlesboat for the fix! +- **Settings**: Fixed an issue where device matching and mount entry deduplication logic could cause incorrect storage usage reporting and missing devices in storage displays. +- **Maps**: The Maps page now respects the request protocol (http vs https) to ensure map tiles load correctly. Thanks @davidgross for the bug report! +- **Knowledge Base**: Fixed an issue where file embedding jobs could cause a retry storm if the Ollama service was unavailable. Thanks @skyam25 for the bug report! 
+- **Curated Collections**: Fixed some broken links in the curated collections definitions (maps and ZIM files) that were causing some resources to fail to download. +- **Easy Setup**: Fixed an issue where the "Start Here" badge would persist even after visiting the Easy Setup Wizard for the first time. Thanks @chriscrosstalk for the fix! +- **UI**: Fixed an issue where the loading spinner could look strange in certain use cases. +- **System Updates**: Fixed an issue where the update banner would persist even after the system was updated successfully. Thanks @chriscrosstalk for the fix! +- **Performance**: Various small memory leak fixes and performance improvements across the UI to ensure a smoother experience. + +### Improvements +- **Ollama**: Improved GPU detection logic to ensure the latest GPU config is always passed to the Ollama container on update. +- **Ollama**: The detected GPU type is now persisted in the database for more reliable configuration and troubleshooting across updates and restarts. Thanks @chriscrosstalk for the contribution! +- **Downloads**: Users can now dismiss failed download notifications to reduce clutter in the UI. Thanks @chriscrosstalk for the contribution! +- **Logging**: Changed the default log level to "info" to reduce noise and focus on important messages. Thanks @traxeon for the suggestion! +- **Logging**: Nomad's internal logger now creates its own log directory on startup if it doesn't already exist to prevent errors on fresh installs where the logs directory hasn't been created yet. +- **Dozzle**: Dozzle shell access and container actions are now disabled by default. Thanks @traxeon for the recommendation! +- **MySQL & Redis**: Removed port exposure to host by default for improved security. Ports can still be exposed manually if needed. Thanks @traxeon for the recommendation!
+- **Dependencies**: Various dependency updates to close security vulnerabilities and improve stability +- **Utility Scripts**: Added a check for the expected Docker Compose version (v2) in all utility scripts to provide clearer error messages and guidance if the environment is not set up correctly. +- **Utility Scripts**: Added an additional warning to the installation script to inform about potential overwriting of existing customized configurations and the importance of backing up data before running the installation script again. +- **Documentation**: Updated installation instructions to reflect the new option for manual deployment via Docker Compose without the install script. + + ## Version 1.29.0 - March 11, 2026 ### Features diff --git a/admin/docs/security-audit-v1.md b/admin/docs/security-audit-v1.md deleted file mode 100644 index 9638df0..0000000 --- a/admin/docs/security-audit-v1.md +++ /dev/null @@ -1,281 +0,0 @@ -# Project NOMAD Security Audit Report - -**Date:** 2026-03-08 -**Version audited:** v1.28.0 (main branch) -**Auditor:** Claude Code (automated + manual review) -**Target:** Pre-launch security review - ---- - -## Executive Summary - -Project NOMAD's codebase is **reasonably clean for a LAN appliance**, with no critical authentication bypasses or remote code execution vulnerabilities. However, there are **4 findings that should be fixed before public launch** — all are straightforward path traversal and SSRF issues with known fix patterns already used elsewhere in the codebase. 
- -| Severity | Count | Summary | -|----------|-------|---------| -| **HIGH** | 4 | Path traversal (3), SSRF (1) | -| **MEDIUM** | 5 | Dozzle shell, unvalidated settings read, content update URL injection, verbose errors, no rate limiting | -| **LOW** | 5 | CSRF disabled, CORS wildcard, debug logging, npm dep CVEs, hardcoded HMAC | -| **INFO** | 2 | No auth by design, Docker socket exposure by design | - ---- - -## Scans Performed - -| Scan | Tool | Result | -|------|------|--------| -| Dependency audit | `npm audit` | 2 CVEs (1 high, 1 moderate) | -| Secret scan | Manual grep (passwords, keys, tokens, certs) | Clean — all secrets from env vars | -| SAST | Semgrep (security-audit, OWASP, nodejs rulesets) | 0 findings (AdonisJS not in rulesets) | -| Docker config review | Manual review of compose, Dockerfiles, scripts | 2 actionable findings | -| Code review | Manual review of services, controllers, validators | 4 path traversal + 1 SSRF | -| API endpoint audit | Manual review of all 60+ routes | Attack surface documented | -| DAST (OWASP ZAP) | Skipped — Docker Desktop not running | Recommended as follow-up | - ---- - -## FIX BEFORE LAUNCH - -### 1. Path Traversal — ZIM File Delete (HIGH) - -**File:** `admin/app/services/zim_service.ts:329-342` -**Endpoint:** `DELETE /api/zim/:filename` - -The `filename` parameter flows into `path.join()` with no directory containment check. An attacker can delete `.zim` files outside the storage directory: - -``` -DELETE /api/zim/..%2F..%2Fsome-file.zim -``` - -**Fix:** Resolve the full path and verify it starts with the expected storage directory: - -```typescript -async delete(file: string): Promise { - let fileName = file - if (!fileName.endsWith('.zim')) { - fileName += '.zim' - } - - const basePath = join(process.cwd(), ZIM_STORAGE_PATH) - const fullPath = resolve(basePath, fileName) - - // Prevent path traversal - if (!fullPath.startsWith(basePath)) { - throw new Error('Invalid filename') - } - - // ... 
rest of delete logic -} -``` - -This pattern is already used correctly in `rag_service.ts:deleteFileBySource()`. - ---- - -### 2. Path Traversal — Map File Delete (HIGH) - -**File:** `admin/app/services/map_service.ts` (delete method) -**Endpoint:** `DELETE /api/maps/:filename` - -Identical pattern to the ZIM delete. Same fix — resolve path, verify `startsWith(basePath)`. - ---- - -### 3. Path Traversal — Documentation Read (HIGH) - -**File:** `admin/app/services/docs_service.ts:61-83` -**Endpoint:** `GET /docs/:slug` - -The `slug` parameter flows into `path.join(this.docsPath, filename)` with no containment check. An attacker can read arbitrary `.md` files on the filesystem: - -``` -GET /docs/..%2F..%2F..%2Fetc%2Fpasswd -``` - -Limited by the mandatory `.md` extension, but could still read sensitive markdown files outside the docs directory (like CLAUDE.md, README.md, etc.). - -**Fix:** - -```typescript -const basePath = this.docsPath -const fullPath = path.resolve(basePath, filename) - -if (!fullPath.startsWith(path.resolve(basePath))) { - throw new Error('Invalid document slug') -} -``` - ---- - -### 4. SSRF — Download Endpoints (HIGH) - -**File:** `admin/app/validators/common.ts` -**Endpoints:** `POST /api/zim/download-remote`, `POST /api/maps/download-remote`, `POST /api/maps/download-base-assets`, `POST /api/maps/download-remote-preflight` - -The download endpoints accept user-supplied URLs and the server fetches from them. Without validation, an attacker on the LAN (or via CSRF since `shield.ts` disables CSRF protection) could make NOMAD fetch from co-located services: -- `http://localhost:3306` (MySQL) -- `http://localhost:6379` (Redis) -- `http://169.254.169.254/` (cloud metadata — if NOMAD is ever cloud-hosted) - -**Fix:** Added `assertNotPrivateUrl()` that blocks loopback and link-local addresses before any download is initiated. Called in all download controllers. 
- -**Scope note:** RFC1918 private addresses (10.x, 172.16-31.x, 192.168.x) are intentionally **allowed** because NOMAD is a LAN appliance and users may host content mirrors on their local network. The `require_tld: false` VineJS option is preserved so URLs like `http://my-nas:8080/file.zim` remain valid. - -```typescript -const blockedPatterns = [ - /^localhost$/, - /^127\.\d+\.\d+\.\d+$/, - /^0\.0\.0\.0$/, - /^169\.254\.\d+\.\d+$/, // Link-local / cloud metadata - /^\[::1\]$/, - /^\[?fe80:/i, // IPv6 link-local -] -``` - ---- - -## FIX AFTER LAUNCH (Medium Priority) - -### 5. Dozzle Web Shell Access (MEDIUM) - -**File:** `install/management_compose.yaml:56` - -```yaml -- DOZZLE_ENABLE_SHELL=true -``` - -Dozzle on port 9999 is bound to all interfaces with shell access enabled. Anyone on the LAN can open a web shell into containers, including `nomad_admin` which has the Docker socket mounted. This creates a path from "LAN access" → "container shell" → "Docker socket" → "host root." - -**Fix:** Set `DOZZLE_ENABLE_SHELL=false`. Log viewing and container restart functionality are preserved. - ---- - -### 6. Unvalidated Settings Key Read (MEDIUM) - -**File:** `admin/app/controllers/settings_controller.ts` -**Endpoint:** `GET /api/system/settings?key=...` - -The `updateSetting` endpoint validates the key against an enum, but `getSetting` accepts any arbitrary key string. Currently harmless since the KV store only contains settings data, but could leak sensitive info if new keys are added. - -**Fix:** Apply the same enum validation to the read endpoint. - ---- - -### 7. Content Update URL Injection (MEDIUM) - -**File:** `admin/app/validators/common.ts:72-88` -**Endpoint:** `POST /api/content-updates/apply` - -The `download_url` comes directly from the client request body. An attacker can supply any URL and NOMAD will download from it. The URL should be looked up server-side from the content manifest instead. 
- -**Fix:** Validate `download_url` against the cached manifest, or apply the same loopback/link-local protections as finding #4 (already applied in this PR). - ---- - -### 8. Verbose Error Messages (MEDIUM) - -**Files:** `rag_controller.ts`, `docker_service.ts`, `system_update_service.ts` - -Several controllers return raw `error.message` in API responses, potentially leaking internal paths, stack details, or Docker error messages to the client. - -**Fix:** Return generic error messages in production. Log the details server-side. - ---- - -### 9. No Rate Limiting (MEDIUM) - -Zero rate limiting across all 60+ endpoints. While acceptable for a LAN appliance, some endpoints are particularly abusable: -- `POST /api/benchmark/run` — spins up Docker containers for CPU/memory/disk stress tests -- `POST /api/rag/upload` — file uploads (20MB limit per bodyparser config) -- `POST /api/system/services/affect` — can stop/start any service repeatedly - -**Fix:** Consider basic rate limiting on the benchmark and service control endpoints (e.g., 1 benchmark per minute, service actions throttled to prevent rapid cycling). - ---- - -## LOW PRIORITY / ACCEPTED RISK - -### 10. CSRF Protection Disabled (LOW) - -**File:** `admin/config/shield.ts` - -CSRF is disabled, meaning any website a LAN user visits could fire requests at NOMAD's API. This amplifies findings 1-4 — path traversal and SSRF could be triggered by a malicious webpage, not just direct LAN access. - -**Assessment:** Acceptable for a LAN appliance with no auth system. Enabling CSRF would require significant auth/session infrastructure changes. - -### 11. CORS Wildcard with Credentials (LOW) - -**File:** `admin/config/cors.ts` - -`origin: ['*']` with `credentials: true`. Standard for LAN appliances. - -### 12. npm Dependency CVEs (LOW) - -``` -tar <=7.5.9 HIGH Hardlink Path Traversal via Drive-Relative Linkpath -ajv <6.14.0 MODERATE ReDoS when using $data option -``` - -Both fixable via `npm audit fix`. 
Low practical risk since these are build/dev dependencies not directly exposed to user input. - -**Fix:** Run `npm audit fix` and commit the updated lockfile. - -### 13. Hardcoded HMAC Secret (LOW) - -**File:** `admin/app/services/benchmark_service.ts:35` - -The benchmark HMAC secret `'nomad-benchmark-v1-2026'` is hardcoded in open-source code. Anyone can forge leaderboard submissions. - -**Assessment:** Accepted risk. The leaderboard has compensating controls (rate limiting, plausibility validation, hardware fingerprint dedup). The secret stops casual abuse, not determined attackers. - -### 14. Production Debug Logging (LOW) - -**File:** `install/management_compose.yaml:22` - -```yaml -LOG_LEVEL=debug -``` - -Debug logging in production can expose internal state in log files. - -**Fix:** Change to `LOG_LEVEL=info` for production compose template. - ---- - -## INFORMATIONAL (By Design) - -### No Authentication - -All 60+ API endpoints are unauthenticated. This is by design — NOMAD is a LAN appliance and the network boundary is the access control. Issue #73 tracks the edge case of public IP interfaces. - -### Docker Socket Exposure - -The `nomad_admin` container mounts `/var/run/docker.sock`. This is necessary for NOMAD's core functionality (managing Docker containers). The socket is not exposed to the network — only the admin container can use it. 
- ---- - -## Recommendations Summary - -| Priority | Action | Effort | -|----------|--------|--------| -| **Before launch** | Fix 3 path traversals (ZIM delete, Map delete, Docs read) | ~30 min | -| **Before launch** | Add SSRF protection to download URL validators | ~1 hour | -| **Soon after** | Disable Dozzle shell access | 1 line change | -| **Soon after** | Validate settings key on read endpoint | ~15 min | -| **Soon after** | Sanitize error messages in responses | ~30 min | -| **Nice to have** | Run `npm audit fix` | 5 min | -| **Nice to have** | Change production log level to info | 1 line change | -| **Follow-up** | OWASP ZAP dynamic scan against NOMAD3 | ~1 hour | - ---- - -## What Went Right - -- **No hardcoded secrets** — all credentials properly use environment variables -- **No command injection** — Docker operations use the Docker API (dockerode), not shell commands -- **No SQL injection** — all database queries use AdonisJS Lucid ORM with parameterized queries -- **No eval/Function** — no dynamic code execution anywhere -- **RAG service already has the correct fix pattern** — `deleteFileBySource()` uses `resolve()` + `startsWith()` for path containment -- **Install script generates strong random passwords** — uses `/dev/urandom` for APP_KEY and DB passwords -- **No privileged containers** — GPU passthrough uses DeviceRequests, not --privileged -- **Health checks don't leak data** — internal-only calls diff --git a/admin/inertia/app/app.tsx b/admin/inertia/app/app.tsx index 2eabe10..b71ab64 100644 --- a/admin/inertia/app/app.tsx +++ b/admin/inertia/app/app.tsx @@ -11,6 +11,7 @@ import { generateUUID } from '~/lib/util' import { QueryClient, QueryClientProvider } from '@tanstack/react-query' import { ReactQueryDevtools } from '@tanstack/react-query-devtools' import NotificationsProvider from '~/providers/NotificationProvider' +import { ThemeProvider } from '~/providers/ThemeProvider' import { UsePageProps } from '../../types/system' const appName = 
import.meta.env.VITE_APP_NAME || 'Project N.O.M.A.D.' @@ -38,14 +39,16 @@ createInertiaApp({ const showDevtools = ['development', 'staging'].includes(environment) createRoot(el).render( - - - - - {showDevtools && } - - - + + + + + + {showDevtools && } + + + + ) }, diff --git a/admin/inertia/components/ActiveDownloads.tsx b/admin/inertia/components/ActiveDownloads.tsx index 5eb30f4..9661f22 100644 --- a/admin/inertia/components/ActiveDownloads.tsx +++ b/admin/inertia/components/ActiveDownloads.tsx @@ -2,6 +2,8 @@ import useDownloads, { useDownloadsProps } from '~/hooks/useDownloads' import HorizontalBarChart from './HorizontalBarChart' import { extractFileName } from '~/lib/util' import StyledSectionHeader from './StyledSectionHeader' +import { IconAlertTriangle, IconX } from '@tabler/icons-react' +import api from '~/lib/api' interface ActiveDownloadProps { filetype?: useDownloadsProps['filetype'] @@ -9,7 +11,12 @@ interface ActiveDownloadProps { } const ActiveDownloads = ({ filetype, withHeader = false }: ActiveDownloadProps) => { - const { data: downloads } = useDownloads({ filetype }) + const { data: downloads, invalidate } = useDownloads({ filetype }) + + const handleDismiss = async (jobId: string) => { + await api.removeDownloadJob(jobId) + invalidate() + } return ( <> @@ -17,22 +24,50 @@ const ActiveDownloads = ({ filetype, withHeader = false }: ActiveDownloadProps)
{downloads && downloads.length > 0 ? ( downloads.map((download) => ( -
- +
+ {download.status === 'failed' ? ( +
+ +
+

+ {extractFileName(download.filepath) || download.url} +

+

+ Download failed{download.failedReason ? `: ${download.failedReason}` : ''} +

+
+ +
+ ) : ( + + )}
)) ) : ( -

No active downloads

+

No active downloads

)}
diff --git a/admin/inertia/components/ActiveEmbedJobs.tsx b/admin/inertia/components/ActiveEmbedJobs.tsx index 5e6914e..9da78bc 100644 --- a/admin/inertia/components/ActiveEmbedJobs.tsx +++ b/admin/inertia/components/ActiveEmbedJobs.tsx @@ -35,7 +35,7 @@ const ActiveEmbedJobs = ({ withHeader = false }: ActiveEmbedJobsProps) => {
)) ) : ( -

No files are currently being processed

+

No files are currently being processed

)} diff --git a/admin/inertia/components/ActiveModelDownloads.tsx b/admin/inertia/components/ActiveModelDownloads.tsx index 1727fe5..d1d0b85 100644 --- a/admin/inertia/components/ActiveModelDownloads.tsx +++ b/admin/inertia/components/ActiveModelDownloads.tsx @@ -33,7 +33,7 @@ const ActiveModelDownloads = ({ withHeader = false }: ActiveModelDownloadsProps) )) ) : ( -

No active model downloads

+

No active model downloads

)} diff --git a/admin/inertia/components/Alert.tsx b/admin/inertia/components/Alert.tsx index ceff2a0..40fca57 100644 --- a/admin/inertia/components/Alert.tsx +++ b/admin/inertia/components/Alert.tsx @@ -43,7 +43,7 @@ export default function Alert({ } const getIconColor = () => { - if (variant === 'solid') return 'text-desert-white' + if (variant === 'solid') return 'text-white' switch (type) { case 'warning': return 'text-desert-orange' @@ -81,15 +81,15 @@ export default function Alert({ case 'solid': variantStyles.push( type === 'warning' - ? 'bg-desert-orange text-desert-white border border-desert-orange-dark' + ? 'bg-desert-orange text-white border border-desert-orange-dark' : type === 'error' - ? 'bg-desert-red text-desert-white border border-desert-red-dark' + ? 'bg-desert-red text-white border border-desert-red-dark' : type === 'success' - ? 'bg-desert-olive text-desert-white border border-desert-olive-dark' + ? 'bg-desert-olive text-white border border-desert-olive-dark' : type === 'info' - ? 'bg-desert-green text-desert-white border border-desert-green-dark' + ? 'bg-desert-green text-white border border-desert-green-dark' : type === 'info-inverted' - ? 'bg-desert-tan text-desert-white border border-desert-tan-dark' + ? 
'bg-desert-tan text-white border border-desert-tan-dark' : '' ) return classNames(baseStyles, 'shadow-lg', ...variantStyles) @@ -112,7 +112,7 @@ export default function Alert({ } const getTitleColor = () => { - if (variant === 'solid') return 'text-desert-white' + if (variant === 'solid') return 'text-white' switch (type) { case 'warning': @@ -131,7 +131,7 @@ export default function Alert({ } const getMessageColor = () => { - if (variant === 'solid') return 'text-desert-white text-opacity-90' + if (variant === 'solid') return 'text-white text-opacity-90' switch (type) { case 'warning': @@ -149,7 +149,7 @@ export default function Alert({ const getCloseButtonStyles = () => { if (variant === 'solid') { - return 'text-desert-white hover:text-desert-white hover:bg-black hover:bg-opacity-20' + return 'text-white hover:text-white hover:bg-black hover:bg-opacity-20' } switch (type) { diff --git a/admin/inertia/components/BouncingDots.tsx b/admin/inertia/components/BouncingDots.tsx index e01c3cc..64027f0 100644 --- a/admin/inertia/components/BouncingDots.tsx +++ b/admin/inertia/components/BouncingDots.tsx @@ -9,18 +9,18 @@ interface BouncingDotsProps { export default function BouncingDots({ text, containerClassName, textClassName }: BouncingDotsProps) { return (
- {text} + {text} diff --git a/admin/inertia/components/DebugInfoModal.tsx b/admin/inertia/components/DebugInfoModal.tsx new file mode 100644 index 0000000..63029cb --- /dev/null +++ b/admin/inertia/components/DebugInfoModal.tsx @@ -0,0 +1,103 @@ +import { useEffect, useState } from 'react' +import { IconBug, IconCopy, IconCheck } from '@tabler/icons-react' +import StyledModal from './StyledModal' +import api from '~/lib/api' + +interface DebugInfoModalProps { + open: boolean + onClose: () => void +} + +export default function DebugInfoModal({ open, onClose }: DebugInfoModalProps) { + const [debugText, setDebugText] = useState('') + const [loading, setLoading] = useState(false) + const [copied, setCopied] = useState(false) + + useEffect(() => { + if (!open) return + + setLoading(true) + setCopied(false) + + api.getDebugInfo().then((text) => { + if (text) { + const browserLine = `Browser: ${navigator.userAgent}` + setDebugText(text + '\n' + browserLine) + } else { + setDebugText('Failed to load debug info. Please try again.') + } + setLoading(false) + }).catch(() => { + setDebugText('Failed to load debug info. Please try again.') + setLoading(false) + }) + }, [open]) + + const handleCopy = async () => { + try { + await navigator.clipboard.writeText(debugText) + } catch { + // Fallback for older browsers + const textarea = document.querySelector('#debug-info-text') + if (textarea) { + textarea.select() + document.execCommand('copy') + } + } + setCopied(true) + setTimeout(() => setCopied(false), 2000) + } + + return ( + } + cancelText="Close" + onCancel={onClose} + > +

+ This is non-sensitive system info you can share when reporting issues. + No passwords, IPs, or API keys are included. +

+ +