/*
 * ModelsPage — Inertia settings page for the Ollama-backed AI assistant.
 *
 * From what is visible in this file, the page:
 *  - lists available models (server-provided `props.models.availableModels`,
 *    refreshed via a TanStack `useQuery` on ['ollama', 'availableModels',
 *    query, limit], with a debounced search input and a "force refresh"
 *    path driven by `forceRefreshRef`);
 *  - installs a model (`api.downloadModel`) and deletes one
 *    (`api.deleteModel` behind a confirm modal, then `router.reload()`);
 *  - toggles chat suggestions and renames the assistant via
 *    `updateSettingMutation` -> `api.updateSetting`;
 *  - when `systemInfo?.gpuHealth?.status === 'passthrough_failed'` and the
 *    banner was not dismissed (persisted in localStorage under
 *    'nomad:gpu-banner-dismissed'), offers a force reinstall of the
 *    'nomad_ollama' service (`api.forceReinstallService`) and reloads the
 *    page ~5s after success;
 *  - paginates with a "Load More" button that grows `limit` by 15.
 *
 * NOTE(review): this file appears to be CORRUPTED, not merely unformatted.
 * Besides the whitespace being collapsed onto a handful of physical lines,
 * the JSX element tags have been stripped out entirely — e.g. the
 * `openModal(` calls are missing their `<StyledModal` opening tags (only
 * the attributes `onCancel={closeAllModals} open={true} ... >` and the
 * orphaned text children remain), `{!isInstalled && ( )}` contains an
 * empty expression, and the final `return (` has no root element. The file
 * cannot compile in this state and the missing markup cannot be inferred
 * from what remains. Restore this file from version control before making
 * any functional change; the original source below is preserved
 * byte-for-byte so nothing further is lost.
 */
import { Head, router, usePage } from '@inertiajs/react' import { useRef, useState } from 'react' import StyledTable from '~/components/StyledTable' import SettingsLayout from '~/layouts/SettingsLayout' import { NomadOllamaModel } from '../../../types/ollama' import StyledButton from '~/components/StyledButton' import useServiceInstalledStatus from '~/hooks/useServiceInstalledStatus' import Alert from '~/components/Alert' import { useNotifications } from '~/context/NotificationContext' import api from '~/lib/api' import { useModals } from '~/context/ModalContext' import StyledModal from '~/components/StyledModal' import { ModelResponse } from 'ollama' import { SERVICE_NAMES } from '../../../constants/service_names' import Switch from '~/components/inputs/Switch' import StyledSectionHeader from '~/components/StyledSectionHeader' import { useMutation, useQuery } from '@tanstack/react-query' import Input from '~/components/inputs/Input' import { IconSearch, IconRefresh } from '@tabler/icons-react' import useDebounce from '~/hooks/useDebounce' import ActiveModelDownloads from '~/components/ActiveModelDownloads' import { useSystemInfo } from '~/hooks/useSystemInfo' export default function ModelsPage(props: { models: { availableModels: NomadOllamaModel[] installedModels: ModelResponse[] settings: { chatSuggestionsEnabled: boolean; aiAssistantCustomName: string } } }) { const { aiAssistantName } = usePage<{ aiAssistantName: string }>().props const { isInstalled } = useServiceInstalledStatus(SERVICE_NAMES.OLLAMA) const { addNotification } = useNotifications() const { openModal, closeAllModals } = useModals() const { debounce } = useDebounce() const { data: systemInfo } = useSystemInfo({}) const [gpuBannerDismissed, setGpuBannerDismissed] = useState(() => { try { return localStorage.getItem('nomad:gpu-banner-dismissed') === 'true' } catch { return false } }) const [reinstalling, setReinstalling] = useState(false) const handleDismissGpuBanner = () => { 
setGpuBannerDismissed(true) try { localStorage.setItem('nomad:gpu-banner-dismissed', 'true') } catch {} } const handleForceReinstallOllama = () => { openModal( { closeAllModals() setReinstalling(true) try { const response = await api.forceReinstallService('nomad_ollama') if (!response || !response.success) { throw new Error(response?.message || 'Force reinstall failed') } addNotification({ message: `${aiAssistantName} is being reinstalled with GPU support. This page will reload shortly.`, type: 'success', }) try { localStorage.removeItem('nomad:gpu-banner-dismissed') } catch {} setTimeout(() => window.location.reload(), 5000) } catch (error) { addNotification({ message: `Failed to reinstall: ${error instanceof Error ? error.message : 'Unknown error'}`, type: 'error', }) setReinstalling(false) } }} onCancel={closeAllModals} open={true} confirmText="Reinstall" cancelText="Cancel" >
 
This will recreate the {aiAssistantName} container with GPU support enabled. Your downloaded models will be preserved. The service will be briefly unavailable during reinstall.
 
, 'gpu-health-force-reinstall-modal' ) } const [chatSuggestionsEnabled, setChatSuggestionsEnabled] = useState( props.models.settings.chatSuggestionsEnabled ) const [aiAssistantCustomName, setAiAssistantCustomName] = useState( props.models.settings.aiAssistantCustomName ) const [query, setQuery] = useState('') const [queryUI, setQueryUI] = useState('') const [limit, setLimit] = useState(15) const debouncedSetQuery = debounce((val: string) => { setQuery(val) }, 300) const forceRefreshRef = useRef(false) const [isForceRefreshing, setIsForceRefreshing] = useState(false) const { data: availableModelData, isFetching, refetch } = useQuery({ queryKey: ['ollama', 'availableModels', query, limit], queryFn: async () => { const force = forceRefreshRef.current forceRefreshRef.current = false const res = await api.getAvailableModels({ query, recommendedOnly: false, limit, force: force || undefined, }) if (!res) { return { models: [], hasMore: false, } } return res }, initialData: { models: props.models.availableModels, hasMore: false }, }) async function handleForceRefresh() { forceRefreshRef.current = true setIsForceRefreshing(true) await refetch() setIsForceRefreshing(false) addNotification({ message: 'Model list refreshed from remote.', type: 'success' }) } async function handleInstallModel(modelName: string) { try { const res = await api.downloadModel(modelName) if (res.success) { addNotification({ message: `Model download initiated for ${modelName}. It may take some time to complete.`, type: 'success', }) } } catch (error) { console.error('Error installing model:', error) addNotification({ message: `There was an error installing the model: ${modelName}. 
Please try again.`, type: 'error', }) } } async function handleDeleteModel(modelName: string) { try { const res = await api.deleteModel(modelName) if (res.success) { addNotification({ message: `Model deleted: ${modelName}.`, type: 'success', }) } closeAllModals() router.reload() } catch (error) { console.error('Error deleting model:', error) addNotification({ message: `There was an error deleting the model: ${modelName}. Please try again.`, type: 'error', }) } } async function confirmDeleteModel(model: string) { openModal( { handleDeleteModel(model) }} onCancel={closeAllModals} open={true} confirmText="Delete" cancelText="Cancel" confirmVariant="primary" >
 
Are you sure you want to delete this model? You will need to download it again if you want to use it in the future.
 
, 'confirm-delete-model-modal' ) } const updateSettingMutation = useMutation({ mutationFn: async ({ key, value }: { key: string; value: boolean | string }) => { return await api.updateSetting(key, value) }, onSuccess: () => { addNotification({ message: 'Setting updated successfully.', type: 'success', }) }, onError: (error) => { console.error('Error updating setting:', error) addNotification({ message: 'There was an error updating the setting. Please try again.', type: 'error', }) }, }) return (
 
{aiAssistantName}
 
Easily manage the {aiAssistantName}'s settings and installed models. We recommend starting with smaller models first to see how they perform on your system before moving on to larger ones.
 
{!isInstalled && ( )} {isInstalled && systemInfo?.gpuHealth?.status === 'passthrough_failed' && !gpuBannerDismissed && ( )}
{ setChatSuggestionsEnabled(newVal) updateSettingMutation.mutate({ key: 'chat.suggestionsEnabled', value: newVal }) }} label="Chat Suggestions" description="Display AI-generated conversation starters in the chat interface" /> setAiAssistantCustomName(e.target.value)} onBlur={() => updateSettingMutation.mutate({ key: 'ai.assistantCustomName', value: aiAssistantCustomName, }) } />
{ setQueryUI(e.target.value) debouncedSetQuery(e.target.value) }} className="w-1/3" leftIcon={} /> Refresh Models
className="font-semibold mt-4" rowLines={true} columns={[ { accessor: 'name', title: 'Name', render(record) { return (
 
{record.name}
 
{record.description}
 
) }, }, { accessor: 'estimated_pulls', title: 'Estimated Pulls', }, { accessor: 'model_last_updated', title: 'Last Updated', }, ]} data={availableModelData?.models || []} loading={isFetching} expandable={{ expandedRowRender: (record) => (
{record.tags.map((tag, tagIndex) => { const isInstalled = props.models.installedModels.some( (mod) => mod.name === tag.name ) return ( ) })}
Tag Input Type Context Size Model Size Action
{tag.name} {tag.input || 'N/A'} {tag.context || 'N/A'} {tag.size || 'N/A'} { if (!isInstalled) { handleInstallModel(tag.name) } else { confirmDeleteModel(tag.name) } }} icon={isInstalled ? 'IconTrash' : 'IconDownload'} > {isInstalled ? 'Delete' : 'Install'}
), }} />
{availableModelData?.hasMore && ( { setLimit((prev) => prev + 15) }} > Load More )}
) }