# NixOS module: local AI stack (Ollama, llama.cpp, Open WebUI) with a nightly
# model-refresh unit. Wrapped via the project's `mkModule` helper so it can be
# toggled under `${namespace}.services.ai`.
{ config, lib, pkgs, namespace, ... }:

with lib;

let
  cfg = config.${namespace}.services.ai;

  # NOTE(review): `options = { }` below is empty, yet `cfg.openFirewall` and
  # `cfg.configDir` are read throughout — presumably `mkModule` declares those
  # options itself; confirm against the helper's definition.
  aiModule = lib.${namespace}.mkModule {
    inherit config;
    name = "ai";
    description = "AI Services";
    options = { };

    moduleConfig = {
      services = {
        # Ollama with ROCm acceleration; gfx override targets the local GPU.
        ollama = {
          enable = true;
          package = pkgs.ollama-rocm;
          port = 11434;
          host = "0.0.0.0";
          user = "nix-apps";
          group = "jallen-nas";
          openFirewall = cfg.openFirewall;
          rocmOverrideGfx = "11.0.2";
          loadModels = [ ];
          home = "${cfg.configDir}/ollama";
        };

        # llama.cpp server pointed at the GGUF kept current by
        # update-qwen-model (see systemd section below).
        llama-cpp = {
          enable = true;
          port = 8127;
          host = "0.0.0.0";
          openFirewall = cfg.openFirewall;
          model = "${cfg.configDir}/llama-cpp/models/Qwen3-Coder-Next-Q4_0.gguf";
          package = pkgs.llama-cpp-rocm;
          extraFlags = [
            "--fit"
            "on"
            "--seed"
            "3407"
            "--temp"
            "0.7"
            "--top-p"
            "0.9"
            "--min-p"
            "0.05"
            "--top-k"
            "30"
            "--jinja"
            "--ctx-size"
            "4096"
            "--threads"
            "8"
            "--batch-size"
            "512"
            "--gpu-layers"
            "999"
            "--flash-attn"
            "auto"
            "--mlock"
          ];
        };

        # Open WebUI front-end, secrets injected via sops.
        # NOTE(review): WEBUI_AUTH = "False" disables Open WebUI's own auth,
        # which would make the OIDC/signup settings below inert — looks like
        # auth is delegated to an Authentik proxy in front; confirm, since an
        # open firewall plus no auth would expose the instance.
        open-webui = {
          enable = true;
          package = pkgs.unstable.open-webui;
          host = "0.0.0.0";
          port = 8888;
          openFirewall = cfg.openFirewall;
          # stateDir = "/media/nas/main/nix-app-data/open-webui";
          environmentFile = config.sops.secrets."jallen-nas/open-webui".path;
          environment = {
            OPENID_PROVIDER_URL = "https://authentik.mjallen.dev/application/o/chat/.well-known/openid-configuration";
            OAUTH_PROVIDER_NAME = "authentik";
            OPENID_REDIRECT_URI = "https://chat.mjallen.dev/oauth/oidc/callback";
            ENABLE_OAUTH_SIGNUP = "False";
            OAUTH_MERGE_ACCOUNTS_BY_EMAIL = "True";
            ENABLE_SIGNUP = "False";
            ENABLE_LOGIN_FORM = "False";
            ANONYMIZED_TELEMETRY = "False";
            DO_NOT_TRACK = "True";
            SCARF_NO_ANALYTICS = "True";
            OLLAMA_API_BASE_URL = "http://127.0.0.1:11434";
            LOCAL_FILES_ONLY = "False";
            WEBUI_AUTH = "False";
          };
        };
      };

      # GPU monitoring plus the HuggingFace CLI used by the updater below.
      environment.systemPackages = with pkgs; [
        amdgpu_top
        python3Packages.huggingface-hub
      ];

      # One-shot unit that re-downloads the Qwen GGUF; the timer created by
      # `startAt` fires nightly at 03:00.
      systemd.services.update-qwen-model = {
        description = "Update Qwen3-Coder-Next model from HuggingFace";
        serviceConfig = {
          Type = "oneshot";
          ExecStart = "${pkgs.writeShellScript "update-qwen-model" ''
            set -euo pipefail
            MODEL_DIR="${cfg.configDir}/llama-cpp/models"
            MODEL_NAME="Qwen3-Coder-Next-Q4_0.gguf"
            REPO_ID="unsloth/Qwen3-Coder-Next-GGUF"
            # Create model directory if it doesn't exist
            mkdir -p "$MODEL_DIR"
            # Download the latest version of the model
            echo "Updating $MODEL_NAME from HuggingFace..."
            ${pkgs.python3Packages.huggingface-hub}/bin/huggingface-cli download \
              "$REPO_ID" \
              "$MODEL_NAME" \
              --local-dir "$MODEL_DIR"
            echo "Model updated successfully"
          ''}";
          User = "nix-apps";
          Group = "jallen-nas";
        };
        # Run daily at 3 AM
        startAt = "*-*-* 03:00:00";
      };

      # `wants` (not `requires`) so a failed download — e.g. no network at
      # boot — still lets llama-cpp start with the previously fetched model.
      systemd.services.llama-cpp = {
        after = [ "update-qwen-model.service" ];
        wants = [ "update-qwen-model.service" ];
      };
    };
  };
in
{
  imports = [ aiModule ];
}