repeated_keys

mjallen18
2026-04-05 14:15:20 -05:00
parent 14477a8d85
commit c439495d7a
11 changed files with 559 additions and 516 deletions

View File

@@ -158,7 +158,8 @@
# Add a module to a specific host.
systems = {
# common modules
modules.nixos = with inputs; [
modules = {
nixos = with inputs; [
authentik-nix.nixosModules.default
disko.nixosModules.disko
impermanence.nixosModules.impermanence
@@ -174,7 +175,7 @@
# The snowfall-lib fork patches create-systems to pass systems.modules.home
# into create-home-system-modules so both paths are covered from here.
# The ARM guard for steam-rom-manager is handled by that module itself.
modules.home = with inputs; [
home = with inputs; [
nix-index-database.homeModules.nix-index
steam-rom-manager.homeManagerModules.default
sops-nix.homeManagerModules.sops
@@ -182,7 +183,7 @@
plasma-manager.homeModules.plasma-manager
];
modules.darwin = with inputs; [
darwin = with inputs; [
nix-homebrew.darwinModules.nix-homebrew
home-manager.darwinModules.home-manager
nix-plist-manager.darwinModules.default
@@ -190,6 +191,7 @@
nix-index-database.darwinModules.nix-index
stylix.darwinModules.stylix
];
};
# Host config
hosts = {
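
The consolidation above is purely syntactic: in Nix, repeated dotted attrpaths and one grouped attrset literal build the same value, so folding modules.nixos, modules.home, and modules.darwin under a single modules = { ... } block changes nothing about what snowfall-lib receives. A minimal sketch of that equivalence, using placeholder values rather than the real flake inputs:

    let
      split = {
        modules.nixos = [ "a" ];
        modules.home = [ "b" ];
      };
      grouped = {
        modules = {
          nixos = [ "a" ];
          home = [ "b" ];
        };
      };
    in
      # Deep structural equality; evaluates to true (e.g. with nix-instantiate --eval).
      split == grouped

The grouped form is also what satisfies statix's repeated_keys lint, which this commit re-enables in the linter configuration further down.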

View File

@@ -15,9 +15,11 @@ let
in
{
home.username = "matt";
home.homeDirectory = "/home/matt";
home.stateVersion = "23.11";
home = {
username = "matt";
homeDirectory = "/home/matt";
stateVersion = "23.11";
};
${namespace} = {
desktop.plasma = lib.mkForce enabled;

View File

@@ -16,9 +16,11 @@ in
coolercontrol.enable = mkBoolOpt false "Enable CoolerControl fan/cooling control";
corectrl.enable = mkBoolOpt false "Enable CoreCtrl GPU control";
corectrl.enablePolkit = mkBoolOpt false "Enable CoreCtrl polkit rules";
corectrl.polkitGroup = mkOpt types.str "wheel" "Group allowed to use CoreCtrl without password";
corectrl = {
enable = mkBoolOpt false "Enable CoreCtrl GPU control";
enablePolkit = mkBoolOpt false "Enable CoreCtrl polkit rules";
polkitGroup = mkOpt types.str "wheel" "Group allowed to use CoreCtrl without password";
};
lact.enable = mkBoolOpt false "Enable LACT daemon (AMD GPU control)";
};

View File

@@ -118,7 +118,9 @@ let
];
# Systemd service for automatic model updates
systemd.services.update-qwen-model = {
systemd = {
services = {
update-qwen-model = {
description = "Update Qwen3-Coder-Next model from HuggingFace";
serviceConfig = {
Type = "oneshot";
@@ -150,7 +152,7 @@ let
startAt = "*-*-* 03:00:00";
};
systemd.services.update-qwen-model-notify-failure = {
update-qwen-model-notify-failure = {
description = "Notify ntfy on update-qwen-model failure";
serviceConfig = {
Type = "oneshot";
@@ -160,12 +162,14 @@ let
};
# Ensure model is available before llama-cpp starts
systemd.services.llama-cpp = {
llama-cpp = {
after = [ "update-qwen-model.service" ];
wants = [ "update-qwen-model.service" ];
};
};
};
};
};
in
{
imports = [ aiConfig ];
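
For context on the hunks above: startAt on the oneshot updater makes NixOS generate a matching systemd timer (here a daily 03:00 run), while the after/wants entries on llama-cpp both pull the updater in and delay the server until it has finished. A condensed sketch of that shape as a NixOS module fragment, with a hypothetical unit name and a placeholder script standing in for the real HuggingFace download and ntfy failure hookup:

    systemd.services = {
      update-model = {
        serviceConfig.Type = "oneshot";
        script = "echo 'placeholder: fetch the model here'";
        # NixOS turns startAt into update-model.timer with a matching OnCalendar=.
        startAt = "*-*-* 03:00:00";
      };
      llama-cpp = {
        # Order after the updater and pull it in, so the model exists before the server starts.
        after = [ "update-model.service" ];
        wants = [ "update-model.service" ];
      };
    };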

View File

@@ -5,7 +5,6 @@
namespace,
...
}:
with lib;
let
name = "arrs";
cfg = config.${namespace}.services.${name};
@@ -55,7 +54,8 @@ let
};
# Enable radarr service
services.radarr = {
services = {
radarr = {
enable = true;
openFirewall = cfg.openFirewall;
user = "nix-apps";
@@ -64,7 +64,7 @@ let
};
# Enable Sonarr service
services.sonarr = {
sonarr = {
enable = true;
openFirewall = cfg.openFirewall;
user = "nix-apps";
@@ -73,7 +73,7 @@ let
package = pkgs.sonarr;
};
services.lidarr = {
lidarr = {
enable = true;
openFirewall = cfg.openFirewall;
user = "nix-apps";
@@ -82,7 +82,7 @@ let
};
# Enable Sabnzbd service
services.sabnzbd = {
sabnzbd = {
enable = true;
user = "nix-apps";
group = "jallen-nas";
@@ -171,7 +171,7 @@ let
};
};
services.deluge = {
deluge = {
enable = false;
user = "nix-apps";
group = "jallen-nas";
@@ -184,7 +184,7 @@ let
};
};
services.jackett = {
jackett = {
enable = false;
user = "nix-apps";
group = "jallen-nas";
@@ -192,6 +192,7 @@ let
};
};
};
};
in
{
imports = [ arrsConfig ];

View File

@@ -167,26 +167,44 @@ let
# but /var/lib/crowdsec already exists as a real dir. Disabling DynamicUser on
# those two services lets them use the real crowdsec user/group instead, which is
# consistent with how crowdsec.service itself runs.
systemd.services.crowdsec.serviceConfig = lib.mkMerge [
systemd = {
# The ntfy plugin config YAML (with credentials baked in) is managed as a
# SOPS template in sops.nix — it renders to /run/secrets/rendered/crowdsec/
# notifications/ntfy.yaml at runtime. We use a tmpfiles symlink to expose
# it at the path CrowdSec scans, since environment.etc can't reference
# /run paths as source.
tmpfiles.rules = lib.mkIf cfg.ntfy.enable [
"L /etc/crowdsec/notifications/ntfy.yaml - - - - ${
config.sops.templates."crowdsec/notifications/ntfy.yaml".path
}"
];
services = {
crowdsec = {
serviceConfig = lib.mkMerge [
{ DynamicUser = lib.mkForce false; }
(lib.mkIf (cfg.ntfy.enable && cfg.ntfy.envFile != "") {
EnvironmentFile = [ cfg.ntfy.envFile ];
})
];
systemd.services.crowdsec-firewall-bouncer.serviceConfig.DynamicUser = lib.mkForce false;
systemd.services.crowdsec-firewall-bouncer-register.serviceConfig.DynamicUser = lib.mkForce false;
};
# The upstream unit has Requires= but no After= for the register service, so
# the bouncer starts in parallel and hits LoadCredential before the key file
# exists. Adding After= enforces that the register service completes first.
systemd.services.crowdsec-firewall-bouncer.after = [ "crowdsec-firewall-bouncer-register.service" ];
crowdsec-firewall-bouncer = {
serviceConfig.DynamicUser = lib.mkForce false;
after = [ "crowdsec-firewall-bouncer-register.service" ];
};
crowdsec-firewall-bouncer-register = {
serviceConfig.DynamicUser = lib.mkForce false;
# The upstream register script exits with an error when the bouncer is already
# registered in the LAPI but the local api-key.cred file is missing (e.g. after
# a system wipe or impermanence rotation). Override the script so that when the
# key file is absent it deletes the stale registration and re-registers, producing
# a fresh key file.
systemd.services.crowdsec-firewall-bouncer-register.script =
script =
let
apiKeyFile = "/var/lib/crowdsec-firewall-bouncer-register/api-key.cred";
bouncerName = "nas-bouncer";
@@ -214,6 +232,9 @@ let
fi
fi
'';
};
};
};
# crowdsec-firewall-bouncer-register calls cscli without -c, so cscli
# looks for /etc/crowdsec/config.yaml. The upstream crowdsec.service uses
@@ -277,17 +298,6 @@ let
};
};
# The ntfy plugin config YAML (with credentials baked in) is managed as a
# SOPS template in sops.nix — it renders to /run/secrets/rendered/crowdsec/
# notifications/ntfy.yaml at runtime. We use a tmpfiles symlink to expose
# it at the path CrowdSec scans, since environment.etc can't reference
# /run paths as source.
systemd.tmpfiles.rules = lib.mkIf cfg.ntfy.enable [
"L /etc/crowdsec/notifications/ntfy.yaml - - - - ${
config.sops.templates."crowdsec/notifications/ntfy.yaml".path
}"
];
};
};
in
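
The tmpfiles rule relocated in this diff is terse, so a field-by-field reading may help: systemd-tmpfiles lines are "Type Path Mode User Group Age Argument", type L creates a symlink, a dash keeps a field at its default, and the trailing argument is the symlink target. A hedged sketch with the rendered SOPS path written out literally (the real rule interpolates config.sops.templates."crowdsec/notifications/ntfy.yaml".path and sits behind the cfg.ntfy.enable guard):

    systemd.tmpfiles.rules = [
      # Type  Path                                   Mode User Group Age  Argument (symlink target)
      "L /etc/crowdsec/notifications/ntfy.yaml - - - - /run/secrets/rendered/crowdsec/notifications/ntfy.yaml"
    ];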

View File

@@ -31,41 +31,21 @@ let
in
{
config = lib.mkIf cfg.enable {
services.livekit = {
services = {
livekit = {
enable = true;
openFirewall = true;
settings.room.auto_create = false;
inherit keyFile;
};
services.lk-jwt-service = {
lk-jwt-service = {
enable = true;
port = 8585;
# can be on the same virtualHost as synapse
livekitUrl = "wss://mjallen.dev/livekit/sfu";
inherit keyFile;
};
# generate the key when needed
systemd.services.livekit-key = {
before = [
"lk-jwt-service.service"
"livekit.service"
];
wantedBy = [ "multi-user.target" ];
path = with pkgs; [
livekit
coreutils
gawk
];
script = ''
echo "Key missing, generating key"
echo "lk-jwt-service: $(livekit-server generate-keys | tail -1 | awk '{print $3}')" > "${keyFile}"
'';
serviceConfig.Type = "oneshot";
unitConfig.ConditionPathExists = "!${keyFile}";
};
# restrict access to livekit room creation to a homeserver
systemd.services.lk-jwt-service.environment.LIVEKIT_FULL_ACCESS_HOMESERVERS = "mjallen.dev";
services.nginx = {
nginx = {
enable = true;
defaultHTTPListenPort = 8188;
virtualHosts = {
@@ -110,4 +90,30 @@ in
};
};
};
# generate the key when needed
systemd = {
services = {
livekit-key = {
before = [
"lk-jwt-service.service"
"livekit.service"
];
wantedBy = [ "multi-user.target" ];
path = with pkgs; [
livekit
coreutils
gawk
];
script = ''
echo "Key missing, generating key"
echo "lk-jwt-service: $(livekit-server generate-keys | tail -1 | awk '{print $3}')" > "${keyFile}"
'';
serviceConfig.Type = "oneshot";
unitConfig.ConditionPathExists = "!${keyFile}";
};
# restrict access to livekit room creation to a homeserver
lk-jwt-service.environment.LIVEKIT_FULL_ACCESS_HOMESERVERS = "mjallen.dev";
};
};
};
}
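
One detail worth spelling out from the relocated livekit-key unit: a leading "!" on ConditionPathExists negates the check, so the oneshot only runs while the key file is missing, and before plus wantedBy ensure it completes ahead of the two consumers at boot. A stripped-down sketch of that generate-once pattern, with a hypothetical unit name, path, and key command:

    systemd.services.generate-key = {
      wantedBy = [ "multi-user.target" ];
      before = [ "livekit.service" ];
      serviceConfig.Type = "oneshot";
      # Skip the unit entirely once the file exists; "!" inverts the condition.
      unitConfig.ConditionPathExists = "!/var/lib/livekit/keyfile";
      script = ''
        umask 077
        echo "placeholder-key" > /var/lib/livekit/keyfile
      '';
    };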

View File

@@ -5,7 +5,6 @@
namespace,
...
}:
with lib;
let
name = "nextcloud";
cfg = config.${namespace}.services.${name};
@@ -18,13 +17,9 @@ let
options = { };
moduleConfig = {
# Override the empty systemd service created by mkModule.
# The native NixOS nextcloud module doesn't create a persistent "nextcloud.service"
# (it uses PHP-FPM pools and cron instead), so we clear this to avoid the error:
# "Service has no ExecStart=, ExecStop=, or SuccessAction=. Refusing."
systemd.services.nextcloud = lib.mkForce { };
# Setup the native NixOS Nextcloud service
services.nextcloud = {
services = {
nextcloud = {
enable = true;
package = pkgs.nextcloud33;
hostName = "cloud.mjallen.dev";
@@ -114,24 +109,7 @@ let
};
};
users.users.nextcloud.isSystemUser = lib.mkForce true;
users.users.nextcloud.isNormalUser = lib.mkForce false;
users.groups.nextcloud = { };
# Ensure nextcloud services start after PostgreSQL is ready.
# The upstream NixOS module only adds this ordering when services.postgresql.enable
# is true in the same config, but here PostgreSQL is managed separately.
systemd.services.nextcloud-setup = {
after = [ "postgresql.service" ];
requires = [ "postgresql.service" ];
};
systemd.services.nextcloud-update-db = {
after = [ "postgresql.service" ];
requires = [ "postgresql.service" ];
};
# Configure web server
services.nginx = {
nginx = {
enable = true;
group = "jallen-nas";
virtualHosts.${config.services.nextcloud.hostName} = {
@@ -145,6 +123,40 @@ let
};
};
};
users = {
users = {
nextcloud = {
isSystemUser = lib.mkForce true;
isNormalUser = lib.mkForce false;
};
};
groups = {
nextcloud = { };
};
};
# Ensure nextcloud services start after PostgreSQL is ready.
# The upstream NixOS module only adds this ordering when services.postgresql.enable
# is true in the same config, but here PostgreSQL is managed separately.
systemd = {
services = {
# Override the empty systemd service created by mkModule.
# The native NixOS nextcloud module doesn't create a persistent "nextcloud.service"
# (it uses PHP-FPM pools and cron instead), so we clear this to avoid the error:
# "Service has no ExecStart=, ExecStop=, or SuccessAction=. Refusing."
nextcloud = lib.mkForce { };
nextcloud-setup = {
after = [ "postgresql.service" ];
requires = [ "postgresql.service" ];
};
nextcloud-update-db = {
after = [ "postgresql.service" ];
requires = [ "postgresql.service" ];
};
};
};
};
};
in
{
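
The lib.mkForce { } override in the Nextcloud hunk leans on module-system priorities: mkForce assigns priority 50, which beats an ordinary definition's priority 100, so its value replaces rather than merges with what mkModule defined, per the comment above. A minimal, self-contained illustration of that mechanism with a throwaway option, assuming <nixpkgs> is available on NIX_PATH:

    let
      lib = (import <nixpkgs> { }).lib;
      eval = lib.evalModules {
        modules = [
          { options.greeting = lib.mkOption { type = lib.types.str; }; }
          { config.greeting = "ordinary definition"; }          # priority 100
          { config.greeting = lib.mkForce "forced override"; }  # priority 50 wins
        ];
      };
    in
      eval.config.greeting  # evaluates to "forced override"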

View File

@@ -5,7 +5,6 @@
# empty_pattern: { ... }: is a valid and readable no-arg pattern.
# unquoted_uri: false-positives inside shell heredocs in Nix strings.
# useless_has_attr: if/has-attr patterns are sometimes clearer.
# repeated_keys: intentionally split across sections for readability/context.
disabled = [
"manual_inherit",
"manual_inherit_from",
@@ -13,7 +12,6 @@ disabled = [
"empty_pattern",
"unquoted_uri",
"useless_has_attr",
"repeated_keys",
]
# Exclude files where statix's parser fails on complex shell-in-Nix content.
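
This is the statix linter configuration (TOML); dropping repeated_keys from the disabled list means the linter now reports split attrpaths like the ones consolidated throughout the rest of this commit, while the remaining entries stay opted out for the reasons in the comment. For instance, the "{ ... }:" no-arg pattern defended there looks like the sketch below; statix's empty_pattern lint would otherwise flag it (typically suggesting a bare _ argument instead):

    # A no-arg NixOS module using the explicit ellipsis pattern kept readable
    # by leaving empty_pattern disabled.
    { ... }:
    {
      services.openssh.enable = true;
    }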

View File

@@ -8,22 +8,28 @@
(modulesPath + "/installer/scan/not-detected.nix")
];
boot.initrd.availableKernelModules = [
boot = {
initrd = {
luks.devices."cryptroot".device = "/dev/disk/by-uuid/6fc86225-2bd4-4d9f-ba51-c3bc6b1dc7f9";
availableKernelModules = [
"usbhid"
"usb_storage"
"sdhci_pci"
];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
kernelModules = [ ];
};
kernelModules = [ ];
extraModulePackages = [ ];
};
fileSystems."/" = {
fileSystems = {
"/" = {
device = "none";
fsType = "tmpfs";
options = [ "mode=755" ];
};
fileSystems."/boot" = {
"/boot" = {
device = "/dev/disk/by-uuid/80CC-18FC";
fsType = "vfat";
options = [
@@ -32,7 +38,7 @@
];
};
fileSystems."/home" = {
"/home" = {
device = "/dev/mapper/cryptroot";
fsType = "btrfs";
options = [
@@ -41,10 +47,7 @@
];
};
boot.initrd.luks.devices."cryptroot".device =
"/dev/disk/by-uuid/6fc86225-2bd4-4d9f-ba51-c3bc6b1dc7f9";
fileSystems."/persist" = {
"/persist" = {
device = "/dev/mapper/cryptroot";
fsType = "btrfs";
options = [
@@ -53,7 +56,7 @@
];
};
fileSystems."/etc" = {
"/etc" = {
device = "/dev/mapper/cryptroot";
fsType = "btrfs";
options = [
@@ -62,7 +65,7 @@
];
};
fileSystems."/root" = {
"/root" = {
device = "/dev/mapper/cryptroot";
fsType = "btrfs";
options = [
@@ -71,7 +74,7 @@
];
};
fileSystems."/nix" = {
"/nix" = {
device = "/dev/mapper/cryptroot";
fsType = "btrfs";
options = [
@@ -80,7 +83,7 @@
];
};
fileSystems."/var/log" = {
"/var/log" = {
device = "/dev/mapper/cryptroot";
fsType = "btrfs";
options = [
@@ -88,6 +91,7 @@
"compress=zstd"
];
};
};
swapDevices = [ ];

View File

@@ -9,8 +9,6 @@ in
{
# Define a user account. Don't forget to set a password with passwd.
users = {
groups.nut.name = "nut";
groups."jallen-nas".name = "jallen-nas";
# Nix app account
users = {
nix-apps = {
@@ -66,7 +64,11 @@ in
};
};
groups.nextcloud-exporter = { };
groups.crowdsec = { };
groups = {
nextcloud-exporter = { };
crowdsec = { };
nut.name = "nut";
"jallen-nas".name = "jallen-nas";
};
};
}