couple fixes

This commit is contained in:
mjallen18
2026-03-23 14:07:48 -05:00
parent 6f77344d42
commit e647794a0f
10 changed files with 126 additions and 35 deletions

View File

@@ -357,8 +357,7 @@ in
};
recorder = {
# Connect via Unix socket — peer auth via identMap maps OS user 'hass' → DB user 'homeassistant'.
db_url = "postgresql://homeassistant@/homeassistant?host=/run/postgresql";
db_url = "postgresql://homeassistant@10.0.1.3/homeassistant";
purge_keep_days = 180;
};

View File

@@ -0,0 +1,25 @@
# NixOS module exposing Cockpit (web-based server management UI) through this
# flake's namespace-scoped module helper.
{
config,
lib,
namespace,
...
}:
let
# Options for this service live at config.${namespace}.services.cockpit.
name = "cockpit";
cfg = config.${namespace}.services.${name};
# lib.${namespace}.mkModule presumably declares the standard option set
# (enable/port/openFirewall) and gates moduleConfig on cfg.enable —
# TODO confirm against the helper's definition.
cockpitConfig = lib.${namespace}.mkModule {
inherit config name;
description = "Cockpit web-based server management UI";
moduleConfig = {
# Wire the namespaced options straight through to the upstream
# services.cockpit NixOS module.
services.cockpit = {
enable = true;
port = cfg.port;
openFirewall = cfg.openFirewall;
};
};
};
in
{
imports = [ cockpitConfig ];
}

View File

@@ -1,6 +1,7 @@
{
config,
lib,
pkgs,
namespace,
...
}:
@@ -142,6 +143,45 @@ let
systemd.services.crowdsec-firewall-bouncer.serviceConfig.DynamicUser = lib.mkForce false;
systemd.services.crowdsec-firewall-bouncer-register.serviceConfig.DynamicUser = lib.mkForce false;
# The upstream unit has Requires= but no After= for the register service, so
# the bouncer starts in parallel and hits LoadCredential before the key file
# exists. Adding After= enforces that the register service completes first.
systemd.services.crowdsec-firewall-bouncer.after = [ "crowdsec-firewall-bouncer-register.service" ];
# The upstream register script exits with an error when the bouncer is already
# registered in the LAPI but the local api-key.cred file is missing (e.g. after
# a system wipe or impermanence rotation). Override the script so that when the
# key file is absent it deletes the stale registration and re-registers, producing
# a fresh key file.
systemd.services.crowdsec-firewall-bouncer-register.script =
let
apiKeyFile = "/var/lib/crowdsec-firewall-bouncer-register/api-key.cred";
bouncerName = "nas-bouncer";
cscli = lib.getExe' config.services.crowdsec.package "cscli";
jq = lib.getExe pkgs.jq;
in
lib.mkForce ''
if ${cscli} bouncers list --output json | ${jq} -e -- 'any(.[]; .name == "${bouncerName}")' >/dev/null; then
# Bouncer already registered. Verify the API key is still present.
if [ ! -f ${apiKeyFile} ]; then
echo "Bouncer registered but API key file missing — deleting stale registration and re-registering"
${cscli} bouncers delete -- ${bouncerName}
rm -f '${apiKeyFile}'
if ! ${cscli} bouncers add --output raw -- ${bouncerName} >${apiKeyFile}; then
rm -f '${apiKeyFile}'
exit 1
fi
fi
else
# Bouncer not registered — fresh registration.
rm -f '${apiKeyFile}'
if ! ${cscli} bouncers add --output raw -- ${bouncerName} >${apiKeyFile}; then
rm -f '${apiKeyFile}'
exit 1
fi
fi
'';
# crowdsec-firewall-bouncer-register calls cscli without -c, so cscli
# looks for /etc/crowdsec/config.yaml. The upstream crowdsec.service uses
# a nix store path via -c and never creates that file. Expose the config

View File

@@ -145,6 +145,11 @@ let
}
];
};
systemd.services.matrix-synapse = {
after = [ "postgresql.service" ];
requires = [ "postgresql.service" ];
};
};
};
in

View File

@@ -104,6 +104,18 @@ let
users.users.nextcloud.isNormalUser = lib.mkForce false;
users.groups.nextcloud = { };
# Ensure nextcloud services start after PostgreSQL is ready.
# The upstream NixOS module only adds this ordering when services.postgresql.enable
# is true in the same config, but here PostgreSQL is managed separately.
systemd.services.nextcloud-setup = {
after = [ "postgresql.service" ];
requires = [ "postgresql.service" ];
};
systemd.services.nextcloud-update-db = {
after = [ "postgresql.service" ];
requires = [ "postgresql.service" ];
};
# Configure web server
services.nginx = {
enable = true;

View File

@@ -13,7 +13,6 @@ let
serverCfg = config.${namespace}.services.${serverName};
frontendCfg = config.${namespace}.services.${frontendName};
dbCfg = config.${namespace}.services.${dbName};
in
{
imports = [
@@ -51,15 +50,17 @@ in
environment = {
SPARKY_FITNESS_LOG_LEVEL = "0";
ALLOW_PRIVATE_NETWORK_CORS = "false";
SPARKY_FITNESS_EXTRA_TRUSTED_ORIGINS = "";
SPARKY_FITNESS_DB_USER = "sparkyfitness";
SPARKY_FITNESS_DB_HOST = "10.0.1.3";
SPARKY_FITNESS_DB_NAME = "sparkyfitness";
SPARKY_FITNESS_APP_DB_USER = "sparkyfitness";
SPARKY_FITNESS_DB_PORT = "${toString dbCfg.port}";
SPARKY_FITNESS_FRONTEND_URL = "http://10.0.1.3:${toString frontendCfg.port}";
SPARKY_FITNESS_DISABLE_SIGNUP = "false";
SPARKY_FITNESS_DB_PORT = "5432";
SPARKY_FITNESS_FRONTEND_URL = "https://sparky.mjallen.dev";
SPARKY_FITNESS_DISABLE_SIGNUP = "true";
SPARKY_FITNESS_ADMIN_EMAIL = "jalle008@proton.me";
SPARKY_FITNESS_FORCE_EMAIL_LOGIN = "true";
SPARKY_FITNESS_EXTRA_TRUSTED_ORIGINS = "http://10.0.1.3:${toString serverCfg.port}";
SPARKY_FITNESS_OIDC_AUTH_ENABLED = "true";
};
})
@@ -74,21 +75,5 @@ in
SPARKY_FITNESS_SERVER_PORT = "${toString serverCfg.port}";
};
})
(mkContainerService {
inherit config;
name = dbName;
image = "postgres:15-alpine";
internalPort = 5432;
volumes = [
"${dbCfg.configDir}/sparky-fitness/db:/var/lib/postgresql/data"
];
environment = {
POSTGRES_DB = "sparkyfitness";
POSTGRES_USER = "sparkyfitness";
# TODO: move POSTGRES_PASSWORD to sops
POSTGRES_PASSWORD = "sparkyfitness";
};
})
];
}

View File

@@ -51,6 +51,10 @@ in
port = 6066;
};
caddy = enabled;
cockpit = {
enable = true;
port = 9090;
};
calibre = {
enable = false;
port = 8084;
@@ -201,10 +205,6 @@ in
enable = true;
port = 8008;
};
sparky-fitness-db = {
enable = false;
port = 5432;
};
sparky-fitness-server = {
enable = true;
port = 3010;
@@ -212,6 +212,10 @@ in
sparky-fitness = {
enable = true;
port = 3004;
reverseProxy = {
enable = true;
subdomain = "sparky";
};
};
sunshine = {
enable = true;

View File

@@ -322,6 +322,7 @@ in
protonvpn-gui
qrencode
sbctl
systemctl-tui
tigervnc
tpm2-tools
tpm2-tss

View File

@@ -76,7 +76,6 @@ in
"restic"
"sparky-fitness"
"sparky-fitness-server"
"sparky-fitness-db"
"sunshine"
"tdarr"
"termix"

View File

@@ -82,32 +82,37 @@ in
# postgres (admin) — Unix socket, peer (OS user postgres = DB user postgres)
# authentik — Unix socket, peer (OS user authentik = DB user authentik)
# nextcloud — Unix socket, peer (OS user nextcloud = DB user nextcloud)
# homeassistant — Unix socket, peer via identMap (OS user hass → DB user homeassistant)
# synapse — Unix socket, peer via identMap (OS user matrix-synapse → DB user synapse)
# onlyoffice — Unix socket, peer (OS user onlyoffice = DB user onlyoffice) [disabled]
# sparkyfitness — Podman container TCP (10.88.0.0/16), scram-sha-256
# synapse — Unix socket, peer via identMap (OS user matrix-synapse → DB user synapse)
# homeassistant — TCP from nuc-nixos (10.0.1.4), scram-sha-256
# sparkyfitness — TCP from Podman bridge (10.88.0.0/16), scram-sha-256
authentication = lib.mkForce ''
# TYPE DATABASE USER ADDRESS METHOD
# All local Unix socket connections use peer auth (with identMap for mismatched names)
local all all peer map=system
# homeassistant runs on nuc-nixos (10.0.1.4), not on this machine.
# trust is acceptable here: access is locked to a single known host IP on the LAN.
# TODO: set a password via ensureClauses and switch to scram-sha-256.
host homeassistant homeassistant 10.0.1.4/32 trust
# Podman container network sparkyfitness server connects via host LAN IP
host sparkyfitness sparkyfitness 10.88.0.0/16 scram-sha-256
'';
# identMap — maps OS usernames to PostgreSQL usernames for peer auth.
# The catch-all regex rule (/^(.*)$ \1) allows any OS user whose name matches
# their DB user directly (authentik, nextcloud, onlyoffice, postgres).
# Explicit entries cover the mismatches.
# The catch-all regex rule allows any OS user whose name matches their DB user
# directly (authentik, nextcloud, onlyoffice, postgres).
# Explicit entries cover mismatched names.
identMap = lib.mkForce ''
# MAPNAME OS-USERNAME DB-USERNAME
system hass homeassistant
system matrix-synapse synapse
system /^(.*)$ \1
'';
# TODO: set sparkyfitness password declaratively via ensureUsers.*.ensureClauses.password
# once the SCRAM-SHA-256 hash is stored in SOPS (jallen-nas/sparky-fitness/db-password).
# The old initialScript has been removed — it only ran on first DB init and is now stale.
};
mysql = {
@@ -162,4 +167,20 @@ in
};
};
# Pre-create extensions and grant superuser-owned objects that the sparkyfitness
# role cannot manage itself. Appended to postgresql-setup.service which already
# runs as the postgres superuser after the DB is confirmed ready.
#
# 1. pg_stat_statements requires superuser to CREATE EXTENSION.
# 2. The extension installs functions owned by the postgres superuser; the
# sparkyfitness role cannot GRANT EXECUTE on objects it doesn't own, so we
# pre-grant them here before the app's grantPermissions() runs.
systemd.services.postgresql-setup.script = lib.mkAfter ''
psql -d sparkyfitness -c "
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO sparkyfitness;
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA pg_catalog TO sparkyfitness;
"
'';
}