couple fixes

This commit is contained in:
mjallen18
2026-03-23 14:07:48 -05:00
parent 6f77344d42
commit e647794a0f
10 changed files with 126 additions and 35 deletions

View File

@@ -357,8 +357,7 @@ in
};
recorder = {
# Connect via Unix socket — peer auth via identMap maps OS user 'hass' → DB user 'homeassistant'.
db_url = "postgresql://homeassistant@/homeassistant?host=/run/postgresql";
db_url = "postgresql://homeassistant@10.0.1.3/homeassistant";
purge_keep_days = 180;
};

View File

@@ -0,0 +1,25 @@
# Cockpit web UI, wrapped in the project's mkModule helper so it is gated
# behind config.${namespace}.services.cockpit (port / openFirewall options
# presumably declared by mkModule — confirm in lib.${namespace}).
{
  config,
  lib,
  namespace,
  ...
}:
let
  serviceName = "cockpit";
  serviceCfg = config.${namespace}.services.${serviceName};
in
{
  imports = [
    (lib.${namespace}.mkModule {
      inherit config;
      name = serviceName;
      description = "Cockpit web-based server management UI";
      moduleConfig = {
        # Upstream NixOS Cockpit service, parameterized by this module's options.
        services.cockpit = {
          enable = true;
          port = serviceCfg.port;
          openFirewall = serviceCfg.openFirewall;
        };
      };
    })
  ];
}

View File

@@ -1,6 +1,7 @@
{
config,
lib,
pkgs,
namespace,
...
}:
@@ -142,6 +143,45 @@ let
systemd.services.crowdsec-firewall-bouncer.serviceConfig.DynamicUser = lib.mkForce false;
systemd.services.crowdsec-firewall-bouncer-register.serviceConfig.DynamicUser = lib.mkForce false;
# The upstream unit has Requires= but no After= for the register service, so
# the bouncer starts in parallel and hits LoadCredential before the key file
# exists. Adding After= enforces that the register service completes first.
systemd.services.crowdsec-firewall-bouncer.after = [ "crowdsec-firewall-bouncer-register.service" ];
# The upstream register script exits with an error when the bouncer is already
# registered in the LAPI but the local api-key.cred file is missing (e.g. after
# a system wipe or impermanence rotation). Override the script so that when the
# key file is absent it deletes the stale registration and re-registers, producing
# a fresh key file.
# Override the upstream register script. The stock script exits with an error
# when the bouncer is already registered in the LAPI but the local api-key.cred
# file is missing (e.g. after a system wipe or impermanence rotation). This
# version deletes the stale registration and re-registers, producing a fresh
# key file. The duplicated add-or-fail snippet is factored into register().
systemd.services.crowdsec-firewall-bouncer-register.script =
  let
    apiKeyFile = "/var/lib/crowdsec-firewall-bouncer-register/api-key.cred";
    bouncerName = "nas-bouncer";
    cscli = lib.getExe' config.services.crowdsec.package "cscli";
    jq = lib.getExe pkgs.jq;
  in
  lib.mkForce ''
    # Register the bouncer and write its API key; on failure remove the
    # partially-written key file so the next attempt starts clean. `exit`
    # inside the function aborts the whole script.
    register() {
      if ! ${cscli} bouncers add --output raw -- ${bouncerName} >'${apiKeyFile}'; then
        rm -f '${apiKeyFile}'
        exit 1
      fi
    }

    if ${cscli} bouncers list --output json | ${jq} -e -- 'any(.[]; .name == "${bouncerName}")' >/dev/null; then
      # Bouncer already registered in the LAPI. Verify the API key is still present.
      if [ ! -f '${apiKeyFile}' ]; then
        echo "Bouncer registered but API key file missing; deleting stale registration and re-registering"
        ${cscli} bouncers delete -- ${bouncerName}
        rm -f '${apiKeyFile}'
        register
      fi
    else
      # Bouncer not registered: perform a fresh registration.
      rm -f '${apiKeyFile}'
      register
    fi
  '';
# crowdsec-firewall-bouncer-register calls cscli without -c, so cscli
# looks for /etc/crowdsec/config.yaml. The upstream crowdsec.service uses
# a nix store path via -c and never creates that file. Expose the config

View File

@@ -145,6 +145,11 @@ let
}
];
};
# Start Synapse only after PostgreSQL is up; Requires= additionally stops
# Synapse whenever postgresql.service stops or fails.
# NOTE(review): unit ordering only applies to units on the same host —
# presumably PostgreSQL runs locally here; confirm.
systemd.services.matrix-synapse = {
after = [ "postgresql.service" ];
requires = [ "postgresql.service" ];
};
};
};
in

View File

@@ -104,6 +104,18 @@ let
users.users.nextcloud.isNormalUser = lib.mkForce false;
users.groups.nextcloud = { };
# Ensure nextcloud services start after PostgreSQL is ready.
# The upstream NixOS module only adds this ordering when services.postgresql.enable
# is true in the same config, but here PostgreSQL is managed separately.
# One-shot setup unit (schema creation/migration) must wait for the database;
# Requires= also pulls postgresql.service in when this unit is started.
systemd.services.nextcloud-setup = {
after = [ "postgresql.service" ];
requires = [ "postgresql.service" ];
};
# The recurring DB maintenance unit has the same PostgreSQL dependency.
systemd.services.nextcloud-update-db = {
after = [ "postgresql.service" ];
requires = [ "postgresql.service" ];
};
# Configure web server
services.nginx = {
enable = true;

View File

@@ -13,7 +13,6 @@ let
serverCfg = config.${namespace}.services.${serverName};
frontendCfg = config.${namespace}.services.${frontendName};
dbCfg = config.${namespace}.services.${dbName};
in
{
imports = [
@@ -51,15 +50,17 @@ in
environment = {
SPARKY_FITNESS_LOG_LEVEL = "0";
ALLOW_PRIVATE_NETWORK_CORS = "false";
SPARKY_FITNESS_EXTRA_TRUSTED_ORIGINS = "";
SPARKY_FITNESS_DB_USER = "sparkyfitness";
SPARKY_FITNESS_DB_HOST = "10.0.1.3";
SPARKY_FITNESS_DB_NAME = "sparkyfitness";
SPARKY_FITNESS_APP_DB_USER = "sparkyfitness";
SPARKY_FITNESS_DB_PORT = "${toString dbCfg.port}";
SPARKY_FITNESS_FRONTEND_URL = "http://10.0.1.3:${toString frontendCfg.port}";
SPARKY_FITNESS_DISABLE_SIGNUP = "false";
SPARKY_FITNESS_DB_PORT = "5432";
SPARKY_FITNESS_FRONTEND_URL = "https://sparky.mjallen.dev";
SPARKY_FITNESS_DISABLE_SIGNUP = "true";
SPARKY_FITNESS_ADMIN_EMAIL = "jalle008@proton.me";
SPARKY_FITNESS_FORCE_EMAIL_LOGIN = "true";
SPARKY_FITNESS_EXTRA_TRUSTED_ORIGINS = "http://10.0.1.3:${toString serverCfg.port}";
SPARKY_FITNESS_OIDC_AUTH_ENABLED = "true";
};
})
@@ -74,21 +75,5 @@ in
SPARKY_FITNESS_SERVER_PORT = "${toString serverCfg.port}";
};
})
(mkContainerService {
inherit config;
name = dbName;
image = "postgres:15-alpine";
internalPort = 5432;
volumes = [
"${dbCfg.configDir}/sparky-fitness/db:/var/lib/postgresql/data"
];
environment = {
POSTGRES_DB = "sparkyfitness";
POSTGRES_USER = "sparkyfitness";
# TODO: move POSTGRES_PASSWORD to sops
POSTGRES_PASSWORD = "sparkyfitness";
};
})
];
}