repeated_keys

mjallen18
2026-04-05 14:15:20 -05:00
parent 14477a8d85
commit c439495d7a
11 changed files with 559 additions and 516 deletions
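Every file in this commit applies the same mechanical refactor: Nix treats a dotted attribute path as shorthand for a nested attribute set, so repeated key prefixes can be merged into a single block without changing evaluation. A minimal sketch of the pattern, with hypothetical option names:

  # Before: the shared prefix is repeated on every line
  foo.enable = true;
  foo.port = 8080;

  # After: one nested set, semantically identical
  foo = {
    enable = true;
    port = 8080;
  };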

View File

@@ -16,9 +16,11 @@ in
   coolercontrol.enable = mkBoolOpt false "Enable CoolerControl fan/cooling control";
-  corectrl.enable = mkBoolOpt false "Enable CoreCtrl GPU control";
-  corectrl.enablePolkit = mkBoolOpt false "Enable CoreCtrl polkit rules";
-  corectrl.polkitGroup = mkOpt types.str "wheel" "Group allowed to use CoreCtrl without password";
+  corectrl = {
+    enable = mkBoolOpt false "Enable CoreCtrl GPU control";
+    enablePolkit = mkBoolOpt false "Enable CoreCtrl polkit rules";
+    polkitGroup = mkOpt types.str "wheel" "Group allowed to use CoreCtrl without password";
+  };
   lact.enable = mkBoolOpt false "Enable LACT daemon (AMD GPU control)";
 };

View File

@@ -118,51 +118,55 @@ let
   ];
   # Systemd service for automatic model updates
-  systemd.services.update-qwen-model = {
-    description = "Update Qwen3-Coder-Next model from HuggingFace";
-    serviceConfig = {
-      Type = "oneshot";
-      ExecStart = "${pkgs.writeShellScript "update-qwen-model" ''
-        set -euo pipefail
-        MODEL_DIR="${cfg.configDir}/llama-cpp/models"
-        MODEL_NAME="${cfg.llama-cpp.model}.gguf"
-        REPO_ID="unsloth/Qwen3-Coder-Next-GGUF"
-        # Create model directory if it doesn't exist
-        mkdir -p "$MODEL_DIR"
-        # Download the latest version of the model
-        echo "Updating $MODEL_NAME from HuggingFace..."
-        ${pkgs.python3Packages.huggingface-hub}/bin/hf download \
-          "$REPO_ID" \
-          "$MODEL_NAME" \
-          --local-dir "$MODEL_DIR"
-        echo "Model updated successfully"
-      ''}";
-      User = "nix-apps";
-      Group = "jallen-nas";
-      EnvironmentFile = [ config.sops.templates."ntfy.env".path ];
-    };
-    unitConfig.OnFailure = "update-qwen-model-notify-failure.service";
-    # Run daily at 3 AM
-    startAt = "*-*-* 03:00:00";
-  };
-  systemd.services.update-qwen-model-notify-failure = {
-    description = "Notify ntfy on update-qwen-model failure";
-    serviceConfig = {
-      Type = "oneshot";
-      ExecStart = "${ntfyModelFailScript}";
-      EnvironmentFile = [ config.sops.templates."ntfy.env".path ];
-    };
-  };
-  # Ensure model is available before llama-cpp starts
-  systemd.services.llama-cpp = {
-    after = [ "update-qwen-model.service" ];
-    wants = [ "update-qwen-model.service" ];
-  };
+  systemd = {
+    services = {
+      update-qwen-model = {
+        description = "Update Qwen3-Coder-Next model from HuggingFace";
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = "${pkgs.writeShellScript "update-qwen-model" ''
+            set -euo pipefail
+            MODEL_DIR="${cfg.configDir}/llama-cpp/models"
+            MODEL_NAME="${cfg.llama-cpp.model}.gguf"
+            REPO_ID="unsloth/Qwen3-Coder-Next-GGUF"
+            # Create model directory if it doesn't exist
+            mkdir -p "$MODEL_DIR"
+            # Download the latest version of the model
+            echo "Updating $MODEL_NAME from HuggingFace..."
+            ${pkgs.python3Packages.huggingface-hub}/bin/hf download \
+              "$REPO_ID" \
+              "$MODEL_NAME" \
+              --local-dir "$MODEL_DIR"
+            echo "Model updated successfully"
+          ''}";
+          User = "nix-apps";
+          Group = "jallen-nas";
+          EnvironmentFile = [ config.sops.templates."ntfy.env".path ];
+        };
+        unitConfig.OnFailure = "update-qwen-model-notify-failure.service";
+        # Run daily at 3 AM
+        startAt = "*-*-* 03:00:00";
+      };
+      update-qwen-model-notify-failure = {
+        description = "Notify ntfy on update-qwen-model failure";
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = "${ntfyModelFailScript}";
+          EnvironmentFile = [ config.sops.templates."ntfy.env".path ];
+        };
+      };
+      # Ensure model is available before llama-cpp starts
+      llama-cpp = {
+        after = [ "update-qwen-model.service" ];
+        wants = [ "update-qwen-model.service" ];
+      };
+    };
+  };
 };
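The unit above combines two NixOS idioms worth noting: on a oneshot service, startAt makes the module generate a matching systemd timer, and unitConfig.OnFailure chains a second unit that fires only when a run fails. A minimal sketch, with hypothetical unit names:

  systemd.services.nightly-job = {
    serviceConfig.Type = "oneshot";
    script = "echo 'doing nightly work'";
    # startAt generates nightly-job.timer with this OnCalendar expression
    startAt = "*-*-* 03:00:00";
    # systemd starts this unit whenever nightly-job enters a failed state
    unitConfig.OnFailure = "nightly-job-notify.service";
  };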

View File

@@ -5,7 +5,6 @@
   namespace,
   ...
 }:
-with lib;
 let
   name = "arrs";
   cfg = config.${namespace}.services.${name};
@@ -55,141 +54,143 @@ let
   };
   # Enable radarr service
-  services.radarr = {
-    enable = true;
-    openFirewall = cfg.openFirewall;
-    user = "nix-apps";
-    group = "jallen-nas";
-    dataDir = "${cfg.configDir}/radarr";
-  };
-  # Enable Sonarr service
-  services.sonarr = {
-    enable = true;
-    openFirewall = cfg.openFirewall;
-    user = "nix-apps";
-    group = "jallen-nas";
-    dataDir = "${cfg.configDir}/sonarr";
-    package = pkgs.sonarr;
-  };
-  services.lidarr = {
-    enable = true;
-    openFirewall = cfg.openFirewall;
-    user = "nix-apps";
-    group = "jallen-nas";
-    dataDir = "${cfg.configDir}/lidarr";
-  };
-  # Enable Sabnzbd service
-  services.sabnzbd = {
-    enable = true;
-    user = "nix-apps";
-    group = "jallen-nas";
-    secretFiles = [
-      config.sops.templates."sabnzbd.ini".path
-    ];
-    configFile = null;
-    settings = lib.mkForce {
-      misc = {
-        host = "0.0.0.0";
-        port = 8280;
-        cache_limit = "10G";
-        download_dir = "${cfg.configDir}/downloads";
-        complete_dir = "${cfg.configDir}/incomplete";
-        username = "admin";
-      };
-      servers = {
-        "news.newsgroupdirect.com" = {
-          name = "news.newsgroupdirect.com";
-          displayname = "news.newsgroupdirect.com";
-          host = "news.newsgroupdirect.com";
-          port = 563;
-          timeout = 60;
-          connections = 8;
-          ssl = true;
-          ssl_verify = 2;
-          ssl_ciphers = "";
-          enable = true;
-          required = false;
-          optional = false;
-          retention = 0;
-          expire_date = "";
-          quota = "";
-          usage_at_start = 0;
-          priority = 0;
-          notes = "";
-        };
-      };
-      categories = {
-        "*" = {
-          name = "*";
-          order = 0;
-          pp = 3;
-          script = "None";
-          dir = "";
-          newzbin = "";
-          priority = 0;
-        };
-        audio = {
-          name = "audio";
-          order = 3;
-          pp = "";
-          script = "Default";
-          dir = "";
-          newzbin = "";
-          priority = -100;
-        };
-        software = {
-          name = "software";
-          order = 4;
-          pp = "";
-          script = "Default";
-          dir = "";
-          newzbin = "";
-          priority = -100;
-        };
-        "movies" = {
-          name = "movies";
-          order = 5;
-          pp = "";
-          script = "Default";
-          dir = "";
-          newzbin = "";
-          priority = -100;
-        };
-        "tv" = {
-          name = "tv";
-          order = 6;
-          pp = "";
-          script = "Default";
-          dir = "";
-          newzbin = "";
-          priority = -100;
-        };
-      };
-    };
-  };
-  services.deluge = {
-    enable = false;
-    user = "nix-apps";
-    group = "jallen-nas";
-    openFirewall = cfg.openFirewall;
-    dataDir = cfg.dataDir;
-    web = {
-      enable = true;
-      port = 8112;
-      openFirewall = cfg.openFirewall;
-    };
-  };
-  services.jackett = {
-    enable = false;
-    user = "nix-apps";
-    group = "jallen-nas";
-    openFirewall = cfg.openFirewall;
-  };
+  services = {
+    radarr = {
+      enable = true;
+      openFirewall = cfg.openFirewall;
+      user = "nix-apps";
+      group = "jallen-nas";
+      dataDir = "${cfg.configDir}/radarr";
+    };
+    # Enable Sonarr service
+    sonarr = {
+      enable = true;
+      openFirewall = cfg.openFirewall;
+      user = "nix-apps";
+      group = "jallen-nas";
+      dataDir = "${cfg.configDir}/sonarr";
+      package = pkgs.sonarr;
+    };
+    lidarr = {
+      enable = true;
+      openFirewall = cfg.openFirewall;
+      user = "nix-apps";
+      group = "jallen-nas";
+      dataDir = "${cfg.configDir}/lidarr";
+    };
+    # Enable Sabnzbd service
+    sabnzbd = {
+      enable = true;
+      user = "nix-apps";
+      group = "jallen-nas";
+      secretFiles = [
+        config.sops.templates."sabnzbd.ini".path
+      ];
+      configFile = null;
+      settings = lib.mkForce {
+        misc = {
+          host = "0.0.0.0";
+          port = 8280;
+          cache_limit = "10G";
+          download_dir = "${cfg.configDir}/downloads";
+          complete_dir = "${cfg.configDir}/incomplete";
+          username = "admin";
+        };
+        servers = {
+          "news.newsgroupdirect.com" = {
+            name = "news.newsgroupdirect.com";
+            displayname = "news.newsgroupdirect.com";
+            host = "news.newsgroupdirect.com";
+            port = 563;
+            timeout = 60;
+            connections = 8;
+            ssl = true;
+            ssl_verify = 2;
+            ssl_ciphers = "";
+            enable = true;
+            required = false;
+            optional = false;
+            retention = 0;
+            expire_date = "";
+            quota = "";
+            usage_at_start = 0;
+            priority = 0;
+            notes = "";
+          };
+        };
+        categories = {
+          "*" = {
+            name = "*";
+            order = 0;
+            pp = 3;
+            script = "None";
+            dir = "";
+            newzbin = "";
+            priority = 0;
+          };
+          audio = {
+            name = "audio";
+            order = 3;
+            pp = "";
+            script = "Default";
+            dir = "";
+            newzbin = "";
+            priority = -100;
+          };
+          software = {
+            name = "software";
+            order = 4;
+            pp = "";
+            script = "Default";
+            dir = "";
+            newzbin = "";
+            priority = -100;
+          };
+          "movies" = {
+            name = "movies";
+            order = 5;
+            pp = "";
+            script = "Default";
+            dir = "";
+            newzbin = "";
+            priority = -100;
+          };
+          "tv" = {
+            name = "tv";
+            order = 6;
+            pp = "";
+            script = "Default";
+            dir = "";
+            newzbin = "";
+            priority = -100;
+          };
+        };
+      };
+    };
+    deluge = {
+      enable = false;
+      user = "nix-apps";
+      group = "jallen-nas";
+      openFirewall = cfg.openFirewall;
+      dataDir = cfg.dataDir;
+      web = {
+        enable = true;
+        port = 8112;
+        openFirewall = cfg.openFirewall;
+      };
+    };
+    jackett = {
+      enable = false;
+      user = "nix-apps";
+      group = "jallen-nas";
+      openFirewall = cfg.openFirewall;
+    };
+  };
   };
 };
 in
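This file (and the nextcloud module below) also drops a top-level `with lib;`. Once it is gone, library functions must be referenced explicitly, which is why the new code uses qualified calls like `lib.mkForce`. A sketch of the usual replacement, binding only what the module needs (the inherit list here is illustrative, not taken from the commit):

  { lib, ... }:
  let
    # explicit bindings instead of pulling all of lib into scope
    inherit (lib) mkIf mkForce;
  in
  {
    # ...
  }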

View File

@@ -167,53 +167,74 @@ let
   # but /var/lib/crowdsec already exists as a real dir. Disabling DynamicUser on
   # those two services lets them use the real crowdsec user/group instead, which is
   # consistent with how crowdsec.service itself runs.
-  systemd.services.crowdsec.serviceConfig = lib.mkMerge [
-    { DynamicUser = lib.mkForce false; }
-    (lib.mkIf (cfg.ntfy.enable && cfg.ntfy.envFile != "") {
-      EnvironmentFile = [ cfg.ntfy.envFile ];
-    })
-  ];
-  systemd.services.crowdsec-firewall-bouncer.serviceConfig.DynamicUser = lib.mkForce false;
-  systemd.services.crowdsec-firewall-bouncer-register.serviceConfig.DynamicUser = lib.mkForce false;
-  # The upstream unit has Requires= but no After= for the register service, so
-  # the bouncer starts in parallel and hits LoadCredential before the key file
-  # exists. Adding After= enforces that the register service completes first.
-  systemd.services.crowdsec-firewall-bouncer.after = [ "crowdsec-firewall-bouncer-register.service" ];
-  # The upstream register script exits with an error when the bouncer is already
-  # registered in the LAPI but the local api-key.cred file is missing (e.g. after
-  # a system wipe or impermanence rotation). Override the script so that when the
-  # key file is absent it deletes the stale registration and re-registers, producing
-  # a fresh key file.
-  systemd.services.crowdsec-firewall-bouncer-register.script =
-    let
-      apiKeyFile = "/var/lib/crowdsec-firewall-bouncer-register/api-key.cred";
-      bouncerName = "nas-bouncer";
-      cscli = lib.getExe' config.services.crowdsec.package "cscli";
-      jq = lib.getExe pkgs.jq;
-    in
-    lib.mkForce ''
-      if ${cscli} bouncers list --output json | ${jq} -e -- 'any(.[]; .name == "${bouncerName}")' >/dev/null; then
-        # Bouncer already registered. Verify the API key is still present.
-        if [ ! -f ${apiKeyFile} ]; then
-          echo "Bouncer registered but API key file missing; deleting stale registration and re-registering"
-          ${cscli} bouncers delete -- ${bouncerName}
-          rm -f '${apiKeyFile}'
-          if ! ${cscli} bouncers add --output raw -- ${bouncerName} >${apiKeyFile}; then
-            rm -f '${apiKeyFile}'
-            exit 1
-          fi
-        fi
-      else
-        # Bouncer not registered; doing a fresh registration.
-        rm -f '${apiKeyFile}'
-        if ! ${cscli} bouncers add --output raw -- ${bouncerName} >${apiKeyFile}; then
-          rm -f '${apiKeyFile}'
-          exit 1
-        fi
-      fi
-    '';
+  systemd = {
+    # The ntfy plugin config YAML (with credentials baked in) is managed as a
+    # SOPS template in sops.nix — it renders to /run/secrets/rendered/crowdsec/
+    # notifications/ntfy.yaml at runtime. We use a tmpfiles symlink to expose
+    # it at the path CrowdSec scans, since environment.etc can't reference
+    # /run paths as source.
+    tmpfiles.rules = lib.mkIf cfg.ntfy.enable [
+      "L /etc/crowdsec/notifications/ntfy.yaml - - - - ${
+        config.sops.templates."crowdsec/notifications/ntfy.yaml".path
+      }"
+    ];
+    services = {
+      crowdsec = {
+        serviceConfig = lib.mkMerge [
+          { DynamicUser = lib.mkForce false; }
+          (lib.mkIf (cfg.ntfy.enable && cfg.ntfy.envFile != "") {
+            EnvironmentFile = [ cfg.ntfy.envFile ];
+          })
+        ];
+      };
+      # The upstream unit has Requires= but no After= for the register service, so
+      # the bouncer starts in parallel and hits LoadCredential before the key file
+      # exists. Adding After= enforces that the register service completes first.
+      crowdsec-firewall-bouncer = {
+        serviceConfig.DynamicUser = lib.mkForce false;
+        after = [ "crowdsec-firewall-bouncer-register.service" ];
+      };
+      crowdsec-firewall-bouncer-register = {
+        serviceConfig.DynamicUser = lib.mkForce false;
+        # The upstream register script exits with an error when the bouncer is already
+        # registered in the LAPI but the local api-key.cred file is missing (e.g. after
+        # a system wipe or impermanence rotation). Override the script so that when the
+        # key file is absent it deletes the stale registration and re-registers, producing
+        # a fresh key file.
+        script =
+          let
+            apiKeyFile = "/var/lib/crowdsec-firewall-bouncer-register/api-key.cred";
+            bouncerName = "nas-bouncer";
+            cscli = lib.getExe' config.services.crowdsec.package "cscli";
+            jq = lib.getExe pkgs.jq;
+          in
+          lib.mkForce ''
+            if ${cscli} bouncers list --output json | ${jq} -e -- 'any(.[]; .name == "${bouncerName}")' >/dev/null; then
+              # Bouncer already registered. Verify the API key is still present.
+              if [ ! -f ${apiKeyFile} ]; then
+                echo "Bouncer registered but API key file missing; deleting stale registration and re-registering"
+                ${cscli} bouncers delete -- ${bouncerName}
+                rm -f '${apiKeyFile}'
+                if ! ${cscli} bouncers add --output raw -- ${bouncerName} >${apiKeyFile}; then
+                  rm -f '${apiKeyFile}'
+                  exit 1
+                fi
+              fi
+            else
+              # Bouncer not registered; doing a fresh registration.
+              rm -f '${apiKeyFile}'
+              if ! ${cscli} bouncers add --output raw -- ${bouncerName} >${apiKeyFile}; then
+                rm -f '${apiKeyFile}'
+                exit 1
+              fi
+            fi
+          '';
+      };
+    };
+  };
   # crowdsec-firewall-bouncer-register calls cscli without -c, so cscli
   # looks for /etc/crowdsec/config.yaml. The upstream crowdsec.service uses
@@ -277,17 +298,6 @@ let
     };
   };
-  # The ntfy plugin config YAML (with credentials baked in) is managed as a
-  # SOPS template in sops.nix — it renders to /run/secrets/rendered/crowdsec/
-  # notifications/ntfy.yaml at runtime. We use a tmpfiles symlink to expose
-  # it at the path CrowdSec scans, since environment.etc can't reference
-  # /run paths as source.
-  systemd.tmpfiles.rules = lib.mkIf cfg.ntfy.enable [
-    "L /etc/crowdsec/notifications/ntfy.yaml - - - - ${
-      config.sops.templates."crowdsec/notifications/ntfy.yaml".path
-    }"
-  ];
   };
 };
 in
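The ordering fix in this file reflects a general systemd rule: Requires= only expresses a dependency, not ordering, so without a matching After= both units are started in parallel. A minimal sketch, with hypothetical unit names:

  systemd.services.consumer = {
    # pulls producer.service into the same transaction...
    requires = [ "producer.service" ];
    # ...and additionally waits for it to finish starting
    after = [ "producer.service" ];
  };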

View File

@@ -31,83 +31,89 @@ let
 in
 {
   config = lib.mkIf cfg.enable {
-    services.livekit = {
-      enable = true;
-      openFirewall = true;
-      settings.room.auto_create = false;
-      inherit keyFile;
-    };
-    services.lk-jwt-service = {
-      enable = true;
-      port = 8585;
-      # can be on the same virtualHost as synapse
-      livekitUrl = "wss://mjallen.dev/livekit/sfu";
-      inherit keyFile;
-    };
-    # generate the key when needed
-    systemd.services.livekit-key = {
-      before = [
-        "lk-jwt-service.service"
-        "livekit.service"
-      ];
-      wantedBy = [ "multi-user.target" ];
-      path = with pkgs; [
-        livekit
-        coreutils
-        gawk
-      ];
-      script = ''
-        echo "Key missing, generating key"
-        echo "lk-jwt-service: $(livekit-server generate-keys | tail -1 | awk '{print $3}')" > "${keyFile}"
-      '';
-      serviceConfig.Type = "oneshot";
-      unitConfig.ConditionPathExists = "!${keyFile}";
-    };
-    # restrict access to livekit room creation to a homeserver
-    systemd.services.lk-jwt-service.environment.LIVEKIT_FULL_ACCESS_HOMESERVERS = "mjallen.dev";
-    services.nginx = {
-      enable = true;
-      defaultHTTPListenPort = 8188;
-      virtualHosts = {
-        "matrix.mjallen.dev".locations = {
-          "= /.well-known/matrix/client" = {
-            alias = file;
-            extraConfig = ''
-              default_type application/json;
-              add_header Access-Control-Allow-Origin "*";
-            '';
-          };
-        };
-        "mjallen.dev".locations = {
-          "= /.well-known/matrix/client" = {
-            alias = file;
-            extraConfig = ''
-              default_type application/json;
-              add_header Access-Control-Allow-Origin "*";
-            '';
-          };
-          "^~ /livekit/jwt/" = {
-            priority = 400;
-            proxyPass = "http://[::1]:${toString config.services.lk-jwt-service.port}/";
-          };
-          "^~ /livekit/sfu/" = {
-            extraConfig = ''
-              proxy_send_timeout 120;
-              proxy_read_timeout 120;
-              proxy_buffering off;
-              proxy_set_header Accept-Encoding gzip;
-              proxy_set_header Upgrade $http_upgrade;
-              proxy_set_header Connection "upgrade";
-            '';
-            priority = 400;
-            proxyPass = "http://[::1]:${toString config.services.livekit.settings.port}/";
-            proxyWebsockets = true;
-          };
-        };
-      };
-    };
+    services = {
+      livekit = {
+        enable = true;
+        openFirewall = true;
+        settings.room.auto_create = false;
+        inherit keyFile;
+      };
+      lk-jwt-service = {
+        enable = true;
+        port = 8585;
+        # can be on the same virtualHost as synapse
+        livekitUrl = "wss://mjallen.dev/livekit/sfu";
+        inherit keyFile;
+      };
+      nginx = {
+        enable = true;
+        defaultHTTPListenPort = 8188;
+        virtualHosts = {
+          "matrix.mjallen.dev".locations = {
+            "= /.well-known/matrix/client" = {
+              alias = file;
+              extraConfig = ''
+                default_type application/json;
+                add_header Access-Control-Allow-Origin "*";
+              '';
+            };
+          };
+          "mjallen.dev".locations = {
+            "= /.well-known/matrix/client" = {
+              alias = file;
+              extraConfig = ''
+                default_type application/json;
+                add_header Access-Control-Allow-Origin "*";
+              '';
+            };
+            "^~ /livekit/jwt/" = {
+              priority = 400;
+              proxyPass = "http://[::1]:${toString config.services.lk-jwt-service.port}/";
+            };
+            "^~ /livekit/sfu/" = {
+              extraConfig = ''
+                proxy_send_timeout 120;
+                proxy_read_timeout 120;
+                proxy_buffering off;
+                proxy_set_header Accept-Encoding gzip;
+                proxy_set_header Upgrade $http_upgrade;
+                proxy_set_header Connection "upgrade";
+              '';
+              priority = 400;
+              proxyPass = "http://[::1]:${toString config.services.livekit.settings.port}/";
+              proxyWebsockets = true;
+            };
+          };
+        };
+      };
+    };
+    # generate the key when needed
+    systemd = {
+      services = {
+        livekit-key = {
+          before = [
+            "lk-jwt-service.service"
+            "livekit.service"
+          ];
+          wantedBy = [ "multi-user.target" ];
+          path = with pkgs; [
+            livekit
+            coreutils
+            gawk
+          ];
+          script = ''
+            echo "Key missing, generating key"
+            echo "lk-jwt-service: $(livekit-server generate-keys | tail -1 | awk '{print $3}')" > "${keyFile}"
+          '';
+          serviceConfig.Type = "oneshot";
+          unitConfig.ConditionPathExists = "!${keyFile}";
+        };
+        # restrict access to livekit room creation to a homeserver
+        lk-jwt-service.environment.LIVEKIT_FULL_ACCESS_HOMESERVERS = "mjallen.dev";
+      };
+    };
   };
 }
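The key-generation unit in this file uses ConditionPathExists with a leading "!", the usual systemd idiom for a run-once initializer: the unit is silently skipped (not failed) whenever the file already exists. A minimal sketch with hypothetical names and paths:

  systemd.services.generate-secret = {
    wantedBy = [ "multi-user.target" ];
    before = [ "consumer.service" ];
    serviceConfig.Type = "oneshot";
    # the leading "!" inverts the check: run only while the file is absent
    unitConfig.ConditionPathExists = "!/var/lib/example/secret.key";
    script = "install -m 600 /dev/null /var/lib/example/secret.key";
  };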

View File

@@ -5,7 +5,6 @@
   namespace,
   ...
 }:
-with lib;
 let
   name = "nextcloud";
   cfg = config.${namespace}.services.${name};
@@ -18,130 +17,143 @@ let
   options = { };
   moduleConfig = {
-    # Override the empty systemd service created by mkModule.
-    # The native NixOS nextcloud module doesn't create a persistent "nextcloud.service"
-    # (it uses PHP-FPM pools and cron instead), so we clear this to avoid the error:
-    # "Service has no ExecStart=, ExecStop=, or SuccessAction=. Refusing."
-    systemd.services.nextcloud = lib.mkForce { };
-    # Setup the native NixOS Nextcloud service
-    services.nextcloud = {
-      enable = true;
-      package = pkgs.nextcloud33;
-      hostName = "cloud.mjallen.dev";
-      home = "${cfg.configDir}/nextcloud";
-      datadir = "${cfg.dataDir}/nextcloud";
-      configureRedis = true;
-      enableImagemagick = true;
-      appstoreEnable = true;
-      # extraApps = with pkgs.${namespace}; {
-      #   richdocumentscode = nextcloud-code-server;
-      #   # richdocuments = nextcloud-richdocuments;
-      # };
-      # Use PostgreSQL for database
-      config = {
-        dbtype = "pgsql";
-        dbname = "nextcloud";
-        dbuser = "nextcloud";
-        dbhost = "/run/postgresql"; # Socket directory
-        # dbpassFile = config.sops.secrets."jallen-nas/nextcloud/dbpassword".path;
-        adminuser = "mjallen";
-        adminpassFile = config.sops.secrets."matt_password".path;
-      };
-      # PHP settings
-      phpOptions = lib.mkOverride 90 {
-        memory_limit = "512M";
-        upload_max_filesize = "10G";
-        post_max_size = "10G";
-        output_buffering = "0";
-        "opcache.interned_strings_buffer" = "16";
-        "opcache.max_accelerated_files" = "10000";
-        "opcache.memory_consumption" = "128";
-        "opcache.save_comments" = "1";
-        "opcache.revalidate_freq" = "1";
-      };
-      # Configure caching for better performance
-      caching = {
-        apcu = true;
-        redis = true;
-        memcached = false;
-      };
-      # Auto-update apps
-      autoUpdateApps = {
-        enable = false;
-        startAt = "05:00:00";
-      };
-      # Configure HTTPS if enabled
-      https = false;
-      settings = {
-        installed = true;
-        auth.bruteforce.protection.enabled = false;
-        user_oidc = {
-          auto_provision = false;
-        };
-        overwrite.cli.url = "https://cloud.mjallen.dev";
-        overwriteprotocol = "https";
-        overwritehost = "cloud.mjallen.dev";
-        log_type = "file";
-        default_phone_region = "US";
-        trusted_proxies = [
-          net.hosts.nas.lan
-          "127.0.0.1"
-          "::1"
-        ];
-        trusted_domains = [
-          "cloud.mjallen.dev"
-          "${net.hosts.nas.lan}:${toString cfg.port}"
-        ];
-        enabledPreviewProviders = [
-          "OC\\Preview\\PNG"
-          "OC\\Preview\\JPEG"
-          "OC\\Preview\\GIF"
-          "OC\\Preview\\BMP"
-          "OC\\Preview\\XBitmap"
-          "OC\\Preview\\Krita"
-          "OC\\Preview\\WebP"
-          "OC\\Preview\\MarkDown"
-          "OC\\Preview\\TXT"
-          "OC\\Preview\\OpenDocument"
-        ];
-      };
-    };
-    users.users.nextcloud.isSystemUser = lib.mkForce true;
-    users.users.nextcloud.isNormalUser = lib.mkForce false;
-    users.groups.nextcloud = { };
-    # Ensure nextcloud services start after PostgreSQL is ready.
-    # The upstream NixOS module only adds this ordering when services.postgresql.enable
-    # is true in the same config, but here PostgreSQL is managed separately.
-    systemd.services.nextcloud-setup = {
-      after = [ "postgresql.service" ];
-      requires = [ "postgresql.service" ];
-    };
-    systemd.services.nextcloud-update-db = {
-      after = [ "postgresql.service" ];
-      requires = [ "postgresql.service" ];
-    };
-    # Configure web server
-    services.nginx = {
-      enable = true;
-      group = "jallen-nas";
-      virtualHosts.${config.services.nextcloud.hostName} = {
-        listen = [
-          {
-            addr = "0.0.0.0";
-            port = cfg.port;
-            ssl = false;
-          }
-        ];
-      };
-    };
+    services = {
+      nextcloud = {
+        enable = true;
+        package = pkgs.nextcloud33;
+        hostName = "cloud.mjallen.dev";
+        home = "${cfg.configDir}/nextcloud";
+        datadir = "${cfg.dataDir}/nextcloud";
+        configureRedis = true;
+        enableImagemagick = true;
+        appstoreEnable = true;
+        # extraApps = with pkgs.${namespace}; {
+        #   richdocumentscode = nextcloud-code-server;
+        #   # richdocuments = nextcloud-richdocuments;
+        # };
+        # Use PostgreSQL for database
+        config = {
+          dbtype = "pgsql";
+          dbname = "nextcloud";
+          dbuser = "nextcloud";
+          dbhost = "/run/postgresql"; # Socket directory
+          # dbpassFile = config.sops.secrets."jallen-nas/nextcloud/dbpassword".path;
+          adminuser = "mjallen";
+          adminpassFile = config.sops.secrets."matt_password".path;
+        };
+        # PHP settings
+        phpOptions = lib.mkOverride 90 {
+          memory_limit = "512M";
+          upload_max_filesize = "10G";
+          post_max_size = "10G";
+          output_buffering = "0";
+          "opcache.interned_strings_buffer" = "16";
+          "opcache.max_accelerated_files" = "10000";
+          "opcache.memory_consumption" = "128";
+          "opcache.save_comments" = "1";
+          "opcache.revalidate_freq" = "1";
+        };
+        # Configure caching for better performance
+        caching = {
+          apcu = true;
+          redis = true;
+          memcached = false;
+        };
+        # Auto-update apps
+        autoUpdateApps = {
+          enable = false;
+          startAt = "05:00:00";
+        };
+        # Configure HTTPS if enabled
+        https = false;
+        settings = {
+          installed = true;
+          auth.bruteforce.protection.enabled = false;
+          user_oidc = {
+            auto_provision = false;
+          };
+          overwrite.cli.url = "https://cloud.mjallen.dev";
+          overwriteprotocol = "https";
+          overwritehost = "cloud.mjallen.dev";
+          log_type = "file";
+          default_phone_region = "US";
+          trusted_proxies = [
+            net.hosts.nas.lan
+            "127.0.0.1"
+            "::1"
+          ];
+          trusted_domains = [
+            "cloud.mjallen.dev"
+            "${net.hosts.nas.lan}:${toString cfg.port}"
+          ];
+          enabledPreviewProviders = [
+            "OC\\Preview\\PNG"
+            "OC\\Preview\\JPEG"
+            "OC\\Preview\\GIF"
+            "OC\\Preview\\BMP"
+            "OC\\Preview\\XBitmap"
+            "OC\\Preview\\Krita"
+            "OC\\Preview\\WebP"
+            "OC\\Preview\\MarkDown"
+            "OC\\Preview\\TXT"
+            "OC\\Preview\\OpenDocument"
+          ];
+        };
+      };
+      nginx = {
+        enable = true;
+        group = "jallen-nas";
+        virtualHosts.${config.services.nextcloud.hostName} = {
+          listen = [
+            {
+              addr = "0.0.0.0";
+              port = cfg.port;
+              ssl = false;
+            }
+          ];
+        };
+      };
+    };
+    users = {
+      users = {
+        nextcloud = {
+          isSystemUser = lib.mkForce true;
+          isNormalUser = lib.mkForce false;
+        };
+      };
+      groups = {
+        nextcloud = { };
+      };
+    };
+    # Ensure nextcloud services start after PostgreSQL is ready.
+    # The upstream NixOS module only adds this ordering when services.postgresql.enable
+    # is true in the same config, but here PostgreSQL is managed separately.
+    systemd = {
+      services = {
+        # Override the empty systemd service created by mkModule.
+        # The native NixOS nextcloud module doesn't create a persistent "nextcloud.service"
+        # (it uses PHP-FPM pools and cron instead), so we clear this to avoid the error:
+        # "Service has no ExecStart=, ExecStop=, or SuccessAction=. Refusing."
+        nextcloud = lib.mkForce { };
+        nextcloud-setup = {
+          after = [ "postgresql.service" ];
+          requires = [ "postgresql.service" ];
+        };
+        nextcloud-update-db = {
+          after = [ "postgresql.service" ];
+          requires = [ "postgresql.service" ];
+        };
+      };
+    };
   };
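The nextcloud module leans on the module system's priority mechanism: a plain assignment has priority 100, lib.mkOverride 90 (as used for phpOptions above) beats it by a small margin, lib.mkForce is mkOverride 50, and lib.mkDefault is mkOverride 1000; when modules are merged, the lowest number wins. A minimal sketch with a hypothetical option, one assignment per module:

  # module A
  { example.greeting = lib.mkDefault "hello"; }    # priority 1000: loses to everything below
  # module B
  { example.greeting = "hi"; }                     # priority 100: plain assignment
  # module C
  { example.greeting = lib.mkOverride 90 "hey"; }  # priority 90: beats the plain assignment
  # module D
  { example.greeting = lib.mkForce "yo"; }         # priority 50: wins overall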