# NixOS module for host "jallen-nas": system services configuration.
{
  inputs,
  outputs,
  config,
  lib,
  pkgs,
  ...
}:
let
  # Master switch for the whole GUI / remote-desktop stack
  # (X11, Plasma, SDDM, xRDP). false == headless NAS.
  enableDisplayManager = false;
  hostname = "jallen-nas";
in
{
  # Services configs
  services = {
    # Removable-media / disk management over D-Bus.
    udisks2.enable = true;

    # Enable the X11 windowing system.
    xserver = {
      enable = enableDisplayManager;

      # Enable the Plasma Desktop Environment.
      # NOTE(review): the original comment said "Plasma 6" but this option
      # enables Plasma *5* — confirm which version is actually intended.
      desktopManager.plasma5.enable = enableDisplayManager;
    };

    displayManager = {
      sddm.enable = enableDisplayManager;
      #defaultSession = "plasma";
    };

    # Set to enable Flatpak
    flatpak.enable = false;

    # Enable RDP (follows the display-manager master switch).
    xrdp = {
      enable = enableDisplayManager;
      defaultWindowManager = "startplasma-x11";
      openFirewall = enableDisplayManager;
    };

    # mDNS/zeroconf discovery: advertise this host (and the SMB share
    # below) on the LAN.
    avahi = {
      enable = true;
      nssmdns4 = true;
      publish = {
        enable = true;
        addresses = true;
        domain = true;
        hinfo = true;
        userServices = true;
        workstation = true;
      };
      extraServiceFiles = {
        # TODO is this needed?
        # Advertises the Samba share via mDNS so it appears in file browsers.
        smb = ''
          <?xml version="1.0" standalone='no'?><!--*-nxml-*-->
          <!DOCTYPE service-group SYSTEM "avahi-service.dtd">
          <service-group>
            <name replace-wildcards="yes">%h</name>
            <service>
              <type>_smb._tcp</type>
              <port>445</port>
            </service>
          </service-group>
        '';
      };
    };

    # APC UPS monitoring daemon.
    apcupsd = {
      enable = true;
    };

    # Grafana dashboard (currently disabled; settings kept so it can be
    # re-enabled without reconfiguring).
    grafana = {
      enable = false;
      settings.server = {
        http_port = 2342;
        domain = hostname;
        serve_from_sub_path = true;
        http_addr = "";
      };
      dataDir = "/media/nas/ssd/nix-app-data/grafana";
    };

    # Prometheus + exporters (currently disabled as a whole; the exporter
    # blocks stay configured so flipping `enable` restores the stack).
    prometheus = {
      enable = false;
      port = 9001;
      exporters = {
        node = {
          enable = true;
          enabledCollectors = [
            "diskstats"
            "systemd"
          ];
          port = 9002;
        };
        # S.M.A.R.T. health metrics for every disk in the array.
        smartctl = {
          enable = true;
          group = "disk";
          devices = [
            "/dev/sda"
            "/dev/sdb"
            "/dev/sdc"
            "/dev/sdd"
            "/dev/sde"
            "/dev/sdf"
            "/dev/sdg"
            "/dev/sdh"
            "/dev/sdi"
            "/dev/nvme0n1"
            "/dev/nvme1n1"
          ];
        };
      };

      # Scrape the local exporters; ports are referenced through `config`
      # so they can never drift out of sync with the definitions above.
      scrapeConfigs = [
        {
          job_name = hostname;
          static_configs = [
            {
              targets = [
                "127.0.0.1:${toString config.services.prometheus.exporters.node.port}"
                "127.0.0.1:${toString config.services.prometheus.exporters.smartctl.port}"
              ];
            }
          ];
        }
      ];
    };

    # Tailscale VPN: advertises this host as an exit node and authenticates
    # from a key file kept outside the Nix store.
    tailscale = {
      enable = true;
      openFirewall = true;
      useRoutingFeatures = "client";
      extraUpFlags = [ "--advertise-exit-node" ];
      authKeyFile = "/media/nas/ssd/nix-app-data/tailscale/auth";
    };

    # Periodic btrfs scrub over every pool/subvolume (currently disabled).
    btrfs = {
      autoScrub.enable = false;
      autoScrub.fileSystems = [
        "/nix"
        "/root"
        "/etc"
        "/var/log"
        "/home"
        "/media/nas/ssd/nix-app-data"
        "/media/nas/ssd/ssd_app_data"
        "/media/nas/ssd/mariadb"
        "/media/nas/main/3d_printer"
        "/media/nas/main/backup"
        "/media/nas/main/documents"
        "/media/nas/main/nextcloud"
        "/media/nas/main/movies"
        "/media/nas/main/tv"
        "/media/nas/main/isos"
      ];
    };

    # Authentik identity provider; secrets live in an env file outside the store.
    authentik = {
      enable = true;
      environmentFile = "/media/nas/ssd/nix-app-data/authentik/.env";
    };

    # PostgreSQL backing store (used at least by authentik, below).
    postgresql = {
      enable = true;
      package = pkgs.postgresql_16;
      dataDir = "/media/nas/ssd/nix-app-data/postgresql";
      ensureDatabases = [ "authentik" ];
      ensureUsers = [
        {
          name = "authentik";
          ensureDBOwnership = true;
        }
      ];
    };

    # One Redis instance per consumer, on distinct ports.
    redis = {
      servers = {
        authentik = {
          enable = true;
          port = 6379;
        };

        nextcloud = {
          enable = true;
          port = 6380;
        };
      };
    };
  };

  systemd.user.services = {
    # Keep the Protonmail IMAP/SMTP bridge running for the user session.
    protonmail-bridge = {
      description = "Protonmail Bridge";
      enable = true;
      script = "${pkgs.protonmail-bridge}/bin/protonmail-bridge --noninteractive";
      path = [ pkgs.pass pkgs.protonmail-bridge ];
      wantedBy = [ "multi-user.target" ];
      partOf = [ "multi-user.target" ];
    };
  };

  systemd.services = {

    # Copy the SSD pool into the main-pool backup area.
    # --ignore-existing makes this additive only: it never overwrites or
    # deletes files already present in the destination.
    rsync-ssd = {
      path = [ pkgs.bash pkgs.rsync ];
      script = ''
        rsync -rtpogvPlHzs --ignore-existing /media/nas/ssd /media/nas/main/backup/ssd
      '';
    };

    tailscale-autoconnect = {
      # Disabled. NOTE(review): services.tailscale.authKeyFile (above)
      # appears to supersede this manual login — confirm before deleting.
      enable = false;
      description = "Automatic connection to Tailscale";

      # make sure tailscale is running before trying to connect to tailscale
      after = [
        "network-pre.target"
        "tailscale.service"
      ];
      wants = [
        "network-pre.target"
        "tailscale.service"
      ];
      wantedBy = [ "multi-user.target" ];

      # set this service as a oneshot job
      serviceConfig.Type = "oneshot";

      # have the job run this shell script
      script = with pkgs; ''
        # wait for tailscaled to settle
        sleep 2

        # check if we are already authenticated to tailscale
        status="$(${tailscale}/bin/tailscale status -json | ${jq}/bin/jq -r .BackendState)"
        # BUGFIX: $status must be quoted — if jq returns an empty string
        # (tailscaled not answering yet), the unquoted test `[ = "Running" ]`
        # is a shell syntax error and the unit fails.
        if [ "$status" = "Running" ]; then # if so, then do nothing
          exit 0
        fi

        # otherwise authenticate with tailscale
        # FIXME(security): this key is committed in plain text. Rotate it and
        # read it from a file (cf. services.tailscale.authKeyFile) instead.
        ${tailscale}/bin/tailscale up -authkey nodekey:e4557e761f8fa2cb51a189d32484092036d3954b61502b7e19688869a5107707
      '';
    };

    # Glances system monitor in web-server mode (-w).
    glances-server = {
      path = [
        pkgs.bash
        pkgs.glances
      ];
      script = ''
        glances -w
      '';
      wantedBy = [ "multi-user.target" ];
    };
  };
}