# nix/nixos/beefcake.nix
/*
if your fans get loud:
# enable manual fan control
sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x01 0x00
# set fan speed; the last byte is the desired speed (a decimal percentage, 0x00 here)
sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00
*/
{
/*
inputs,
outputs,
*/
lib,
config,
pkgs,
...
}: {
system.stateVersion = "24.05";
home-manager.users.daniel.home.stateVersion = "24.05";
networking.hostName = "beefcake";
imports = [
{
# hardware and boot module
networking.hostId = "541ede55";
boot = {
zfs = {
extraPools = ["zstorage"];
};
supportedFilesystems = {
zfs = true;
};
initrd.supportedFilesystems = {
zfs = true;
};
# kernelPackages = config.boot.zfs.package.latestCompatibleLinuxPackages;
initrd.availableKernelModules = ["ehci_pci" "mpt3sas" "usbhid" "sd_mod"];
kernelModules = ["kvm-intel"];
kernelParams = ["nohibernate"];
loader.systemd-boot.enable = true;
loader.efi.canTouchEfiVariables = true;
};
fileSystems."/" = {
device = "/dev/disk/by-uuid/992ce55c-7507-4d6b-938c-45b7e891f395";
fsType = "ext4";
};
fileSystems."/boot" = {
device = "/dev/disk/by-uuid/B6C4-7CF4";
fsType = "vfat";
options = ["fmask=0022" "dmask=0022"];
};
/*
# should be mounted by auto-import; see boot.zfs.extraPools
fileSystems."/storage" = {
device = "zstorage/storage";
fsType = "zfs";
};
*/
fileSystems."/nix" = {
device = "zstorage/nix";
fsType = "zfs";
};
services.zfs.autoScrub.enable = true;
services.zfs.autoSnapshot.enable = true;
# TODO: nfs with zfs?
# services.nfs.server.enable = true;
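# a minimal sketch for the TODO above, assuming /storage/family is the
# export and the LAN subnet below (both hypothetical, untested here):
# services.nfs.server = {
#   enable = true;
#   exports = ''
#     /storage/family 192.168.0.0/16(rw,no_subtree_check)
#   '';
# };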
}
({
options,
config,
...
}: let
inherit (lib) mkOption types;
in {
options.services.restic.commonPaths = mkOption {
type = types.nullOr (types.listOf types.str);
default = [];
description = ''
Which paths to backup, in addition to ones specified via
`dynamicFilesFrom`. If null or an empty array and
`dynamicFilesFrom` is also null, no backup command will be run.
This can be used to create a prune-only job.
'';
example = [
"/var/lib/postgresql"
"/home/user/backup"
];
};
})
{
# sops secrets config
sops = {
defaultSopsFile = ../secrets/beefcake/secrets.yml;
age = {
sshKeyPaths = ["/etc/ssh/ssh_host_ed25519_key"];
keyFile = "/var/lib/sops-nix/key.txt";
generateKey = true;
};
};
}
{
sops.secrets = {
netlify-ddns-password = {mode = "0400";};
};
services.deno-netlify-ddns-client = {
passwordFile = config.sops.secrets.netlify-ddns-password.path;
};
}
{
# nix binary cache
sops.secrets = {
nix-cache-priv-key = {mode = "0400";};
};
services.nix-serve = {
enable = true;
secretKeyFile = config.sops.secrets.nix-cache-priv-key.path;
};
services.caddy.virtualHosts."nix.h.lyte.dev" = {
extraConfig = ''
reverse_proxy :${toString config.services.nix-serve.port}
'';
};
networking.firewall.allowedTCPPorts = [
80
443
];
# regularly build this flake so we have stuff in the cache
# TODO: schedule this for nightly builds instead of intervals based on boot time (see the sketch below)
systemd.timers."build-lytedev-flake" = {
wantedBy = ["timers.target"];
timerConfig = {
OnBootSec = "30m"; # 30 minutes after booting
OnUnitActiveSec = "1d"; # every day afterwards
Unit = "build-lytedev-flake.service";
};
};
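# a minimal sketch for the nightly-builds TODO above; OnCalendar would
# replace the boot-relative settings in timerConfig (the time chosen here
# is an assumption):
# systemd.timers."build-lytedev-flake".timerConfig = {
#   OnCalendar = "*-*-* 02:00:00";
#   Persistent = true; # catch up after boot if the scheduled run was missed
# };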
systemd.tmpfiles.settings = {
"10-daniel-nightly-flake-build" = {
"/home/daniel/.home/.cache/nightly-flake-builds" = {
"d" = {
mode = "0750";
user = "daniel";
group = "daniel";
};
};
};
};
systemd.services."build-lytedev-flake" = {
# TODO: might want to add root for the most recent results?
script = ''
# build self (main server) configuration
nixos-rebuild build --flake git+https://git.lyte.dev/lytedev/nix.git --accept-flake-config
# build desktop configuration
nixos-rebuild build --flake git+https://git.lyte.dev/lytedev/nix.git#dragon --accept-flake-config
# build main laptop configuration
nixos-rebuild build --flake git+https://git.lyte.dev/lytedev/nix.git#foxtrot --accept-flake-config
'';
path = with pkgs; [openssh git nixos-rebuild];
serviceConfig = {
# TODO: mkdir -p...?
WorkingDirectory = "/home/daniel/.home/.cache/nightly-flake-builds";
Type = "oneshot";
User = "daniel";
};
};
networking = {
extraHosts = ''
::1 nix.h.lyte.dev
127.0.0.1 nix.h.lyte.dev
'';
};
}
{
services.headscale = {
enable = false; # TODO: setup headscale?
address = "127.0.0.1";
port = 7777;
settings = {
server_url = "https://tailscale.vpn.h.lyte.dev";
db_type = "sqlite3";
db_path = "/var/lib/headscale/db.sqlite";
derp.server = {
enable = true;
region_id = 999;
stun_listen_addr = "0.0.0.0:3478";
};
dns_config = {
magic_dns = true;
base_domain = "vpn.h.lyte.dev";
domains = [
"ts.vpn.h.lyte.dev"
];
nameservers = [
"1.1.1.1"
# "192.168.0.1"
];
override_local_dns = true;
};
};
};
services.caddy.virtualHosts."tailscale.vpn.h.lyte.dev" = lib.mkIf config.services.headscale.enable {
extraConfig = ''
reverse_proxy http://localhost:${toString config.services.headscale.port}
'';
};
networking.firewall.allowedUDPPorts = lib.mkIf config.services.headscale.enable [3478];
}
{
services.restic.commonPaths = ["/var/lib/soju" "/var/lib/private/soju"];
services.soju = {
enable = true;
listen = ["irc+insecure://:6667"];
};
networking.firewall.allowedTCPPorts = [
6667
];
}
{
# nextcloud
users.users.nextcloud = {
isSystemUser = true;
createHome = false;
group = "nextcloud";
};
users.groups.nextcloud = {};
sops.secrets = {
nextcloud-admin-password = {
owner = "nextcloud";
group = "nextcloud";
mode = "400";
};
};
systemd.tmpfiles.settings = {
"10-nextcloud" = {
"/storage/nextcloud" = {
"d" = {
mode = "0750";
user = "nextcloud";
group = "nextcloud";
};
};
};
};
services.restic.commonPaths = [
"/storage/nextcloud"
];
services.postgresql = {
ensureDatabases = ["nextcloud"];
ensureUsers = [
{
name = "nextcloud";
ensureDBOwnership = true;
}
];
};
services.nextcloud = {
enable = false;
hostName = "nextcloud.h.lyte.dev";
maxUploadSize = "100G";
extraAppsEnable = true;
autoUpdateApps.enable = true;
extraApps = with config.services.nextcloud.package.packages.apps; {
inherit calendar contacts notes onlyoffice tasks;
};
package = pkgs.nextcloud28;
home = "/storage/nextcloud";
configureRedis = true;
caching.redis = true;
settings = {
# TODO: SMTP
maintenance_window_start = 1;
};
config = {
adminpassFile = config.sops.secrets.nextcloud-admin-password.path;
adminuser = "daniel";
dbtype = "pgsql";
dbhost = "/run/postgresql";
};
phpOptions = {
"xdebug.mode" = "debug";
"xdebug.client_host" = "10.0.2.2";
"xdebug.client_port" = "9000";
"xdebug.start_with_request" = "yes";
"xdebug.idekey" = "ECLIPSE";
};
};
services.nginx.enable = false;
systemd.services.nextcloud = {
serviceConfig.User = "nextcloud";
serviceConfig.Group = "nextcloud";
};
services.phpfpm = lib.mkIf config.services.nextcloud.enable {
pools.nextcloud.settings = {
"listen.owner" = "caddy";
"listen.group" = "caddy";
};
};
services.caddy.virtualHosts."nextcloud.h.lyte.dev" = let
fpm-nextcloud-pool = config.services.phpfpm.pools.nextcloud;
root = config.services.nginx.virtualHosts.${config.services.nextcloud.hostName}.root;
in
lib.mkIf config.services.nextcloud.enable {
extraConfig = ''
encode zstd gzip
root * ${root}
redir /.well-known/carddav /remote.php/dav 301
redir /.well-known/caldav /remote.php/dav 301
redir /.well-known/* /index.php{uri} 301
redir /remote/* /remote.php{uri} 301
header {
Strict-Transport-Security max-age=31536000
Permissions-Policy interest-cohort=()
X-Content-Type-Options nosniff
X-Frame-Options SAMEORIGIN
Referrer-Policy no-referrer
X-XSS-Protection "1; mode=block"
X-Permitted-Cross-Domain-Policies none
X-Robots-Tag "noindex, nofollow"
X-Forwarded-Host nextcloud.h.lyte.dev
-X-Powered-By
}
php_fastcgi unix/${fpm-nextcloud-pool.socket} {
root ${root}
env front_controller_active true
env modHeadersAvailable true
}
@forbidden {
path /build/* /tests/* /config/* /lib/* /3rdparty/* /templates/* /data/*
path /.* /autotest* /occ* /issue* /indie* /db_* /console*
not path /.well-known/*
}
error @forbidden 404
@immutable {
path *.css *.js *.mjs *.svg *.gif *.png *.jpg *.ico *.wasm *.tflite
query v=*
}
header @immutable Cache-Control "max-age=15778463, immutable"
@static {
path *.css *.js *.mjs *.svg *.gif *.png *.jpg *.ico *.wasm *.tflite
not query v=*
}
header @static Cache-Control "max-age=15778463"
@woff2 path *.woff2
header @woff2 Cache-Control "max-age=604800"
file_server
'';
};
}
{
# plausible
services.postgresql = {
ensureDatabases = ["plausible"];
ensureUsers = [
{
name = "plausible";
ensureDBOwnership = true;
}
];
};
users.users.plausible = {
isSystemUser = true;
createHome = false;
group = "plausible";
};
users.extraGroups = {
"plausible" = {};
};
services.plausible = {
enable = true;
database = {
clickhouse.setup = true;
postgres = {
setup = false;
dbname = "plausible";
};
};
server = {
baseUrl = "https://a.lyte.dev";
disableRegistration = true;
port = 8899;
secretKeybaseFile = config.sops.secrets.plausible-secret-key-base.path;
};
adminUser = {
activate = false;
email = "daniel@lyte.dev";
passwordFile = config.sops.secrets.plausible-admin-password.path;
};
};
sops.secrets = {
plausible-secret-key-base = {
owner = "plausible";
group = "plausible";
};
plausible-admin-password = {
owner = "plausible";
group = "plausible";
};
};
systemd.services.plausible = {
serviceConfig.User = "plausible";
serviceConfig.Group = "plausible";
};
services.caddy.virtualHosts."a.lyte.dev" = {
extraConfig = ''
reverse_proxy :${toString config.services.plausible.server.port}
'';
};
}
{
# clickhouse
environment.etc = {
"clickhouse-server/users.d/disable-logging-query.xml" = {
text = ''
<clickhouse>
<profiles>
<default>
<log_queries>0</log_queries>
<log_query_threads>0</log_query_threads>
</default>
</profiles>
</clickhouse>
'';
};
"clickhouse-server/config.d/reduce-logging.xml" = {
text = ''
<clickhouse>
<logger>
<level>warning</level>
<console>true</console>
</logger>
<query_thread_log remove="remove"/>
<query_log remove="remove"/>
<text_log remove="remove"/>
<trace_log remove="remove"/>
<metric_log remove="remove"/>
<asynchronous_metric_log remove="remove"/>
<session_log remove="remove"/>
<part_log remove="remove"/>
</clickhouse>
'';
};
};
services.restic.commonPaths = [
# "/var/lib/clickhouse"
];
}
{
# family storage
users.extraGroups = {
"family" = {};
};
systemd.tmpfiles.settings = {
"10-family" = {
"/storage/family" = {
"d" = {
mode = "0770";
user = "root";
group = "family";
};
};
"/storage/valerie" = {
"d" = {
mode = "0700";
user = "valerie";
group = "family";
};
};
};
};
services.restic.commonPaths = [
"/storage/family"
"/storage/valerie"
];
}
{
# daniel augments
systemd.tmpfiles.settings = {
"10-daniel" = {
"/storage/daniel" = {
"d" = {
mode = "0700";
user = "daniel";
group = "nogroup";
};
};
"/storage/daniel/critical" = {
"d" = {
mode = "0700";
user = "daniel";
group = "nogroup";
};
};
};
};
users.groups.daniel.members = ["daniel"];
users.groups.nixadmin.members = ["daniel"];
users.users.daniel = {
extraGroups = [
# "nixadmin" # write access to /etc/nixos/ files
"wheel" # sudo access
"caddy" # write access to public static files
"users" # general users group
"jellyfin" # write access to jellyfin files
"audiobookshelf" # write access to audiobookshelf files
"flanilla" # minecraft server manager
"forgejo"
];
};
services.restic.commonPaths = [
"/storage/daniel"
];
services.postgresql = {
ensureDatabases = ["daniel"];
ensureUsers = [
{
name = "daniel";
ensureClauses = {
# superuser = true;
# createrole = true;
# createdb = true;
# bypassrls = true;
};
ensureDBOwnership = true;
}
];
};
}
{
systemd.tmpfiles.settings = {
"10-jellyfin" = {
"/storage/jellyfin" = {
"d" = {
mode = "0770";
user = "jellyfin";
group = "wheel";
};
};
"/storage/jellyfin/movies" = {
"d" = {
mode = "0770";
user = "jellyfin";
group = "wheel";
};
};
"/storage/jellyfin/tv" = {
"d" = {
mode = "0770";
user = "jellyfin";
group = "wheel";
};
};
"/storage/jellyfin/music" = {
"d" = {
mode = "0770";
user = "jellyfin";
group = "wheel";
};
};
};
};
services.jellyfin = {
enable = true;
openFirewall = false;
# uses port 8096 by default, configurable from admin UI
};
services.caddy.virtualHosts."video.lyte.dev" = {
extraConfig = ''reverse_proxy :8096'';
};
/*
NOTE: this server's xeon chips DO NOT seem to support quicksync or graphics in general
but I can probably throw in a crappy GPU (or a big, cheap ebay GPU for ML
stuff, too?) and get good transcoding performance
*/
# jellyfin hardware encoding
/*
hardware.graphics = {
enable = true;
extraPackages = with pkgs; [
intel-media-driver
vaapiIntel
vaapiVdpau
libvdpau-va-gl
intel-compute-runtime
];
};
nixpkgs.config.packageOverrides = pkgs: {
vaapiIntel = pkgs.vaapiIntel.override { enableHybridCodec = true; };
};
*/
}
{
systemd.tmpfiles.settings = {
"10-postgres" = {
"/storage/postgres" = {
"d" = {
mode = "0750";
user = "postgres";
group = "postgres";
};
};
};
};
services.postgresql = {
enable = true;
dataDir = "/storage/postgres";
enableTCPIP = true;
package = pkgs.postgresql_15;
# https://www.postgresql.org/docs/current/auth-pg-hba-conf.html
# TODO: give the "daniel" user access to all databases
/*
authentication = pkgs.lib.mkOverride 10 ''
#type database user auth-method auth-options
local all postgres peer map=superuser_map
local all daniel peer map=superuser_map
local sameuser all peer map=superuser_map
# lan ipv4
host all daniel 192.168.0.0/16 trust
host all daniel 10.0.0.0/24 trust
# tailnet ipv4
host all daniel 100.64.0.0/10 trust
'';
*/
/*
identMap = ''
# map system_user db_user
superuser_map root postgres
superuser_map postgres postgres
superuser_map daniel postgres
# Let other names login as themselves
superuser_map /^(.*)$ \1
'';
*/
};
services.postgresqlBackup = {
enable = true;
backupAll = true;
compression = "none"; # hoping for restic deduplication here?
location = "/storage/postgres-backups";
startAt = "*-*-* 03:00:00";
};
services.restic.commonPaths = [
"/storage/postgres-backups"
];
}
{
# friends
users.users.ben = {
isNormalUser = true;
packages = [pkgs.vim];
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKUfLZ+IX85p9355Po2zP1H2tAxiE0rE6IYb8Sf+eF9T ben@benhany.com"
];
};
users.users.alan = {
isNormalUser = true;
packages = [pkgs.vim];
# openssh.authorizedKeys.keys = [];
};
}
{
# restic backups
sops.secrets = {
restic-ssh-priv-key-benland = {mode = "0400";};
restic-rascal-passphrase = {
mode = "0400";
};
restic-rascal-ssh-private-key = {
mode = "0400";
};
};
users.groups.restic = {};
users.users.restic = {
# used for other machines to backup to
isSystemUser = true;
createHome = true;
home = "/storage/backups/restic";
group = "restic";
extraGroups = ["sftponly"];
openssh.authorizedKeys.keys = [] ++ config.users.users.daniel.openssh.authorizedKeys.keys;
};
services.openssh.extraConfig = ''
Match Group sftponly
ChrootDirectory /storage/backups/%u
ForceCommand internal-sftp
AllowTcpForwarding no
'';
systemd.tmpfiles.settings = {
"10-backups-local" = {
"/storage/backups/local" = {
"d" = {
mode = "0750";
user = "root";
group = "wheel";
};
};
};
};
services.restic.backups = let
# TODO: how do I set things up so that a compromised server can't corrupt or ransomware my backups? (see the append-only sketch below)
defaults = {
passwordFile = config.sops.secrets.restic-rascal-passphrase.path;
paths =
config.services.restic.commonPaths
++ [
];
initialize = true;
exclude = [];
timerConfig = {
OnCalendar = ["04:45" "17:45"];
};
};
in {
local =
defaults
// {
repository = "/storage/backups/local";
};
rascal =
defaults
// {
extraOptions = [
''sftp.command="ssh beefcake@rascal.hare-cod.ts.net -i ${config.sops.secrets.restic-rascal-ssh-private-key.path} -s sftp"''
];
repository = "sftp://beefcake@rascal.hare-cod.ts.net://storage/backups/beefcake";
};
# TODO: add ruby?
benland =
defaults
// {
extraOptions = [
''sftp.command="ssh daniel@n.benhaney.com -p 10022 -i ${config.sops.secrets.restic-ssh-priv-key-benland.path} -s sftp"''
];
repository = "sftp://daniel@n.benhaney.com://storage/backups/beefcake";
};
};
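/*
a possible direction for the ransomware TODO above, assuming the remote
host could run restic's rest-server instead of plain sftp: rest-server in
append-only mode lets a compromised client add snapshots but not delete
or rewrite existing ones, and pruning then happens from a trusted machine.
A sketch of the receiving side (dataDir and port are assumptions):

services.restic.server = {
  enable = true;
  appendOnly = true;
  dataDir = "/storage/backups/restic-server";
  listenAddress = ":8010";
};
*/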
}
{
systemd.tmpfiles.settings = {
"10-caddy" = {
"/storage/files.lyte.dev" = {
"d" = {
mode = "2775";
user = "root";
group = "wheel";
};
};
};
};
services.restic.commonPaths = [
"/storage/files.lyte.dev"
];
services.caddy = {
# TODO: 502 and other error pages (see the handle_errors sketch below)
enable = true;
email = "daniel@lyte.dev";
adapter = "caddyfile";
virtualHosts = {
"files.lyte.dev" = {
# TODO: customize the files.lyte.dev template?
extraConfig = ''
header {
Access-Control-Allow-Origin "{http.request.header.Origin}"
Access-Control-Allow-Credentials true
Access-Control-Allow-Methods *
Access-Control-Allow-Headers *
Vary Origin
defer
}
file_server browse {
## browse template
## hide .*
root /storage/files.lyte.dev
}
'';
};
};
# acmeCA = "https://acme-staging-v02.api.letsencrypt.org/directory";
};
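# a sketch for the error-page TODO above; caddy's handle_errors directive
# can serve a friendlier body when an upstream is down (the vhost and port
# here are hypothetical):
# services.caddy.virtualHosts."example.h.lyte.dev".extraConfig = ''
#   reverse_proxy :12345
#   handle_errors {
#     respond "Upstream error {http.error.status_code}" {http.error.status_code}
#   }
# '';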
}
{
# systemd.tmpfiles.settings = {
# "10-forgejo" = {
# "/storage/forgejo" = {
# "d" = {
# mode = "0700";
# user = "forgejo";
# group = "nogroup";
# };
# };
# };
# };
services.forgejo = {
enable = true;
stateDir = "/storage/forgejo";
settings = {
DEFAULT = {
APP_NAME = "git.lyte.dev";
};
server = {
ROOT_URL = "https://git.lyte.dev";
HTTP_ADDR = "127.0.0.1";
HTTP_PORT = 3088;
DOMAIN = "git.lyte.dev";
};
migrations = {
ALLOWED_DOMAINS = "*.github.com,github.com,gitlab.com,*.gitlab.com";
};
actions = {
ENABLED = true;
};
service = {
DISABLE_REGISTRATION = true;
};
session = {
COOKIE_SECURE = true;
};
log = {
# LEVEL = "Debug";
};
ui = {
THEMES = "forgejo-auto,forgejo-light,forgejo-dark";
DEFAULT_THEME = "forgejo-auto";
};
indexer = {
REPO_INDEXER_ENABLED = "true";
REPO_INDEXER_PATH = "indexers/repos.bleve";
MAX_FILE_SIZE = "1048576";
# REPO_INDEXER_INCLUDE =
REPO_INDEXER_EXCLUDE = "resources/bin/**";
};
};
lfs = {
enable = true;
};
dump = {
enable = false;
};
database = {
# TODO: move to postgres? (see the sketch below)
type = "sqlite3";
};
};
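/*
a sketch for the "move to postgres?" TODO above, assuming the local
postgresql instance and its unix socket; untested here:

services.forgejo.database = {
  type = "postgres";
  socket = "/run/postgresql";
  # the module's createDatabase option defaults to true, which should
  # handle database/user provisioning for the local instance
};
*/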
services.restic.commonPaths = [
config.services.forgejo.stateDir
];
sops.secrets = {
"forgejo-runner.env" = {mode = "0400";};
};
systemd.services.gitea-runner-beefcake.after = ["sops-nix.service"];
services.gitea-actions-runner = {
# TODO: simple git-based automation would be dope? maybe especially for
# mirroring to github super easy?
package = pkgs.forgejo-runner;
instances."beefcake" = {
enable = true;
name = "beefcake";
url = "https://git.lyte.dev";
settings = {
container = {
# use the shared network which is bridged by default
# this lets us hit git.lyte.dev just fine
network = "podman";
};
};
labels = [
# type ":host" does not depend on docker/podman/lxc
"podman"
"nix:docker://git.lyte.dev/lytedev/nix:latest"
"beefcake:host"
"nixos-host:host"
];
tokenFile = config.sops.secrets."forgejo-runner.env".path;
hostPackages = with pkgs; [
nix
bash
coreutils
curl
gawk
gitMinimal
gnused
nodejs
gnutar # needed for cache action
wget
];
};
};
# environment.systemPackages = with pkgs; [nodejs];
services.caddy.virtualHosts."git.lyte.dev" = {
extraConfig = ''
reverse_proxy :${toString config.services.forgejo.settings.server.HTTP_PORT}
'';
};
services.caddy.virtualHosts."http://git.beefcake.lan" = {
extraConfig = ''
reverse_proxy :${toString config.services.forgejo.settings.server.HTTP_PORT}
'';
};
}
{
services.restic.commonPaths = [
config.services.vaultwarden.backupDir
];
services.vaultwarden = {
enable = true;
backupDir = "/storage/vaultwarden/backups";
config = {
DOMAIN = "https://bw.lyte.dev";
SIGNUPS_ALLOWED = "false";
ROCKET_ADDRESS = "127.0.0.1";
ROCKET_PORT = 8222;
/*
TODO: smtp setup? (see the sketch after this block)
right now, I think I configured this manually by temporarily setting ADMIN_TOKEN
and then configuring in https://bw.lyte.dev/admin
*/
};
};
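/*
a minimal sketch for the SMTP TODO above; these are standard vaultwarden
settings, but the host and addresses are placeholders:

services.vaultwarden.config = {
  SMTP_HOST = "smtp.example.com";
  SMTP_PORT = 587;
  SMTP_SECURITY = "starttls";
  SMTP_FROM = "vaultwarden@lyte.dev";
  SMTP_USERNAME = "vaultwarden@lyte.dev";
};
# SMTP_PASSWORD would come in via services.vaultwarden.environmentFile
# so the credential stays out of the nix store
*/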
services.caddy.virtualHosts."bw.lyte.dev" = {
extraConfig = ''reverse_proxy :${toString config.services.vaultwarden.config.ROCKET_PORT}'';
};
}
{
users.users.atuin = {
isSystemUser = true;
createHome = false;
group = "atuin";
};
users.extraGroups = {
"atuin" = {};
};
services.postgresql = {
ensureDatabases = ["atuin"];
ensureUsers = [
{
name = "atuin";
ensureDBOwnership = true;
}
];
};
services.atuin = {
enable = true;
database = {
createLocally = false;
# NOTE: this uses postgres over the unix domain socket by default
# uri = "postgresql://atuin@localhost:5432/atuin";
};
openRegistration = false;
# TODO: would be neat to have a way to "force" a registration on the server
};
systemd.services.atuin.serviceConfig = {
Group = "atuin";
User = "atuin";
};
services.caddy.virtualHosts."atuin.h.lyte.dev" = {
extraConfig = ''reverse_proxy :${toString config.services.atuin.port}'';
};
}
{
# jland minecraft server
/*
users.groups.jland = {
gid = 982;
};
users.users.jland = {
uid = 986;
isSystemUser = true;
createHome = false;
group = "jland";
};
virtualisation.oci-containers.containers.minecraft-jland = {
autoStart = false;
# sending commands: https://docker-minecraft-server.readthedocs.io/en/latest/commands/
image = "docker.io/itzg/minecraft-server";
# user = "${toString config.users.users.jland.uid}:${toString config.users.groups.jland.gid}";
extraOptions = [
"--tty"
"--interactive"
];
environment = {
EULA = "true";
## UID = toString config.users.users.jland.uid;
## GID = toString config.users.groups.jland.gid;
STOP_SERVER_ANNOUNCE_DELAY = "20";
TZ = "America/Chicago";
VERSION = "1.20.1";
MEMORY = "8G";
MAX_MEMORY = "16G";
TYPE = "FORGE";
FORGE_VERSION = "47.1.3";
ALLOW_FLIGHT = "true";
ENABLE_QUERY = "true";
MODPACK = "/data/origination-files/Server-Files-0.2.14.zip";
## TYPE = "AUTO_CURSEFORGE";
## CF_SLUG = "monumental-experience";
## CF_FILE_ID = "4826863"; # 2.2.53
## due to
## Nov 02 13:45:22 beefcake minecraft-jland[2738672]: me.itzg.helpers.errors.GenericException: The modpack authors have indicated this file is not allowed for project distribution. Please download the client zip file from https://www.curseforge.com/minecraft/modpacks/monumental-experience and pass via CF_MODPACK_ZIP environment variable or place indownloads repo directory.
## we must upload manually
## CF_MODPACK_ZIP = "/data/origination-files/Monumental+Experience-2.2.53.zip";
## ENABLE_AUTOPAUSE = "true"; # TODO: must increate or disable max-tick-time
## May also have mod/loader incompatibilities?
## https://docker-minecraft-server.readthedocs.io/en/latest/misc/autopause-autostop/autopause/
};
environmentFiles = [
# config.sops.secrets."jland.env".path
];
ports = ["26965:25565"];
volumes = [
"/storage/jland/data:/data"
"/storage/jland/worlds:/worlds"
];
};
networking.firewall.allowedTCPPorts = [
26965
];
}
{
# dawncraft minecraft server
systemd.tmpfiles.rules = [
"d /storage/dawncraft/ 0770 1000 1000 -"
"d /storage/dawncraft/data/ 0770 1000 1000 -"
"d /storage/dawncraft/worlds/ 0770 1000 1000 -"
"d /storage/dawncraft/downloads/ 0770 1000 1000 -"
];
virtualisation.oci-containers.containers.minecraft-dawncraft = {
autoStart = false;
# sending commands: https://docker-minecraft-server.readthedocs.io/en/latest/commands/
image = "docker.io/itzg/minecraft-server";
extraOptions = [
"--tty"
"--interactive"
];
environment = {
EULA = "true";
STOP_SERVER_ANNOUNCE_DELAY = "20";
TZ = "America/Chicago";
VERSION = "1.18.2";
MEMORY = "8G";
MAX_MEMORY = "32G";
ALLOW_FLIGHT = "true";
ENABLE_QUERY = "true";
SERVER_PORT = "26968";
QUERY_PORT = "26968";
TYPE = "AUTO_CURSEFORGE";
CF_SLUG = "dawn-craft";
CF_EXCLUDE_MODS = "368398";
CF_FORCE_SYNCHRONIZE = "true";
# CF_FILE_ID = "5247696"; # 2.0.7 server
};
environmentFiles = [
config.sops.secrets."dawncraft.env".path
];
ports = ["26968:26968/tcp" "26968:26968/udp"];
volumes = [
"/storage/dawncraft/data:/data"
"/storage/dawncraft/worlds:/worlds"
"/storage/dawncraft/downloads:/downloads"
];
};
networking.firewall.allowedTCPPorts = [
26968
];
*/
}
({...}: let
port = 26969;
dir = "/storage/flanilla";
user = "flanilla";
# uid = config.users.users.flanilla.uid;
# gid = config.users.groups.flanilla.gid;
in {
# flanilla family minecraft server
users.groups.${user} = {};
users.users.${user} = {
isSystemUser = true;
createHome = false;
home = dir;
group = user;
};
virtualisation.oci-containers.containers.minecraft-flanilla = {
autoStart = false;
image = "docker.io/itzg/minecraft-server";
# user = "${toString uid}:${toString gid}";
extraOptions = ["--tty" "--interactive"];
environment = {
EULA = "true";
MOTD = "Flanilla Survival! Happy hunting!";
# UID = toString uid;
# GID = toString gid;
STOP_SERVER_ANNOUNCE_DELAY = "20";
TZ = "America/Chicago";
VERSION = "1.21";
OPS = "lytedev";
MODE = "survival";
DIFFICULTY = "easy";
ONLINE_MODE = "false";
MEMORY = "8G";
MAX_MEMORY = "16G";
ALLOW_FLIGHT = "true";
ENABLE_QUERY = "true";
ENABLE_COMMAND_BLOCK = "true";
};
ports = ["${toString port}:25565"];
volumes = [
"${dir}/data:/data"
"${dir}/worlds:/worlds"
];
};
systemd.services.podman-minecraft-flanilla.serviceConfig = {
User = user;
Group = user;
};
systemd.tmpfiles.settings = {
"10-${user}-survival" = {
"${dir}/data" = {
"d" = {
mode = "0770";
user = user;
group = user;
};
};
"${dir}/worlds" = {
"d" = {
mode = "0770";
user = user;
group = user;
};
};
};
};
services.restic.commonPaths = [dir];
networking.firewall.allowedTCPPorts = [
port
];
})
({...}: let
port = 26968;
dir = "/storage/flanilla-creative";
user = "flanilla";
# uid = config.users.users.flanilla.uid;
# gid = config.users.groups.flanilla.gid;
in {
# flanilla family minecraft server
users.groups.${user} = {};
users.users.${user} = {
isSystemUser = true;
createHome = false;
home = lib.mkForce dir;
group = user;
};
virtualisation.oci-containers.containers.minecraft-flanilla-creative = {
autoStart = true;
image = "docker.io/itzg/minecraft-server";
# user = "${toString uid}:${toString gid}";
extraOptions = ["--tty" "--interactive"];
environment = {
EULA = "true";
MOTD = "Flanilla Creative! Have fun building!";
# UID = toString uid;
# GID = toString gid;
STOP_SERVER_ANNOUNCE_DELAY = "20";
TZ = "America/Chicago";
VERSION = "1.21";
OPS = "lytedev";
MODE = "creative";
DIFFICULTY = "peaceful";
ONLINE_MODE = "false";
MEMORY = "8G";
MAX_MEMORY = "16G";
ALLOW_FLIGHT = "true";
ENABLE_QUERY = "true";
ENABLE_COMMAND_BLOCK = "true";
};
ports = ["${toString port}:25565"];
volumes = [
"${dir}/data:/data"
"${dir}/worlds:/worlds"
];
};
# systemd.services.podman-minecraft-flanilla-creative.serviceConfig = {
# User = user;
# Group = user;
# };
systemd.tmpfiles.settings = {
"10-${user}-creative" = {
"${dir}/data" = {
"d" = {
mode = "0770";
user = user;
group = user;
};
};
"${dir}/worlds" = {
"d" = {
mode = "0770";
user = user;
group = user;
};
};
};
};
services.restic.commonPaths = [dir];
networking.firewall.allowedTCPPorts = [
port
];
})
({
config,
options,
...
}: let
toml = pkgs.formats.toml {};
kanidm-package = config.services.kanidm.package;
domain = "idm.h.lyte.dev";
name = "kanidm";
storage = "/storage/${name}";
cert = "${storage}/certs/idm.h.lyte.dev.crt";
key = "${storage}/certs/idm.h.lyte.dev.key";
serverSettings = {
inherit domain;
bindaddress = "127.0.0.1:8443";
# ldapbindaddress
tls_chain = cert;
tls_key = key;
origin = "https://${domain}";
db_path = "${storage}/data/kanidm.db";
log_level = "info";
online_backup = {
path = "${storage}/backups/";
schedule = "00 22 * * *";
# versions = 7;
};
};
unixdSettings = {
hsm_pin_path = "/var/cache/${name}-unixd/hsm-pin";
pam_allowed_login_groups = [];
};
clientSettings = {
uri = "https://idm.h.lyte.dev";
};
user = name;
group = name;
serverConfigFile = toml.generate "server.toml" serverSettings;
unixdConfigFile = toml.generate "kanidm-unixd.toml" unixdSettings;
clientConfigFile = toml.generate "kanidm-config.toml" clientSettings;
defaultServiceConfig = {
BindReadOnlyPaths = [
"/nix/store"
"-/etc/resolv.conf"
"-/etc/nsswitch.conf"
"-/etc/hosts"
"-/etc/localtime"
];
CapabilityBoundingSet = [];
# ProtectClock= adds DeviceAllow=char-rtc r
DeviceAllow = "";
# Implies ProtectSystem=strict, which re-mounts all paths
# DynamicUser = true;
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = true;
PrivateMounts = true;
PrivateNetwork = true;
PrivateTmp = true;
PrivateUsers = true;
ProcSubset = "pid";
ProtectClock = true;
ProtectHome = true;
ProtectHostname = true;
# Would re-mount paths ignored by temporary root
#ProtectSystem = "strict";
ProtectControlGroups = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
RestrictAddressFamilies = [];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
SystemCallFilter = ["@system-service" "~@privileged @resources @setuid @keyring"];
# Does not work well with the temporary root
#UMask = "0066";
};
in {
# kanidm
config = {
# reload certs from caddy every 5 minutes
# TODO: ideally some kind of file watcher service would make way more sense here? (see the path-unit sketch below)
# or we could simply setup the permissions properly somehow?
systemd.timers."copy-kanidm-certificates-from-caddy" = {
wantedBy = ["timers.target"];
timerConfig = {
OnBootSec = "10m"; # 10 minutes after booting
OnUnitActiveSec = "5m"; # every 5 minutes afterwards
Unit = "copy-kanidm-certificates-from-caddy.service";
};
};
systemd.services."copy-kanidm-certificates-from-caddy" = {
# get the certificates that caddy provisions for us
script = ''
umask 077
# this line should be unnecessary now that we have this in tmpfiles
install -d -m 0700 -o "${user}" -g "${group}" "${storage}/data" "${storage}/certs"
cd /var/lib/caddy/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/idm.h.lyte.dev
install -m 0700 -o "${user}" -g "${group}" idm.h.lyte.dev.key idm.h.lyte.dev.crt "${storage}/certs"
'';
path = with pkgs; [rsync];
serviceConfig = {
Type = "oneshot";
User = "root";
};
};
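/*
a sketch for the file-watcher TODO above: a systemd path unit could
trigger the copy service whenever caddy rewrites the certificate,
replacing the 5-minute polling timer (untested here; the watched path
matches the one used in the script):

systemd.paths."copy-kanidm-certificates-from-caddy" = {
  wantedBy = ["multi-user.target"];
  pathConfig = {
    PathChanged = "/var/lib/caddy/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/idm.h.lyte.dev/idm.h.lyte.dev.crt";
    Unit = "copy-kanidm-certificates-from-caddy.service";
  };
};
*/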
environment.systemPackages = [kanidm-package];
systemd.tmpfiles.settings."10-kanidm" = {
"${serverSettings.online_backup.path}".d = {
inherit user group;
mode = "0700";
};
## "${builtins.dirOf unixdSettings.hsm_pin_path}".d = {
## user = "${user}-unixd";
## group = "${group}-unixd";
## mode = "0700";
## };
"${storage}/data".d = {
inherit user group;
mode = "0700";
};
"${storage}/certs".d = {
inherit user group;
mode = "0700";
};
};
users.groups = {
${group} = {};
"${group}-unixd" = {};
};
users.users.${user} = {
inherit group;
description = "kanidm server";
isSystemUser = true;
packages = [kanidm-package];
};
users.users."${user}-unixd" = {
group = "${group}-unixd";
description = lib.mkForce "kanidm PAM daemon";
isSystemUser = true;
};
# the kanidm module in nixpkgs was not working for me, so I rolled my own
# loosely based off it
systemd.services.kanidm = {
enable = true;
path = with pkgs; [openssl] ++ [kanidm-package];
description = "kanidm identity management daemon";
wantedBy = ["multi-user.target"];
after = ["network.target"];
requires = ["copy-kanidm-certificates-from-caddy.service"];
script = ''
pwd
ls -la
ls -laR /storage/kanidm
${kanidm-package}/bin/kanidmd server -c ${serverConfigFile}
'';
# environment.RUST_LOG = serverSettings.log_level;
serviceConfig = lib.mkMerge [
defaultServiceConfig
{
StateDirectory = name;
StateDirectoryMode = "0700";
RuntimeDirectory = "${name}d";
User = user;
Group = group;
AmbientCapabilities = ["CAP_NET_BIND_SERVICE"];
CapabilityBoundingSet = ["CAP_NET_BIND_SERVICE"];
PrivateUsers = lib.mkForce false;
PrivateNetwork = lib.mkForce false;
RestrictAddressFamilies = ["AF_INET" "AF_INET6" "AF_UNIX"];
# TemporaryFileSystem = "/:ro";
BindReadOnlyPaths = [
"${storage}/certs"
];
BindPaths = [
"${storage}/data"
# socket
"/run/${name}d:/run/${name}d"
# backups
serverSettings.online_backup.path
];
}
];
};
systemd.services.kanidm-unixd = {
description = "Kanidm PAM daemon";
wantedBy = ["multi-user.target"];
after = ["network.target"];
restartTriggers = [unixdConfigFile clientConfigFile];
serviceConfig = lib.mkMerge [
defaultServiceConfig
{
CacheDirectory = "${name}-unixd";
CacheDirectoryMode = "0700";
RuntimeDirectory = "${name}-unixd";
ExecStart = "${kanidm-package}/bin/kanidm_unixd";
User = "${user}-unixd";
Group = "${group}-unixd";
BindReadOnlyPaths = [
"-/etc/kanidm"
"-/etc/static/kanidm"
"-/etc/ssl"
"-/etc/static/ssl"
"-/etc/passwd"
"-/etc/group"
];
BindPaths = [
# socket
"/run/kanidm-unixd:/var/run/kanidm-unixd"
];
# Needs to connect to kanidmd
PrivateNetwork = lib.mkForce false;
RestrictAddressFamilies = ["AF_INET" "AF_INET6" "AF_UNIX"];
TemporaryFileSystem = "/:ro";
}
];
environment.RUST_LOG = serverSettings.log_level;
};
systemd.services.kanidm-unixd-tasks = {
description = "Kanidm PAM home management daemon";
wantedBy = ["multi-user.target"];
after = ["network.target" "kanidm-unixd.service"];
partOf = ["kanidm-unixd.service"];
restartTriggers = [unixdConfigFile clientConfigFile];
serviceConfig = {
ExecStart = "${kanidm-package}/bin/kanidm_unixd_tasks";
BindReadOnlyPaths = [
"/nix/store"
"-/etc/resolv.conf"
"-/etc/nsswitch.conf"
"-/etc/hosts"
"-/etc/localtime"
"-/etc/kanidm"
"-/etc/static/kanidm"
];
BindPaths = [
# To manage home directories
"/home"
# To connect to kanidm-unixd
"/run/kanidm-unixd:/var/run/kanidm-unixd"
];
# CAP_DAC_OVERRIDE is needed to ignore ownership of unixd socket
CapabilityBoundingSet = ["CAP_CHOWN" "CAP_FOWNER" "CAP_DAC_OVERRIDE" "CAP_DAC_READ_SEARCH"];
IPAddressDeny = "any";
# Need access to users
PrivateUsers = false;
# Need access to home directories
ProtectHome = false;
RestrictAddressFamilies = ["AF_UNIX"];
TemporaryFileSystem = "/:ro";
Restart = "on-failure";
};
environment.RUST_LOG = serverSettings.log_level;
};
environment.etc = {
"kanidm/server.toml".source = serverConfigFile;
"kanidm/config".source = clientConfigFile;
"kanidm/unixd".source = unixdConfigFile;
};
system.nssModules = [kanidm-package];
system.nssDatabases.group = [name];
system.nssDatabases.passwd = [name];
## environment.etc."kanidm/server.toml" = {
## mode = "0600";
## group = "kanidm";
## user = "kanidm";
## };
## environment.etc."kanidm/config" = {
## mode = "0600";
## group = "kanidm";
## user = "kanidm";
## };
services.caddy.virtualHosts."idm.h.lyte.dev" = {
extraConfig = ''reverse_proxy https://idm.h.lyte.dev:8443'';
};
networking = {
extraHosts = ''
::1 idm.h.lyte.dev
127.0.0.1 idm.h.lyte.dev
'';
};
};
})
{
systemd.tmpfiles.settings = {
"10-audiobookshelf" = {
"/storage/audiobookshelf" = {
"d" = {
mode = "0770";
user = "audiobookshelf";
group = "wheel";
};
};
"/storage/audiobookshelf/audiobooks" = {
"d" = {
mode = "0770";
user = "audiobookshelf";
group = "wheel";
};
};
"/storage/audiobookshelf/podcasts" = {
"d" = {
mode = "0770";
user = "audiobookshelf";
group = "wheel";
};
};
};
};
users.groups.audiobookshelf = {};
users.users.audiobookshelf = {
isSystemUser = true;
group = "audiobookshelf";
};
services.audiobookshelf = {
enable = true;
dataDir = "/storage/audiobookshelf";
port = 8523;
};
systemd.services.audiobookshelf.serviceConfig = {
WorkingDirectory = lib.mkForce config.services.audiobookshelf.dataDir;
StateDirectory = lib.mkForce config.services.audiobookshelf.dataDir;
Group = "audiobookshelf";
User = "audiobookshelf";
};
services.caddy.virtualHosts."audio.lyte.dev" = {
extraConfig = ''reverse_proxy :${toString config.services.audiobookshelf.port}'';
};
}
{
# prometheus
services.restic.commonPaths = [
# TODO: do I want this backed up?
# "/var/lib/prometheus"
];
services.prometheus = {
enable = true;
checkConfig = true;
listenAddress = "127.0.0.1";
port = 9090;
scrapeConfigs = [
{
job_name = "beefcake";
static_configs = [
{
targets = let inherit (config.services.prometheus.exporters.node) port listenAddress; in ["${listenAddress}:${toString port}"];
}
{
targets = let inherit (config.services.prometheus.exporters.zfs) port listenAddress; in ["${listenAddress}:${toString port}"];
}
{
targets = let inherit (config.services.prometheus.exporters.postgres) port listenAddress; in ["${listenAddress}:${toString port}"];
}
];
}
];
exporters = {
postgres = {
enable = true;
listenAddress = "127.0.0.1";
runAsLocalSuperUser = true;
};
node = {
enable = true;
listenAddress = "127.0.0.1";
enabledCollectors = [
"systemd"
];
};
zfs = {
enable = true;
listenAddress = "127.0.0.1";
};
};
};
/*
TODO: promtail?
idrac exporter?
restic exporter?
smartctl exporter?
systemd exporter?
NOTE: we probably don't want this exposed
services.caddy.virtualHosts."prometheus.h.lyte.dev" = {
extraConfig = ''reverse_proxy :${toString config.services.prometheus.port}'';
};
*/
}
{
# grafana
systemd.tmpfiles.settings = {
"10-grafana" = {
"/storage/grafana" = {
"d" = {
mode = "0750";
user = "grafana";
group = "grafana";
};
};
};
};
services.restic.commonPaths = [
"/storage/grafana"
];
sops.secrets = {
grafana-admin-password = {
owner = "grafana";
group = "grafana";
mode = "0400";
};
grafana-smtp-password = {
owner = "grafana";
group = "grafana";
mode = "0400";
};
};
services.grafana = {
enable = true;
dataDir = "/storage/grafana";
provision = {
enable = true;
datasources = {
settings = {
datasources = [
{
name = "Prometheus";
type = "prometheus";
access = "proxy";
url = "http://localhost:${toString config.services.prometheus.port}";
isDefault = true;
}
];
};
};
};
settings = {
server = {
http_port = 3814;
root_url = "https://grafana.h.lyte.dev";
};
smtp = {
enabled = true;
from_address = "grafana@lyte.dev";
user = "grafana@lyte.dev";
host = "smtp.mailgun.org:587";
password = ''$__file{${config.sops.secrets.grafana-smtp-password.path}}'';
};
security = {
admin_email = "daniel@lyte.dev";
admin_user = "lytedev";
admin_file = ''$__file{${config.sops.secrets.grafana-admin-password.path}}'';
};
# database = {
# };
};
};
networking.firewall.allowedTCPPorts = [
9000
];
services.caddy.virtualHosts."grafana.h.lyte.dev" = {
extraConfig = ''reverse_proxy :${toString config.services.grafana.settings.server.http_port}'';
};
}
{
systemd.tmpfiles.settings = {
"10-paperless" = {
"/storage/paperless" = {
"d" = {
mode = "0750";
user = "paperless";
group = "paperless";
};
};
};
};
services.restic.commonPaths = [
"/storage/paperless"
];
sops.secrets.paperless-superuser-password = {
owner = "paperless";
group = "paperless";
mode = "400";
};
services.paperless = {
enable = true;
package = pkgs.paperless-ngx;
dataDir = "/storage/paperless";
passwordFile = config.sops.secrets.paperless-superuser-password.path;
};
services.caddy.virtualHosts."paperless.h.lyte.dev" = {
extraConfig = ''reverse_proxy :${toString config.services.paperless.port}'';
};
}
{
systemd.tmpfiles.settings = {
"10-actual" = {
"/storage/actual" = {
"d" = {
mode = "0750";
user = "root";
group = "family";
};
};
};
};
services.restic.commonPaths = [
"/storage/actual"
];
virtualisation.oci-containers = {
containers.actual = {
image = "ghcr.io/actualbudget/actual-server:24.11.0";
autoStart = true;
ports = ["5006:5006"];
volumes = ["/storage/actual:/data"];
};
};
services.caddy.virtualHosts."finances.h.lyte.dev" = {
extraConfig = ''reverse_proxy :5006'';
};
}
{
services.factorio = {
enable = true;
package = pkgs.factorio-headless.override {
versionsJson = ./factorio-versions.json;
};
admins = ["lytedev"];
autosave-interval = 5;
game-name = "Flanwheel Online";
description = "Space Age 2.0";
openFirewall = true;
lan = true;
# public = true; # NOTE: cannot be true if requireUserVerification is false
port = 34197;
requireUserVerification = false; # critical for DRM-free users
# contains the game password and account password for "public" servers
extraSettingsFile = config.sops.secrets.factorio-server-settings.path;
};
sops.secrets = {
factorio-server-settings = {mode = "0777";};
};
}
];
/*
TODO: non-root processes and services that access secrets need to be part of
the 'keys' group
maybe this will fix plausible?
systemd.services.some-service = {
serviceConfig.SupplementaryGroups = [ config.users.groups.keys.name ];
};
or
users.users.example-user.extraGroups = [ config.users.groups.keys.name ];
TODO: declarative directory quotas? for storage/$USER and /home/$USER
*/
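/*
one possible take on the quota TODO above, assuming per-user child
datasets on the existing zstorage pool are acceptable; zfs quotas are
per-dataset (names and sizes below are hypothetical):

zfs create zstorage/storage/daniel
zfs set quota=500G zstorage/storage/daniel
*/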
environment.systemPackages = with pkgs; [
aria2
restic
btrfs-progs
zfs
smartmontools
htop
bottom
curl
xh
];
2024-03-13 21:12:14 -05:00
services.tailscale.useRoutingFeatures = "server";
2023-09-04 11:40:30 -05:00
2024-09-12 11:58:24 -05:00
/*
2024-03-13 21:12:14 -05:00
# https://github.com/NixOS/nixpkgs/blob/04af42f3b31dba0ef742d254456dc4c14eedac86/nixos/modules/services/misc/lidarr.nix#L72
2024-09-12 11:58:24 -05:00
services.lidarr = {
enable = true;
dataDir = "/storage/lidarr";
};
services.radarr = {
enable = true;
dataDir = "/storage/radarr";
};
services.sonarr = {
enable = true;
dataDir = "/storage/sonarr";
};
services.bazarr = {
enable = true;
listenPort = 6767;
};
networking.firewall.allowedTCPPorts = [9876 9877];
networking.firewall.allowedUDPPorts = [9876 9877];
networking.firewall.allowedUDPPortRanges = [
{
from = 27000;
to = 27100;
}
];
*/
}