diff --git a/.sops.yaml b/.sops.yaml index a073525..d598e74 100644 --- a/.sops.yaml +++ b/.sops.yaml @@ -1,6 +1,7 @@ keys: + # after updating this, you will need to `sops updatekeys secrets.file` for any files that need the new key(s) - &daniel age1stdue5q5teskee057ced6rh9pzzr93xsy66w4sc3zu49rgxl7cjshztt45 # pass age-key | rg '# pub' - - &sshd-at-beefcake age1k8s590x34ghz7yrjyrgzkd24j252srf0mhfy34halp4frwr065csrlt2ev # ssh beefcake "nix-shell -p ssh-to-age --run 'cat /etc/ssh/ssh_host_ed25519_key.pub | ssh-to-age'" + - &sshd-at-beefcake age1etv56f7kf78a55lxqtydrdd32dpmsjnxndf4u28qezxn6p7xt9esqvqdq7 # ssh beefcake "nix-shell -p ssh-to-age --run 'cat /etc/ssh/ssh_host_ed25519_key.pub | ssh-to-age'" creation_rules: - path_regex: secrets/[^/]+\.(ya?ml|json|env|ini)$ key_groups: diff --git a/disko/default.nix b/disko/default.nix index f26dabe..1e6c617 100644 --- a/disko/default.nix +++ b/disko/default.nix @@ -1,4 +1,6 @@ -{ +{lib, ...}: let + inherit (lib.attrsets) mapAttrs' filterAttrs; +in { standardWithHibernateSwap = { disks ? ["/dev/sda"], swapSize, @@ -138,67 +140,7 @@ }; }; }; - # TODO: figure out what I can't have an optiona/default 'name' attribute here so I can DRY with "standard" - thinker = {disks ? ["/dev/vda"], ...}: { - disko.devices = { - disk = { - vdb = { - type = "disk"; - device = builtins.elemAt disks 0; - content = { - type = "gpt"; - partitions = { - ESP = { - label = "EFI"; - name = "ESP"; - size = "512M"; - type = "EF00"; - content = { - type = "filesystem"; - format = "vfat"; - mountpoint = "/boot"; - mountOptions = [ - "defaults" - ]; - }; - }; - luks = { - size = "100%"; - content = { - type = "luks"; - name = "crypted"; - extraOpenArgs = ["--allow-discards"]; - # if you want to use the key for interactive login be sure there is no trailing newline - # for example use `echo -n "password" > /tmp/secret.key` - keyFile = "/tmp/secret.key"; # Interactive - # settings.keyFile = "/tmp/password.key"; - # additionalKeyFiles = ["/tmp/additionalSecret.key"]; - content = { - type = "btrfs"; - extraArgs = ["-f"]; - subvolumes = { - "/root" = { - mountpoint = "/"; - mountOptions = ["compress=zstd" "noatime"]; - }; - "/home" = { - mountpoint = "/home"; - mountOptions = ["compress=zstd" "noatime"]; - }; - "/nix" = { - mountpoint = "/nix"; - mountOptions = ["compress=zstd" "noatime"]; - }; - }; - }; - }; - }; - }; - }; - }; - }; - }; - }; + unencrypted = {disks ? 
["/dev/vda"], ...}: { disko.devices = { disk = { @@ -249,6 +191,197 @@ }; }; }; + + beefcake = let + zpools = { + zroot = { + # TODO: at the time of writing, disko does not support draid6 + # so I'm building/managing the array manually for the time being + # the root pool is just a single disk right now + name = "zroot"; + config = { + type = "zpool"; + # mode = "draid6"; + rootFsOptions = { + compression = "zstd"; + "com.sun:auto-snapshot" = "false"; + }; + mountpoint = "/"; + postCreateHook = "zfs list -t snapshot -H -o name | grep -E '^zroot@blank$' || zfs snapshot zroot@blank"; + + datasets = { + zfs_fs = { + type = "zfs_fs"; + mountpoint = "/zfs_fs"; + options."com.sun:auto-snapshot" = "true"; + }; + zfs_unmounted_fs = { + type = "zfs_fs"; + options.mountpoint = "none"; + }; + zfs_legacy_fs = { + type = "zfs_fs"; + options.mountpoint = "legacy"; + mountpoint = "/zfs_legacy_fs"; + }; + zfs_testvolume = { + type = "zfs_volume"; + size = "10M"; + content = { + type = "filesystem"; + format = "ext4"; + mountpoint = "/ext4onzfs"; + }; + }; + encrypted = { + type = "zfs_fs"; + options = { + mountpoint = "none"; + encryption = "aes-256-gcm"; + keyformat = "passphrase"; + keylocation = "file:///tmp/secret.key"; + }; + # use this to read the key during boot + # postCreateHook = '' + # zfs set keylocation="prompt" "zroot/$name"; + # ''; + }; + "encrypted/test" = { + type = "zfs_fs"; + mountpoint = "/zfs_crypted"; + }; + }; + }; + }; + zstorage = { + # PARITY_COUNT=3 NUM_DRIVES=8 HOT_SPARES=2 sudo -E zpool create -f -O mountpoint=none -O compression=on -O xattr=sa -O acltype=posixacl -o ashift=12 -O atime=off -O recordsize=64K zstorage draid{$PARITY_COUNT}:{$NUM_DRIVES}c:{$HOT_SPARES}s /dev/disk/by-id/scsi-35000039548cb637c /dev/disk/by-id/scsi-35000039548cb7c8c /dev/disk/by-id/scsi-35000039548cb85c8 /dev/disk/by-id/scsi-35000039548d9b504 /dev/disk/by-id/scsi-35000039548da2b08 /dev/disk/by-id/scsi-35000039548dad2fc /dev/disk/by-id/scsi-350000399384be921 /dev/disk/by-id/scsi-35000039548db096c + # sudo zfs create -o mountpoint=legacy zstorage/nix + # sudo zfs create -o canmount=on -o mountpoint=/storage zstorage/storage + name = "zstorage"; + config = {}; + }; + }; + diskClass = { + storage = { + type = "zfs"; + pool = zpools.zroot.name; + }; + boot = { + content = { + type = "gpt"; + partitions = { + ESP = { + size = "1G"; + type = "EF00"; + content = { + type = "filesystem"; + format = "vfat"; + mountpoint = "/boot"; + }; + }; + zfs = { + size = "100%"; + content = { + type = "zfs"; + pool = zpools.zroot.name; + }; + }; + }; + }; + }; + }; + bootDisks = { + "/dev/sdi" = { + name = "i"; + enable = true; + }; + "/dev/sdj" = { + name = "j"; + enable = true; + }; # TODO: join current boot drive to new boot pool + }; + storageDisks = { + "/dev/sda" = { + enable = true; + name = "a"; + }; + "/dev/sdb" = { + enable = true; + name = "b"; + }; + "/dev/sdc" = { + enable = true; + name = "c"; + }; + "/dev/sdd" = { + enable = true; + name = "d"; + }; + + # TODO: start small + "/dev/sde" = { + enable = false; + name = "e"; + }; + "/dev/sdf" = { + enable = false; + name = "f"; + }; + "/dev/sdg" = { + enable = false; + name = "g"; + }; + "/dev/sdh" = { + enable = false; + name = "h"; + }; + + # gap for two boot drives + + "/dev/sdk" = { + enable = false; + name = "k"; + }; + "/dev/sdl" = { + enable = false; + name = "l"; + }; + "/dev/sdm" = { + enable = false; + name = "m"; + }; + "/dev/sdn" = { + # TODO: this is my holding cell for random stuff right now + enable = false; + name = "n"; + }; + }; + + diskoBoot 
= mapAttrs' (device: {name, ...}: { + name = "boot-${name}"; + value = { + inherit device; + type = "disk"; + content = diskClass.boot.content; + }; + }) (filterAttrs (_: {enable, ...}: enable) bootDisks); + + diskoStorage = mapAttrs' (device: {name, ...}: { + name = "storage-${name}"; + value = { + inherit device; + type = "disk"; + content = diskClass.storage.content; + }; + }) (filterAttrs (_: {enable, ...}: enable) storageDisks); + in { + disko.devices = { + disk = diskoBoot // diskoStorage; + zpool = { + zroot = zpools.zroot.config; + }; + }; + }; legacy = {disks ? ["/dev/vda"], ...}: { disko.devices = { disk = { diff --git a/flake.nix b/flake.nix index 6ae0a96..c80894e 100644 --- a/flake.nix +++ b/flake.nix @@ -49,7 +49,7 @@ "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" "helix.cachix.org-1:ejp9KQpR1FBI2onstMQ34yogDm4OgU2ru6lIwPvuCVs=" "nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" - "h.lyte.dev:HeVWtne31ZG8iMf+c15VY3/Mky/4ufXlfTpT8+4Xbs0=" + "h.lyte.dev-2:te9xK/GcWPA/5aXav8+e5RHImKYMug8hIIbhHsKPN0M=" "hyprland.cachix.org-1:a7pgxzMz7+chwVL3/pzj6jIBMioiJM7ypFP8PwtkuGc=" ]; }; @@ -107,7 +107,7 @@ # overlay I did this to work around some recursion problems # TODO: https://discourse.nixos.org/t/infinite-recursion-getting-started-with-overlays/48880 packages = genPkgs (pkgs: {inherit (pkgs) iosevkaLyteTerm iosevkaLyteTermSubset nix-base-container-image;}); - diskoConfigurations = import ./disko; + diskoConfigurations = import ./disko {inherit (nixpkgs) lib;}; templates = import ./templates; formatter = genPkgs (p: p.alejandra); @@ -221,6 +221,8 @@ final.helix = helix; # TODO: would love to use a current wezterm build so I can make use of ssh/mux functionality without breakage # source: https://github.com/wez/wezterm/issues/3771 + # not-yet-merged (abandoned?): https://github.com/wez/wezterm/pull/4737 + # I did try using the latest code via the flake, but alas it did not resolve my issues with mux'ing wezterm = wezterm-input.outputs.packages.${prev.system}.default; final.wezterm = wezterm; }; @@ -250,21 +252,27 @@ modules = with nixosModules; [ home-manager-defaults + # TODO: disko? hardware.nixosModules.common-cpu-intel + outputs.nixosModules.deno-netlify-ddns-client + + { + services.deno-netlify-ddns-client = { + enable = true; + username = "beefcake.h"; + # TODO: router doesn't even do ipv6 yet... 
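+ # what this client ends up doing on its timer is (roughly):
+ #   curl -4 -s -X POST -u "beefcake.h:<password>" \
+ #     -L "https://netlify-ddns.deno.dev/v1/netlify-ddns/replace-all-relevant-user-dns-records"
+ # (see modules/nixos/deno-netlify-ddns-client.nix below; the password comes
+ # from the netlify-ddns-password sops secret wired up in ./nixos/beefcake.nix)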
+ ipv6 = false; + }; + } + + common + podman + troubleshooting-tools linux fonts ./nixos/beefcake.nix - - { - time = { - timeZone = "America/Chicago"; - }; - services.smartd.enable = true; - services.fwupd.enable = true; - } ]; }; @@ -338,6 +346,7 @@ hardware.nixosModules.common-pc-ssd common + gaming graphical-workstation ./nixos/htpc.nix @@ -535,6 +544,7 @@ home-manager-defaults hardware.nixosModules.common-cpu-amd common + linux ./nixos/rascal.nix ]; }; diff --git a/modules/home-manager/default.nix b/modules/home-manager/default.nix index 3512334..63a2833 100644 --- a/modules/home-manager/default.nix +++ b/modules/home-manager/default.nix @@ -1249,7 +1249,7 @@ # docs: https://wezfurlong.org/wezterm/config/appearance.html#defining-your-own-colors programs.wezterm = with colors.withHashPrefix; { enable = true; - package = pkgs.wezterm; + # package = pkgs.wezterm; extraConfig = builtins.readFile ./wezterm/config.lua; colorSchemes = { catppuccin-mocha-sapphire = { diff --git a/modules/home-manager/wezterm/config.lua b/modules/home-manager/wezterm/config.lua index a642934..ba00a7e 100644 --- a/modules/home-manager/wezterm/config.lua +++ b/modules/home-manager/wezterm/config.lua @@ -22,6 +22,8 @@ config.window_background_opacity = 1.0 config.enable_kitty_keyboard = true config.show_new_tab_button_in_tab_bar = true +-- config.front_end = "WebGpu" + local function tab_title(tab_info) local title = tab_info.tab_title if title and #title > 0 then diff --git a/modules/nixos/default.nix b/modules/nixos/default.nix index c79f654..b8f235d 100644 --- a/modules/nixos/default.nix +++ b/modules/nixos/default.nix @@ -11,6 +11,8 @@ pubkey, overlays, }: { + deno-netlify-ddns-client = import ./deno-netlify-ddns-client.nix; + fallback-hostname = {lib, ...}: { networking.hostName = lib.mkDefault "set-a-hostname-dingus"; }; diff --git a/modules/nixos/deno-netlify-ddns-client.nix b/modules/nixos/deno-netlify-ddns-client.nix new file mode 100644 index 0000000..193c256 --- /dev/null +++ b/modules/nixos/deno-netlify-ddns-client.nix @@ -0,0 +1,87 @@ +{ + lib, + config, + pkgs, + ... +}: let + inherit (lib) mkEnableOption mkOption types mkIf; + inherit (lib.strings) optionalString; + cfg = config.services.deno-netlify-ddns-client; +in { + options.services.deno-netlify-ddns-client = { + enable = mkEnableOption "the deno-netlify-ddns client"; + username = mkOption { + type = types.str; + }; + passwordFile = mkOption { + type = types.str; + }; + endpoint = mkOption { + type = types.str; + default = "https://netlify-ddns.deno.dev"; + }; + ipv4 = mkOption { + type = types.bool; + default = true; + }; + ipv6 = mkOption { + type = types.bool; + default = true; + }; + requestTimeout = mkOption { + type = types.int; + description = "The maximum number of seconds before the HTTP request times out."; + default = 30; + }; + afterBootTime = mkOption { + type = types.str; + description = "A systemd.timers timespan. This option corresponds to the OnBootSec field in the timerConfig."; + default = "5m"; + }; + every = mkOption { + type = types.str; + description = "A systemd.timers timespan. 
This option corresponds to the OnUnitActiveSec field in the timerConfig."; + default = "5m"; + }; + }; + + config = { + systemd.timers.deno-netlify-ddns-client = { + enable = mkIf cfg.enable true; + after = ["network.target"]; + wantedBy = ["timers.target"]; + timerConfig = { + OnBootSec = cfg.afterBootTime; + OnUnitActiveSec = cfg.every; + Unit = "deno-netlify-ddns-client.service"; + }; + }; + + systemd.services.deno-netlify-ddns-client = { + enable = mkIf cfg.enable true; + after = ["network.target"]; + script = '' + set -eu + password="$(cat "${cfg.passwordFile}")" + ${optionalString cfg.ipv4 '' + ${pkgs.curl}/bin/curl -4 -s \ + -X POST \ + --max-time ${toString cfg.requestTimeout} \ + -u "${cfg.username}:''${password}" \ + -L "${cfg.endpoint}/v1/netlify-ddns/replace-all-relevant-user-dns-records" + ''} + ${optionalString cfg.ipv6 '' + ${pkgs.curl}/bin/curl -6 -s \ + -X POST \ + --max-time ${toString cfg.requestTimeout} \ + -u "${cfg.username}:''${password}" \ + -L "${cfg.endpoint}/v1/netlify-ddns/replace-all-relevant-user-dns-records" + ''} + ''; + serviceConfig = { + Type = "oneshot"; + User = "root"; + }; + }; + }; +} diff --git a/nixos/beefcake.nix b/nixos/beefcake.nix index 35932a7..5d21347 100644 --- a/nixos/beefcake.nix +++ b/nixos/beefcake.nix @@ -15,41 +15,61 @@ sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00 pkgs, ... }: { - system.stateVersion = "22.05"; + system.stateVersion = "24.05"; home-manager.users.daniel.home.stateVersion = "24.05"; networking.hostName = "beefcake"; imports = [ { # hardware + networking.hostId = "541ede55"; boot = { - initrd.availableKernelModules = ["ehci_pci" "megaraid_sas" "usbhid" "uas" "sd_mod"]; + zfs = { + extraPools = ["zstorage"]; + }; + supportedFilesystems = { + zfs = true; + }; + initrd.supportedFilesystems = { + zfs = true; + }; + kernelPackages = config.boot.zfs.package.latestCompatibleLinuxPackages; + initrd.availableKernelModules = ["ehci_pci" "mpt3sas" "usbhid" "sd_mod"]; kernelModules = ["kvm-intel"]; loader.systemd-boot.enable = true; loader.efi.canTouchEfiVariables = true; }; fileSystems."/" = { - device = "/dev/disk/by-uuid/0747dcba-f590-42e6-89c8-6cb2f9114d64"; + device = "/dev/disk/by-uuid/992ce55c-7507-4d6b-938c-45b7e891f395"; fsType = "ext4"; - options = [ - "usrquota" - ]; }; fileSystems."/boot" = { - device = "/dev/disk/by-uuid/7E3C-9018"; + device = "/dev/disk/by-uuid/B6C4-7CF4"; fsType = "vfat"; + options = ["fmask=0022" "dmask=0022"]; }; - fileSystems."/storage" = { - device = "/dev/disk/by-uuid/ea8258d7-54d1-430e-93b3-e15d33231063"; - fsType = "btrfs"; - options = [ - "compress=zstd:5" - "space_cache=v2" - ]; + # should be mounted by auto-import; see boot.zfs.extraPools + # fileSystems."/storage" = { + # device = "zstorage/storage"; + # fsType = "zfs"; + # }; + + fileSystems."/nix" = { + device = "zstorage/nix"; + fsType = "zfs"; }; + + services.zfs.autoScrub.enable = true; + services.zfs.autoSnapshot.enable = true; + + # TODO: nfs with zfs? + # services.nfs.server.enable = true; + } + { + boot.kernelParams = ["nohibernate"]; } { # sops secrets stuff @@ -83,46 +103,60 @@ sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00 # subdirectory # "myservice/my_subdir/my_secret" = { }; - "jland.env" = { - path = "/var/lib/jland/jland.env"; - # TODO: would be cool to assert that it's correctly-formatted JSON? probably should be done in a pre-commit hook? 
- mode = "0440"; - owner = config.users.users.daniel.name; - group = config.users.groups.daniel.name; - }; + # "jland.env" = { + # path = "/var/lib/jland/jland.env"; + # # TODO: would be cool to assert that it's correctly-formatted JSON? probably should be done in a pre-commit hook? + # mode = "0440"; + # owner = config.users.users.daniel.name; + # group = config.users.groups.daniel.name; + # }; - "dawncraft.env" = { - path = "/var/lib/dawncraft/dawncraft.env"; - # TODO: would be cool to assert that it's correctly-formatted JSON? probably should be done in a pre-commit hook? - mode = "0440"; - owner = config.users.users.daniel.name; - group = config.users.groups.daniel.name; - }; + # "dawncraft.env" = { + # path = "/var/lib/dawncraft/dawncraft.env"; + # # TODO: would be cool to assert that it's correctly-formatted JSON? probably should be done in a pre-commit hook? + # mode = "0440"; + # owner = config.users.users.daniel.name; + # group = config.users.groups.daniel.name; + # }; - plausible-admin-password = { - # TODO: path = "${config.systemd.services.plausible.serviceConfig.WorkingDirectory}/plausible-admin-password.txt"; - path = "/var/lib/plausible/plausible-admin-password"; - mode = "0440"; - owner = config.systemd.services.plausible.serviceConfig.User; - group = config.systemd.services.plausible.serviceConfig.Group; - }; - plausible-secret-key-base = { - path = "/var/lib/plausible/plausible-secret-key-base"; - mode = "0440"; - owner = config.systemd.services.plausible.serviceConfig.User; - group = config.systemd.services.plausible.serviceConfig.Group; - }; - nextcloud-admin-password.path = "/var/lib/nextcloud/admin-password"; + # plausible-admin-password = { + # # TODO: path = "${config.systemd.services.plausible.serviceConfig.WorkingDirectory}/plausible-admin-password.txt"; + # path = "/var/lib/plausible/plausible-admin-password"; + # mode = "0440"; + # owner = config.systemd.services.plausible.serviceConfig.User; + # group = config.systemd.services.plausible.serviceConfig.Group; + # }; + # plausible-secret-key-base = { + # path = "/var/lib/plausible/plausible-secret-key-base"; + # mode = "0440"; + # owner = config.systemd.services.plausible.serviceConfig.User; + # group = config.systemd.services.plausible.serviceConfig.Group; + # }; + # nextcloud-admin-password.path = "/var/lib/nextcloud/admin-password"; + restic-ssh-priv-key-benland = {mode = "0400";}; "forgejo-runner.env" = {mode = "0400";}; + netlify-ddns-password = {mode = "0400";}; + nix-cache-priv-key = {mode = "0400";}; + restic-rascal-passphrase = { + mode = "0400"; + }; + restic-rascal-ssh-private-key = { + mode = "0400"; + }; }; }; systemd.services.gitea-runner-beefcake.after = ["sops-nix.service"]; } + { + services.deno-netlify-ddns-client = { + passwordFile = config.sops.secrets.netlify-ddns-password.path; + }; + } { # nix binary cache services.nix-serve = { - enable = true; - secretKeyFile = "/var/cache-priv-key.pem"; + enable = true; # TODO: true + secretKeyFile = config.sops.secrets.nix-cache-priv-key.path; }; services.caddy.virtualHosts."nix.h.lyte.dev" = { extraConfig = '' @@ -167,14 +201,12 @@ sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00 extraHosts = '' ::1 nix.h.lyte.dev 127.0.0.1 nix.h.lyte.dev - ::1 idm.h.lyte.dev - 127.0.0.1 idm.h.lyte.dev ''; }; } { services.headscale = { - enable = true; + enable = false; address = "127.0.0.1"; port = 7777; settings = { @@ -202,12 +234,12 @@ sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00 }; }; }; - 
services.caddy.virtualHosts."tailscale.vpn.h.lyte.dev" = { + services.caddy.virtualHosts."tailscale.vpn.h.lyte.dev" = lib.mkIf config.services.headscale.enable { extraConfig = '' reverse_proxy http://localhost:${toString config.services.headscale.port} ''; }; - networking.firewall.allowedUDPPorts = [3478]; + networking.firewall.allowedUDPPorts = lib.mkIf config.services.headscale.enable [3478]; } { services.soju = { @@ -218,118 +250,129 @@ sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00 6667 ]; } - { - # samba - users.users.guest = { - # used for anonymous samba access - isSystemUser = true; - group = "users"; - createHome = true; - }; - users.users.scannerupload = { - # used for scanner samba access - isSystemUser = true; - group = "users"; - createHome = true; - }; - systemd.tmpfiles.rules = [ - "d /var/spool/samba 1777 root root -" - ]; - services.samba-wsdd = { - enable = true; - }; - services.samba = { - enable = true; - openFirewall = true; - securityType = "user"; + # { + # # samba + # users.users.guest = { + # # used for anonymous samba access + # isSystemUser = true; + # group = "users"; + # createHome = true; + # }; + # users.users.scannerupload = { + # # used for scanner samba access + # isSystemUser = true; + # group = "users"; + # createHome = true; + # }; + # systemd.tmpfiles.rules = [ + # "d /var/spool/samba 1777 root root -" + # ]; + # services.samba-wsdd = { + # enable = true; + # }; + # services.samba = { + # enable = true; + # openFirewall = true; + # securityType = "user"; - # not needed since I don't think I use printer sharing? - # https://nixos.wiki/wiki/Samba#Printer_sharing - # package = pkgs.sambaFull; # broken last I checked in nixpkgs? + # # not needed since I don't think I use printer sharing? + # # https://nixos.wiki/wiki/Samba#Printer_sharing + # # package = pkgs.sambaFull; # broken last I checked in nixpkgs? 
- extraConfig = '' - workgroup = WORKGROUP - server string = beefcake - netbios name = beefcake - security = user - #use sendfile = yes - #max protocol = smb2 - # note: localhost is the ipv6 localhost ::1 - hosts allow = 100.64.0.0/10 192.168.0.0/16 127.0.0.1 localhost - hosts deny = 0.0.0.0/0 - guest account = guest - map to guest = never - # load printers = yes - # printing = cups - # printcap name = cups - ''; - shares = { - libre = { - path = "/storage/libre"; - browseable = "yes"; - "read only" = "no"; - "guest ok" = "yes"; - "create mask" = "0666"; - "directory mask" = "0777"; - # "force user" = "nobody"; - # "force group" = "users"; - }; - public = { - path = "/storage/public"; - browseable = "yes"; - "read only" = "no"; - "guest ok" = "yes"; - "create mask" = "0664"; - "directory mask" = "0775"; - # "force user" = "nobody"; - # "force group" = "users"; - }; - family = { - path = "/storage/family"; - browseable = "yes"; - "read only" = "no"; - "guest ok" = "no"; - "create mask" = "0660"; - "directory mask" = "0770"; - # "force user" = "nobody"; - # "force group" = "family"; - }; - scannerdocs = { - path = "/storage/scannerdocs"; - browseable = "yes"; - "read only" = "no"; - "guest ok" = "no"; - "create mask" = "0600"; - "directory mask" = "0700"; - "valid users" = "scannerupload"; - "force user" = "scannerupload"; - "force group" = "users"; - }; - daniel = { - path = "/storage/daniel"; - browseable = "yes"; - "read only" = "no"; - "guest ok" = "no"; - "create mask" = "0600"; - "directory mask" = "0700"; - # "force user" = "daniel"; - # "force group" = "users"; - }; - # printers = { - # comment = "All Printers"; - # path = "/var/spool/samba"; - # public = "yes"; - # browseable = "yes"; - # # to allow user 'guest account' to print. - # "guest ok" = "yes"; - # writable = "no"; - # printable = "yes"; - # "create mode" = 0700; - # }; - }; - }; - } + # extraConfig = '' + # workgroup = WORKGROUP + # server string = beefcake + # netbios name = beefcake + # security = user + # #use sendfile = yes + # #max protocol = smb2 + # # note: localhost is the ipv6 localhost ::1 + # hosts allow = 100.64.0.0/10 192.168.0.0/16 127.0.0.1 localhost + # hosts deny = 0.0.0.0/0 + # guest account = guest + # map to guest = never + # # load printers = yes + # # printing = cups + # # printcap name = cups + # ''; + # shares = { + # libre = { + # path = "/storage/libre"; + # browseable = "yes"; + # "read only" = "no"; + # "guest ok" = "yes"; + # "create mask" = "0666"; + # "directory mask" = "0777"; + # # "force user" = "nobody"; + # # "force group" = "users"; + # }; + # public = { + # path = "/storage/public"; + # browseable = "yes"; + # "read only" = "no"; + # "guest ok" = "yes"; + # "create mask" = "0664"; + # "directory mask" = "0775"; + # # "force user" = "nobody"; + # # "force group" = "users"; + # }; + # family = { + # path = "/storage/family"; + # browseable = "yes"; + # "read only" = "no"; + # "guest ok" = "no"; + # "create mask" = "0660"; + # "directory mask" = "0770"; + # # "force user" = "nobody"; + # # "force group" = "family"; + # }; + # scannerdocs = { + # path = "/storage/scannerdocs"; + # browseable = "yes"; + # "read only" = "no"; + # "guest ok" = "no"; + # "create mask" = "0600"; + # "directory mask" = "0700"; + # "valid users" = "scannerupload"; + # "force user" = "scannerupload"; + # "force group" = "users"; + # }; + # daniel = { + # path = "/storage/daniel"; + # browseable = "yes"; + # "read only" = "no"; + # "guest ok" = "no"; + # "create mask" = "0600"; + # "directory mask" = "0700"; + # # 
"force user" = "daniel"; + # # "force group" = "users"; + # }; + # # printers = { + # # comment = "All Printers"; + # # path = "/var/spool/samba"; + # # public = "yes"; + # # browseable = "yes"; + # # # to allow user 'guest account' to print. + # # "guest ok" = "yes"; + # # writable = "no"; + # # printable = "yes"; + # # "create mode" = 0700; + # # }; + # }; + # }; + # } { + # services.postgresql = { + # ensureDatabases = [ + # "nextcloud" + # ]; + # ensureUsers = [ + # { + # name = "nextcloud"; + # ensureDBOwnership = true; + # } + # ]; + # }; # nextcloud # users.users.nextcloud = { # isSystemUser = true; @@ -339,127 +382,176 @@ sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00 } { # plausible - users.users.plausible = { - isSystemUser = true; - createHome = false; - group = "plausible"; - }; - users.extraGroups = { - "plausible" = {}; - }; - services.plausible = { - # TODO: enable - enable = true; - database = { - clickhouse.setup = true; - postgres = { - setup = false; - dbname = "plausible"; - }; - }; - server = { - baseUrl = "https://a.lyte.dev"; - disableRegistration = true; - port = 8899; - secretKeybaseFile = config.sops.secrets.plausible-secret-key-base.path; - }; - adminUser = { - activate = false; - email = "daniel@lyte.dev"; - passwordFile = config.sops.secrets.plausible-admin-password.path; - }; - }; - systemd.services.plausible = let - cfg = config.services.plausible; - in { - serviceConfig.User = "plausible"; - serviceConfig.Group = "plausible"; - # since createdb is not gated behind postgres.setup, this breaks - script = lib.mkForce '' - # Elixir does not start up if `RELEASE_COOKIE` is not set, - # even though we set `RELEASE_DISTRIBUTION=none` so the cookie should be unused. - # Thus, make a random one, which should then be ignored. - export RELEASE_COOKIE=$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 20) - export ADMIN_USER_PWD="$(< $CREDENTIALS_DIRECTORY/ADMIN_USER_PWD )" - export SECRET_KEY_BASE="$(< $CREDENTIALS_DIRECTORY/SECRET_KEY_BASE )" + # ensureDatabases = ["plausible"]; + # ensureUsers = [ + # { + # name = "plausible"; + # ensureDBOwnership = true; + # } + # ]; + # users.users.plausible = { + # isSystemUser = true; + # createHome = false; + # group = "plausible"; + # }; + # users.extraGroups = { + # "plausible" = {}; + # }; + # services.plausible = { + # # TODO: enable + # enable = true; + # database = { + # clickhouse.setup = true; + # postgres = { + # setup = false; + # dbname = "plausible"; + # }; + # }; + # server = { + # baseUrl = "https://a.lyte.dev"; + # disableRegistration = true; + # port = 8899; + # secretKeybaseFile = config.sops.secrets.plausible-secret-key-base.path; + # }; + # adminUser = { + # activate = false; + # email = "daniel@lyte.dev"; + # passwordFile = config.sops.secrets.plausible-admin-password.path; + # }; + # }; + # systemd.services.plausible = let + # cfg = config.services.plausible; + # in { + # serviceConfig.User = "plausible"; + # serviceConfig.Group = "plausible"; + # # since createdb is not gated behind postgres.setup, this breaks + # script = lib.mkForce '' + # # Elixir does not start up if `RELEASE_COOKIE` is not set, + # # even though we set `RELEASE_DISTRIBUTION=none` so the cookie should be unused. + # # Thus, make a random one, which should then be ignored. 
+ # export RELEASE_COOKIE=$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 20) + # export ADMIN_USER_PWD="$(< $CREDENTIALS_DIRECTORY/ADMIN_USER_PWD )" + # export SECRET_KEY_BASE="$(< $CREDENTIALS_DIRECTORY/SECRET_KEY_BASE )" - ${lib.optionalString (cfg.mail.smtp.passwordFile != null) - ''export SMTP_USER_PWD="$(< $CREDENTIALS_DIRECTORY/SMTP_USER_PWD )"''} + # ${lib.optionalString (cfg.mail.smtp.passwordFile != null) + # ''export SMTP_USER_PWD="$(< $CREDENTIALS_DIRECTORY/SMTP_USER_PWD )"''} - # setup - ${ - if cfg.database.postgres.setup - then "${cfg.package}/createdb.sh" - else "" - } - ${cfg.package}/migrate.sh - export IP_GEOLOCATION_DB=${pkgs.dbip-country-lite}/share/dbip/dbip-country-lite.mmdb - ${cfg.package}/bin/plausible eval "(Plausible.Release.prepare() ; Plausible.Auth.create_user(\"$ADMIN_USER_NAME\", \"$ADMIN_USER_EMAIL\", \"$ADMIN_USER_PWD\"))" - ${lib.optionalString cfg.adminUser.activate '' - psql -d plausible <<< "UPDATE users SET email_verified=true where email = '$ADMIN_USER_EMAIL';" - ''} + # # setup + # ${ + # if cfg.database.postgres.setup + # then "${cfg.package}/createdb.sh" + # else "" + # } + # ${cfg.package}/migrate.sh + # export IP_GEOLOCATION_DB=${pkgs.dbip-country-lite}/share/dbip/dbip-country-lite.mmdb + # ${cfg.package}/bin/plausible eval "(Plausible.Release.prepare() ; Plausible.Auth.create_user(\"$ADMIN_USER_NAME\", \"$ADMIN_USER_EMAIL\", \"$ADMIN_USER_PWD\"))" + # ${lib.optionalString cfg.adminUser.activate '' + # psql -d plausible <<< "UPDATE users SET email_verified=true where email = '$ADMIN_USER_EMAIL';" + # ''} - exec plausible start - ''; - }; - services.caddy.virtualHosts."a.lyte.dev" = { - extraConfig = '' - reverse_proxy :${toString config.services.plausible.server.port} - ''; - }; - } - { - # clickhouse - environment.etc = { - "clickhouse-server/users.d/disable-logging-query.xml" = { - text = '' - - - - 0 - 0 - - - - ''; - }; - "clickhouse-server/config.d/reduce-logging.xml" = { - text = '' - - - warning - true - - - - - - - - - - - ''; - }; - }; + # exec plausible start + # ''; + # }; + # services.caddy.virtualHosts."a.lyte.dev" = { + # extraConfig = '' + # reverse_proxy :${toString config.services.plausible.server.port} + # ''; + # }; } + # { + # # clickhouse + # environment.etc = { + # "clickhouse-server/users.d/disable-logging-query.xml" = { + # text = '' + # + # + # + # 0 + # 0 + # + # + # + # ''; + # }; + # "clickhouse-server/config.d/reduce-logging.xml" = { + # text = '' + # + # + # warning + # true + # + # + # + # + # + # + # + # + # + # + # ''; + # }; + # }; + # } { # daniel augments users.groups.daniel.members = ["daniel"]; users.groups.nixadmin.members = ["daniel"]; users.users.daniel = { extraGroups = [ - "nixadmin" # write access to /etc/nixos/ files + # "nixadmin" # write access to /etc/nixos/ files "wheel" # sudo access - "caddy" # write access to /storage/files.lyte.dev + "caddy" # write access to public static files "users" # general users group - "jellyfin" # write access to /storage/jellyfin - "audiobookshelf" # write access to /storage/audiobookshelf - "flanilla" + "jellyfin" # write access to jellyfin files + "audiobookshelf" # write access to audiobookshelf files + "flanilla" # minecraft server manager + "forgejo" + ]; + }; + services.postgresql = { + ensureDatabases = ["daniel"]; + ensureUsers = [ + { + name = "daniel"; + ensureDBOwnership = true; + } ]; }; } { + systemd.tmpfiles.settings = { + "10-jellyfin" = { + "/storage/jellyfin" = { + "d" = { + mode = "0770"; + user = "jellyfin"; + group = "wheel"; + }; + }; + 
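+ # each "d" entry has systemd-tmpfiles create the directory at boot,
+ # adjusting mode/user/group to the values given; the entry above is roughly
+ # the tmpfiles.d(5) line `d /storage/jellyfin 0770 jellyfin wheel`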
"/storage/jellyfin/movies" = { + "d" = { + mode = "0770"; + user = "jellyfin"; + group = "wheel"; + }; + }; + "/storage/jellyfin/tv" = { + "d" = { + mode = "0770"; + user = "jellyfin"; + group = "wheel"; + }; + }; + "/storage/jellyfin/music" = { + "d" = { + mode = "0770"; + user = "jellyfin"; + group = "wheel"; + }; + }; + }; + }; services.jellyfin = { enable = true; openFirewall = false; @@ -488,46 +580,31 @@ sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00 # }; } { + systemd.tmpfiles.settings = { + "10-backups" = { + "/storage/postgres" = { + "d" = { + mode = "0770"; + user = "postgres"; + group = "postgres"; + }; + }; + }; + }; services.postgresql = { enable = true; - ensureDatabases = [ - "daniel" - "plausible" - "nextcloud" - # "atuin" - ]; - ensureUsers = [ - { - name = "daniel"; - ensureDBOwnership = true; - } - { - name = "plausible"; - ensureDBOwnership = true; - } - { - name = "nextcloud"; - ensureDBOwnership = true; - } - # { - # name = "atuin"; - # ensureDBOwnership = true; - # } - ]; dataDir = "/storage/postgres"; enableTCPIP = true; package = pkgs.postgresql_15; # https://www.postgresql.org/docs/current/auth-pg-hba-conf.html + # TODO: enable the "daniel" user to access all databases authentication = pkgs.lib.mkOverride 10 '' #type database user auth-method auth-options local all postgres peer map=superuser_map local all daniel peer map=superuser_map local sameuser all peer map=superuser_map - # local plausible plausible peer - # local nextcloud nextcloud peer - # local atuin atuin peer # lan ipv4 host all daniel 192.168.0.0/16 trust @@ -556,121 +633,129 @@ sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00 startAt = "*-*-* 03:00:00"; }; } + # { + # # friends + # users.users.ben = { + # isNormalUser = true; + # packages = [pkgs.vim]; + # openssh.authorizedKeys.keys = [ + # "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKUfLZ+IX85p9355Po2zP1H2tAxiE0rE6IYb8Sf+eF9T ben@benhany.com" + # ]; + # }; + + # users.users.alan = { + # isNormalUser = true; + # packages = [pkgs.vim]; + # openssh.authorizedKeys.keys = [ + # "" + # ]; + # }; + + # networking.firewall.allowedTCPPorts = [ + # 64022 + # ]; + # networking.firewall.allowedUDPPorts = [ + # 64020 + # ]; + # } { - # friends - users.users.ben = { - isNormalUser = true; - packages = [pkgs.vim]; - openssh.authorizedKeys.keys = [ - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKUfLZ+IX85p9355Po2zP1H2tAxiE0rE6IYb8Sf+eF9T ben@benhany.com" - ]; - }; - - users.users.alan = { - isNormalUser = true; - packages = [pkgs.vim]; - openssh.authorizedKeys.keys = [ - "" - ]; - }; - - networking.firewall.allowedTCPPorts = [ - 64022 - ]; - networking.firewall.allowedUDPPorts = [ - 64020 - ]; - } - { - # flanilla family minecraft server - users.groups.flanilla = {}; - users.users.flanilla = { - isSystemUser = true; - createHome = false; - group = "flanilla"; - }; - } - { - # restic backups - users.users.restic = { - # used for other machines to backup to - isNormalUser = true; - openssh.authorizedKeys.keys = - [ - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJbPqzKB09U+i4Kqu136yOjflLZ/J7pYsNulTAd4x903 root@chromebox.h.lyte.dev" - ] - ++ config.users.users.daniel.openssh.authorizedKeys.keys; - }; - # TODO: move previous backups over and put here - # clickhouse and plausible analytics once they're up and running? 
- services.restic.backups = let - defaults = { - passwordFile = "/root/restic-remotebackup-password"; - paths = [ - "/storage/files.lyte.dev" - "/storage/daniel" - "/storage/forgejo" # TODO: should maybe use configuration.nix's services.forgejo.dump ? - "/storage/postgres-backups" - - # https://github.com/dani-garcia/vaultwarden/wiki/Backing-up-your-vault - # specifically, https://github.com/dani-garcia/vaultwarden/wiki/Backing-up-your-vault#sqlite-database-files - "/var/lib/bitwarden_rs" # does this need any sqlite preprocessing? - - # TODO: backup *arr configs? - ]; - initialize = true; - exclude = []; - timerConfig = { - OnCalendar = ["04:45" "17:45"]; + systemd.tmpfiles.settings = { + "10-backups" = { + "/storage/daniel" = { + "d" = { + mode = "0700"; + user = "daniel"; + group = "nogroup"; + }; + }; + "/storage/daniel/critical" = { + "d" = { + mode = "0700"; + user = "daniel"; + group = "nogroup"; + }; }; }; - in { - local = - defaults - // { - passwordFile = "/root/restic-localbackup-password"; - repository = "/storage/backups/local"; - }; - rascal = - defaults - // { - extraOptions = [ - "sftp.command='ssh beefcake@rascal -i /root/.ssh/id_ed25519 -s sftp'" - ]; - repository = "sftp://beefcake@rascal://storage/backups/beefcake"; - }; - # TODO: add ruby? - benland = - defaults - // { - extraOptions = [ - "sftp.command='ssh daniel@n.benhaney.com -p 10022 -i /root/.ssh/id_ed25519 -s sftp'" - ]; - repository = "sftp://daniel@n.benhaney.com://storage/backups/beefcake"; - }; }; + # restic backups + users.groups.restic = {}; + users.users.restic = { + # used for other machines to backup to + isSystemUser = true; + group = "restic"; + openssh.authorizedKeys.keys = [] ++ config.users.users.daniel.openssh.authorizedKeys.keys; + }; + # # TODO: move previous backups over and put here + # # clickhouse and plausible analytics once they're up and running? + # services.restic.backups = let + # defaults = { + # passwordFile = "/root/restic-remotebackup-password"; + # paths = [ + # "/storage/files.lyte.dev" + # "/storage/daniel" + # "/storage/forgejo" # TODO: should maybe use configuration.nix's services.forgejo.dump ? + # "/storage/postgres-backups" + + # # https://github.com/dani-garcia/vaultwarden/wiki/Backing-up-your-vault + # # specifically, https://github.com/dani-garcia/vaultwarden/wiki/Backing-up-your-vault#sqlite-database-files + # "/var/lib/bitwarden_rs" # does this need any sqlite preprocessing? + + # # TODO: backup *arr configs? + # ]; + # initialize = true; + # exclude = []; + # timerConfig = { + # OnCalendar = ["04:45" "17:45"]; + # }; + # }; + # in { + # local = + # defaults + # // { + # passwordFile = "/root/restic-localbackup-password"; + # repository = "/storage/backups/local"; + # }; + # rascal = + # defaults + # // { + # extraOptions = [ + # "sftp.command='ssh beefcake@rascal -i /root/.ssh/id_ed25519 -s sftp'" + # ]; + # repository = "sftp://beefcake@rascal://storage/backups/beefcake"; + # }; + # # TODO: add ruby? 
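+ # # (note: these repository strings use restic's sftp backend, with the
+ # # sftp.command option overriding how the ssh transport is spawned; a repo
+ # # can be sanity-checked by hand with e.g.
+ # # `restic -r sftp://beefcake@rascal://storage/backups/beefcake snapshots`)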
+ # benland = + # defaults + # // { + # passwordFile = config.sops.secrets.restic-ssh-priv-key-benland.path; + # extraOptions = [ + # "sftp.command='ssh daniel@n.benhaney.com -p 10022 -i /root/.ssh/id_ed25519 -s sftp'" + # ]; + # repository = "sftp://daniel@n.benhaney.com://storage/backups/beefcake"; + # }; + # }; } { + systemd.tmpfiles.settings = { + "10-caddy" = { + "/storage/files.lyte.dev" = { + "d" = { + mode = "2775"; + user = "root"; + group = "wheel"; + }; + }; + }; + }; services.caddy = { # TODO: 502 and other error pages enable = true; email = "daniel@lyte.dev"; adapter = "caddyfile"; virtualHosts = { - "dev.h.lyte.dev" = { - extraConfig = '' - reverse_proxy :8000 - ''; - }; "files.lyte.dev" = { # TODO: customize the files.lyte.dev template? extraConfig = '' - # @options { - # method OPTIONS - # } - # @corsOrigin { - # header_regexp Origin ^https?://([a-zA-Z0-9-]+\.)*lyte\.dev$ - # } header { Access-Control-Allow-Origin "{http.request.header.Origin}" Access-Control-Allow-Credentials true @@ -679,9 +764,6 @@ sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00 Vary Origin defer } - # reverse_proxy shuwashuwa:8848 { - # header_down -Access-Control-Allow-Origin - # } file_server browse { # browse template # hide .* @@ -692,11 +774,19 @@ sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00 }; # acmeCA = "https://acme-staging-v02.api.letsencrypt.org/directory"; }; - networking.firewall.allowedTCPPorts = [ - 8000 # random development stuff - ]; } { + systemd.tmpfiles.settings = { + "10-forgejo" = { + "/storage/forgejo" = { + "d" = { + mode = "0700"; + user = "forgejo"; + group = "nogroup"; + }; + }; + }; + }; services.forgejo = { enable = true; stateDir = "/storage/forgejo"; @@ -720,11 +810,10 @@ sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00 COOKIE_SECURE = true; }; log = { - # TODO: raise the log level # LEVEL = "Debug"; }; ui = { - THEMES = "forgejo-auto,forgejo-light,forgejo-dark,catppuccin-mocha-sapphire"; + THEMES = "forgejo-auto,forgejo-light,forgejo-dark"; DEFAULT_THEME = "forgejo-auto"; }; indexer = { @@ -749,7 +838,6 @@ sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00 services.gitea-actions-runner = { # TODO: simple git-based automation would be dope? maybe especially for # mirroring to github super easy? - # enable = true; package = pkgs.forgejo-runner; instances."beefcake" = { enable = true; @@ -811,33 +899,19 @@ sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00 }; } { - # TODO: make the client declarative? 
right now I think it's manually git - # clone'd to /root - systemd.services.deno-netlify-ddns-client = { - serviceConfig.Type = "oneshot"; - path = with pkgs; [curl bash]; - environment = { - NETLIFY_DDNS_RC_FILE = "/root/deno-netlify-ddns-client/.env"; - }; - script = '' - bash /root/deno-netlify-ddns-client/netlify-ddns-client.sh - ''; + services.postgresql = { + ensureDatabases = ["atuin"]; + ensureUsers = [ + { + name = "atuin"; + ensureDBOwnership = true; + } + ]; }; - systemd.timers.deno-netlify-ddns-client = { - wantedBy = ["timers.target"]; - partOf = ["deno-netlify-ddns-client.service"]; - timerConfig = { - OnBootSec = "10sec"; - OnUnitActiveSec = "5min"; - Unit = "deno-netlify-ddns-client.service"; - }; - }; - } - { services.atuin = { enable = true; database = { - createLocally = true; + createLocally = false; # uri = "postgresql://atuin@localhost:5432/atuin"; }; openRegistration = false; @@ -846,473 +920,505 @@ extraConfig = ''reverse_proxy :${toString config.services.atuin.port}''; }; } + # { + # # jland minecraft server + # users.groups.jland = { + # gid = 982; + # }; + # users.users.jland = { + # uid = 986; + # isSystemUser = true; + # createHome = false; + # group = "jland"; + # }; + # virtualisation.oci-containers.containers.minecraft-jland = { + # autoStart = false; + + # # sending commands: https://docker-minecraft-server.readthedocs.io/en/latest/commands/ + # image = "docker.io/itzg/minecraft-server"; + # # user = "${toString config.users.users.jland.uid}:${toString config.users.groups.jland.gid}"; + # extraOptions = [ + # "--tty" + # "--interactive" + # ]; + # environment = { + # EULA = "true"; + # # UID = toString config.users.users.jland.uid; + # # GID = toString config.users.groups.jland.gid; + # STOP_SERVER_ANNOUNCE_DELAY = "20"; + # TZ = "America/Chicago"; + # VERSION = "1.20.1"; + # MEMORY = "8G"; + # MAX_MEMORY = "16G"; + # TYPE = "FORGE"; + # FORGE_VERSION = "47.1.3"; + # ALLOW_FLIGHT = "true"; + # ENABLE_QUERY = "true"; + + # MODPACK = "/data/origination-files/Server-Files-0.2.14.zip"; + + # # TYPE = "AUTO_CURSEFORGE"; + # # CF_SLUG = "monumental-experience"; + # # CF_FILE_ID = "4826863"; # 2.2.53 + + # # due to + # # Nov 02 13:45:22 beefcake minecraft-jland[2738672]: me.itzg.helpers.errors.GenericException: The modpack authors have indicated this file is not allowed for project distribution. Please download the client zip file from https://www.curseforge.com/minecraft/modpacks/monumental-experience and pass via CF_MODPACK_ZIP environment variable or place indownloads repo directory. + # # we must upload manually + # # CF_MODPACK_ZIP = "/data/origination-files/Monumental+Experience-2.2.53.zip"; + + # # ENABLE_AUTOPAUSE = "true"; # TODO: must increase or disable max-tick-time + # # May also have mod/loader incompatibilities? 
+ # # https://docker-minecraft-server.readthedocs.io/en/latest/misc/autopause-autostop/autopause/ + # }; + # environmentFiles = [ + # # config.sops.secrets."jland.env".path + # ]; + # ports = ["26965:25565"]; + # volumes = [ + # "/storage/jland/data:/data" + # "/storage/jland/worlds:/worlds" + # ]; + # }; + # networking.firewall.allowedTCPPorts = [ + # 26965 + # ]; + # } + # { + # # dawncraft minecraft server + # systemd.tmpfiles.rules = [ + # "d /storage/dawncraft/ 0770 1000 1000 -" + # "d /storage/dawncraft/data/ 0770 1000 1000 -" + # "d /storage/dawncraft/worlds/ 0770 1000 1000 -" + # "d /storage/dawncraft/downloads/ 0770 1000 1000 -" + # ]; + # virtualisation.oci-containers.containers.minecraft-dawncraft = { + # autoStart = false; + + # # sending commands: https://docker-minecraft-server.readthedocs.io/en/latest/commands/ + # image = "docker.io/itzg/minecraft-server"; + # extraOptions = [ + # "--tty" + # "--interactive" + # ]; + # environment = { + # EULA = "true"; + + # STOP_SERVER_ANNOUNCE_DELAY = "20"; + # TZ = "America/Chicago"; + # VERSION = "1.18.2"; + # MEMORY = "8G"; + # MAX_MEMORY = "32G"; + + # ALLOW_FLIGHT = "true"; + # ENABLE_QUERY = "true"; + # SERVER_PORT = "26968"; + # QUERY_PORT = "26968"; + + # TYPE = "AUTO_CURSEFORGE"; + # CF_SLUG = "dawn-craft"; + + # CF_EXCLUDE_MODS = "368398"; + # CF_FORCE_SYNCHRONIZE = "true"; + # # CF_FILE_ID = "5247696"; # 2.0.7 server + # }; + # environmentFiles = [ + # config.sops.secrets."dawncraft.env".path + # ]; + # ports = ["26968:26968/tcp" "26968:26968/udp"]; + # volumes = [ + # "/storage/dawncraft/data:/data" + # "/storage/dawncraft/worlds:/worlds" + # "/storage/dawncraft/downloads:/downloads" + # ]; + # }; + # networking.firewall.allowedTCPPorts = [ + # 26968 + # ]; + # } + # { + # # flanilla family minecraft server + # users.groups.flanilla = {}; + # users.users.flanilla = { + # isSystemUser = true; + # createHome = false; + # group = "flanilla"; + # }; + # virtualisation.oci-containers.containers.minecraft-flanilla = { + # autoStart = true; + + # image = "docker.io/itzg/minecraft-server"; + # user = "${toString config.users.users.flanilla.uid}:${toString config.users.groups.flanilla.gid}"; + # extraOptions = ["--tty" "--interactive"]; + # environment = { + # EULA = "true"; + # UID = toString config.users.users.flanilla.uid; + # GID = toString config.users.groups.flanilla.gid; + # STOP_SERVER_ANNOUNCE_DELAY = "20"; + # TZ = "America/Chicago"; + # VERSION = "1.20.4"; + # OPS = "lytedev"; + # MODE = "creative"; + # DIFFICULTY = "peaceful"; + # ONLINE_MODE = "false"; + # MEMORY = "8G"; + # MAX_MEMORY = "16G"; + # ALLOW_FLIGHT = "true"; + # ENABLE_QUERY = "true"; + # ENABLE_COMMAND_BLOCK = "true"; + # }; + + # environmentFiles = [ + # # config.sops.secrets."flanilla.env".path + # ]; + + # ports = ["26966:25565"]; + + # volumes = [ + # "/storage/flanilla/data:/data" + # "/storage/flanilla/worlds:/worlds" + # ]; + # }; + # networking.firewall.allowedTCPPorts = [ + # 26966 + # ]; + # } + # ({options, ...}: let + # toml = pkgs.formats.toml {}; + # package = pkgs.kanidm; + # domain = "idm.h.lyte.dev"; + # name = "kanidm"; + # storage = "/storage/${name}"; + # cert = "${storage}/certs/idm.h.lyte.dev.crt"; + # key = "${storage}/certs/idm.h.lyte.dev.key"; + + # serverSettings = { + # inherit domain; + # bindaddress = "127.0.0.1:8443"; + # # ldapbindaddress + # tls_chain = cert; + # tls_key = key; + # origin = "https://${domain}"; + # db_path = "${storage}/data/kanidm.db"; + # log_level = "info"; + # online_backup = { + # path = 
"${storage}/backups/"; + # schedule = "00 22 * * *"; + # # versions = 7; + # }; + # }; + + # unixdSettings = { + # hsm_pin_path = "/var/cache/${name}-unixd/hsm-pin"; + # pam_allowed_login_groups = []; + # }; + + # clientSettings = { + # uri = "https://idm.h.lyte.dev"; + # }; + + # user = name; + # group = name; + # serverConfigFile = toml.generate "server.toml" serverSettings; + # unixdConfigFile = toml.generate "kanidm-unixd.toml" unixdSettings; + # clientConfigFile = toml.generate "kanidm-config.toml" clientSettings; + + # defaultServiceConfig = { + # BindReadOnlyPaths = [ + # "/nix/store" + # "-/etc/resolv.conf" + # "-/etc/nsswitch.conf" + # "-/etc/hosts" + # "-/etc/localtime" + # ]; + # CapabilityBoundingSet = []; + # # ProtectClock= adds DeviceAllow=char-rtc r + # DeviceAllow = ""; + # # Implies ProtectSystem=strict, which re-mounts all paths + # # DynamicUser = true; + # LockPersonality = true; + # MemoryDenyWriteExecute = true; + # NoNewPrivileges = true; + # PrivateDevices = true; + # PrivateMounts = true; + # PrivateNetwork = true; + # PrivateTmp = true; + # PrivateUsers = true; + # ProcSubset = "pid"; + # ProtectClock = true; + # ProtectHome = true; + # ProtectHostname = true; + # # Would re-mount paths ignored by temporary root + # #ProtectSystem = "strict"; + # ProtectControlGroups = true; + # ProtectKernelLogs = true; + # ProtectKernelModules = true; + # ProtectKernelTunables = true; + # ProtectProc = "invisible"; + # RestrictAddressFamilies = []; + # RestrictNamespaces = true; + # RestrictRealtime = true; + # RestrictSUIDSGID = true; + # SystemCallArchitectures = "native"; + # SystemCallFilter = ["@system-service" "~@privileged @resources @setuid @keyring"]; + # # Does not work well with the temporary root + # #UMask = "0066"; + # }; + # in { + # # kanidm + + # config = { + # # we need a mechanism to get the certificates that caddy provisions for us + # systemd.timers."copy-kanidm-certificates-from-caddy" = { + # wantedBy = ["timers.target"]; + # timerConfig = { + # OnBootSec = "10m"; # 10 minutes after booting + # OnUnitActiveSec = "5m"; # every 5 minutes afterwards + # Unit = "copy-kanidm-certificates-from-caddy.service"; + # }; + # }; + + # systemd.services."copy-kanidm-certificates-from-caddy" = { + # script = '' + # umask 077 + # install -d -m 0700 -o "${user}" -g "${group}" "${storage}/data" "${storage}/certs" + # cd /var/lib/caddy/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/idm.h.lyte.dev + # install -m 0700 -o "${user}" -g "${group}" idm.h.lyte.dev.key idm.h.lyte.dev.crt "${storage}/certs" + # ''; + # path = with pkgs; [rsync]; + # serviceConfig = { + # Type = "oneshot"; + # User = "root"; + # }; + # }; + + # environment.systemPackages = [package]; + + # # TODO: should I use this for /storage/kanidm/certs etc.? 
+ # systemd.tmpfiles.settings."10-kanidm" = { + # "${serverSettings.online_backup.path}".d = { + # inherit user group; + # mode = "0700"; + # }; + # # "${builtins.dirOf unixdSettings.hsm_pin_path}".d = { + # # user = "${user}-unixd"; + # # group = "${group}-unixd"; + # # mode = "0700"; + # # }; + # "${storage}/data".d = { + # inherit user group; + # mode = "0700"; + # }; + # "${storage}/certs".d = { + # inherit user group; + # mode = "0700"; + # }; + # }; + + # users.groups = { + # ${group} = {}; + # "${group}-unixd" = {}; + # }; + + # users.users.${user} = { + # inherit group; + # description = "kanidm server"; + # isSystemUser = true; + # packages = [package]; + # }; + # users.users."${user}-unixd" = { + # group = "${group}-unixd"; + # description = lib.mkForce "kanidm PAM daemon"; + # isSystemUser = true; + # }; + + # # the kanidm module in nixpkgs was not working for me, so I rolled my own + # # loosely based off it + # systemd.services.kanidm = { + # enable = true; + # path = with pkgs; [openssl] ++ [package]; + # description = "kanidm identity management daemon"; + # wantedBy = ["multi-user.target"]; + # after = ["network.target"]; + # requires = ["copy-kanidm-certificates-from-caddy.service"]; + # script = '' + # pwd + # ls -la + # ls -laR /storage/kanidm + # ${package}/bin/kanidmd server -c ${serverConfigFile} + # ''; + # # environment.RUST_LOG = serverSettings.log_level; + # serviceConfig = lib.mkMerge [ + # defaultServiceConfig + # { + # StateDirectory = name; + # StateDirectoryMode = "0700"; + # RuntimeDirectory = "${name}d"; + # User = user; + # Group = group; + + # AmbientCapabilities = ["CAP_NET_BIND_SERVICE"]; + # CapabilityBoundingSet = ["CAP_NET_BIND_SERVICE"]; + # PrivateUsers = lib.mkForce false; + # PrivateNetwork = lib.mkForce false; + # RestrictAddressFamilies = ["AF_INET" "AF_INET6" "AF_UNIX"]; + # # TemporaryFileSystem = "/:ro"; + # BindReadOnlyPaths = [ + # "${storage}/certs" + # ]; + # BindPaths = [ + # "${storage}/data" + + # # socket + # "/run/${name}d:/run/${name}d" + + # # backups + # serverSettings.online_backup.path + # ]; + # } + # ]; + # }; + + # systemd.services.kanidm-unixd = { + # description = "Kanidm PAM daemon"; + # wantedBy = ["multi-user.target"]; + # after = ["network.target"]; + # restartTriggers = [unixdConfigFile clientConfigFile]; + # serviceConfig = lib.mkMerge [ + # defaultServiceConfig + # { + # CacheDirectory = "${name}-unixd"; + # CacheDirectoryMode = "0700"; + # RuntimeDirectory = "${name}-unixd"; + # ExecStart = "${package}/bin/kanidm_unixd"; + # User = "${user}-unixd"; + # Group = "${group}-unixd"; + + # BindReadOnlyPaths = [ + # "-/etc/kanidm" + # "-/etc/static/kanidm" + # "-/etc/ssl" + # "-/etc/static/ssl" + # "-/etc/passwd" + # "-/etc/group" + # ]; + + # BindPaths = [ + # # socket + # "/run/kanidm-unixd:/var/run/kanidm-unixd" + # ]; + + # # Needs to connect to kanidmd + # PrivateNetwork = lib.mkForce false; + # RestrictAddressFamilies = ["AF_INET" "AF_INET6" "AF_UNIX"]; + # TemporaryFileSystem = "/:ro"; + # } + # ]; + # environment.RUST_LOG = serverSettings.log_level; + # }; + + # systemd.services.kanidm-unixd-tasks = { + # description = "Kanidm PAM home management daemon"; + # wantedBy = ["multi-user.target"]; + # after = ["network.target" "kanidm-unixd.service"]; + # partOf = ["kanidm-unixd.service"]; + # restartTriggers = [unixdConfigFile clientConfigFile]; + # serviceConfig = { + # ExecStart = "${package}/bin/kanidm_unixd_tasks"; + + # BindReadOnlyPaths = [ + # "/nix/store" + # "-/etc/resolv.conf" + # "-/etc/nsswitch.conf" + # 
"-/etc/hosts" + # "-/etc/localtime" + # "-/etc/kanidm" + # "-/etc/static/kanidm" + # ]; + # BindPaths = [ + # # To manage home directories + # "/home" + + # # To connect to kanidm-unixd + # "/run/kanidm-unixd:/var/run/kanidm-unixd" + # ]; + # # CAP_DAC_OVERRIDE is needed to ignore ownership of unixd socket + # CapabilityBoundingSet = ["CAP_CHOWN" "CAP_FOWNER" "CAP_DAC_OVERRIDE" "CAP_DAC_READ_SEARCH"]; + # IPAddressDeny = "any"; + # # Need access to users + # PrivateUsers = false; + # # Need access to home directories + # ProtectHome = false; + # RestrictAddressFamilies = ["AF_UNIX"]; + # TemporaryFileSystem = "/:ro"; + # Restart = "on-failure"; + # }; + # environment.RUST_LOG = serverSettings.log_level; + # }; + + # environment.etc = { + # "kanidm/server.toml".source = serverConfigFile; + # "kanidm/config".source = clientConfigFile; + # "kanidm/unixd".source = unixdConfigFile; + # }; + + # system.nssModules = [package]; + + # system.nssDatabases.group = [name]; + # system.nssDatabases.passwd = [name]; + + # # environment.etc."kanidm/server.toml" = { + # # mode = "0600"; + # # group = "kanidm"; + # # user = "kanidm"; + # # }; + + # # environment.etc."kanidm/config" = { + # # mode = "0600"; + # # group = "kanidm"; + # # user = "kanidm"; + # # }; + + # services.caddy.virtualHosts."idm.h.lyte.dev" = { + # extraConfig = ''reverse_proxy https://idm.h.lyte.dev:8443''; + # }; + + # networking = { + # extraHosts = '' + # ::1 idm.h.lyte.dev + # 127.0.0.1 idm.h.lyte.dev + # ''; + # }; + # }; + # }) { - # jland minecraft server - users.groups.jland = { - gid = 982; - }; - users.users.jland = { - uid = 986; - isSystemUser = true; - createHome = false; - group = "jland"; - }; - virtualisation.oci-containers.containers.minecraft-jland = { - autoStart = false; - - # sending commands: https://docker-minecraft-server.readthedocs.io/en/latest/commands/ - image = "docker.io/itzg/minecraft-server"; - # user = "${toString config.users.users.jland.uid}:${toString config.users.groups.jland.gid}"; - extraOptions = [ - "--tty" - "--interactive" - ]; - environment = { - EULA = "true"; - # UID = toString config.users.users.jland.uid; - # GID = toString config.users.groups.jland.gid; - STOP_SERVER_ANNOUNCE_DELAY = "20"; - TZ = "America/Chicago"; - VERSION = "1.20.1"; - MEMORY = "8G"; - MAX_MEMORY = "16G"; - TYPE = "FORGE"; - FORGE_VERSION = "47.1.3"; - ALLOW_FLIGHT = "true"; - ENABLE_QUERY = "true"; - - MODPACK = "/data/origination-files/Server-Files-0.2.14.zip"; - - # TYPE = "AUTO_CURSEFORGE"; - # CF_SLUG = "monumental-experience"; - # CF_FILE_ID = "4826863"; # 2.2.53 - - # due to - # Nov 02 13:45:22 beefcake minecraft-jland[2738672]: me.itzg.helpers.errors.GenericException: The modpack authors have indicated this file is not allowed for project distribution. Please download the client zip file from https://www.curseforge.com/minecraft/modpacks/monumental-experience and pass via CF_MODPACK_ZIP environment variable or place indownloads repo directory. - # we must upload manually - # CF_MODPACK_ZIP = "/data/origination-files/Monumental+Experience-2.2.53.zip"; - - # ENABLE_AUTOPAUSE = "true"; # TODO: must increate or disable max-tick-time - # May also have mod/loader incompatibilities? 
- # https://docker-minecraft-server.readthedocs.io/en/latest/misc/autopause-autostop/autopause/ - }; - environmentFiles = [ - # config.sops.secrets."jland.env".path - ]; - ports = ["26965:25565"]; - volumes = [ - "/storage/jland/data:/data" - "/storage/jland/worlds:/worlds" - ]; - }; - networking.firewall.allowedTCPPorts = [ - 26965 - ]; - } - { - # dawncraft minecraft server - systemd.tmpfiles.rules = [ - "d /storage/dawncraft/ 0770 1000 1000 -" - "d /storage/dawncraft/data/ 0770 1000 1000 -" - "d /storage/dawncraft/worlds/ 0770 1000 1000 -" - "d /storage/dawncraft/downloads/ 0770 1000 1000 -" - ]; - virtualisation.oci-containers.containers.minecraft-dawncraft = { - autoStart = false; - - # sending commands: https://docker-minecraft-server.readthedocs.io/en/latest/commands/ - image = "docker.io/itzg/minecraft-server"; - extraOptions = [ - "--tty" - "--interactive" - ]; - environment = { - EULA = "true"; - - STOP_SERVER_ANNOUNCE_DELAY = "20"; - TZ = "America/Chicago"; - VERSION = "1.18.2"; - MEMORY = "8G"; - MAX_MEMORY = "32G"; - - ALLOW_FLIGHT = "true"; - ENABLE_QUERY = "true"; - SERVER_PORT = "26968"; - QUERY_PORT = "26968"; - - TYPE = "AUTO_CURSEFORGE"; - CF_SLUG = "dawn-craft"; - - CF_EXCLUDE_MODS = "368398"; - CF_FORCE_SYNCHRONIZE = "true"; - # CF_FILE_ID = "5247696"; # 2.0.7 server - }; - environmentFiles = [ - config.sops.secrets."dawncraft.env".path - ]; - ports = ["26968:26968/tcp" "26968:26968/udp"]; - volumes = [ - "/storage/dawncraft/data:/data" - "/storage/dawncraft/worlds:/worlds" - "/storage/dawncraft/downloads:/downloads" - ]; - }; - networking.firewall.allowedTCPPorts = [ - 26968 - ]; - } - { - virtualisation.oci-containers.containers.minecraft-flanilla = { - autoStart = true; - - image = "docker.io/itzg/minecraft-server"; - user = "${toString config.users.users.flanilla.uid}:${toString config.users.groups.flanilla.gid}"; - extraOptions = ["--tty" "--interactive"]; - environment = { - EULA = "true"; - UID = toString config.users.users.flanilla.uid; - GID = toString config.users.groups.flanilla.gid; - STOP_SERVER_ANNOUNCE_DELAY = "20"; - TZ = "America/Chicago"; - VERSION = "1.20.4"; - OPS = "lytedev"; - MODE = "creative"; - DIFFICULTY = "peaceful"; - ONLINE_MODE = "false"; - MEMORY = "8G"; - MAX_MEMORY = "16G"; - ALLOW_FLIGHT = "true"; - ENABLE_QUERY = "true"; - ENABLE_COMMAND_BLOCK = "true"; - }; - - environmentFiles = [ - # config.sops.secrets."flanilla.env".path - ]; - - ports = ["26966:25565"]; - - volumes = [ - "/storage/flanilla/data:/data" - "/storage/flanilla/worlds:/worlds" - ]; - }; - networking.firewall.allowedTCPPorts = [ - 26966 - ]; - } - ({options, ...}: let - toml = pkgs.formats.toml {}; - package = pkgs.kanidm; - domain = "idm.h.lyte.dev"; - name = "kanidm"; - storage = "/storage/${name}"; - cert = "${storage}/certs/idm.h.lyte.dev.crt"; - key = "${storage}/certs/idm.h.lyte.dev.key"; - - serverSettings = { - inherit domain; - bindaddress = "127.0.0.1:8443"; - # ldapbindaddress - tls_chain = cert; - tls_key = key; - origin = "https://${domain}"; - db_path = "${storage}/data/kanidm.db"; - log_level = "info"; - online_backup = { - path = "${storage}/backups/"; - schedule = "00 22 * * *"; - # versions = 7; - }; - }; - - unixdSettings = { - hsm_pin_path = "/var/cache/${name}-unixd/hsm-pin"; - pam_allowed_login_groups = []; - }; - - clientSettings = { - uri = "https://idm.h.lyte.dev"; - }; - - user = name; - group = name; - serverConfigFile = toml.generate "server.toml" serverSettings; - unixdConfigFile = toml.generate "kanidm-unixd.toml" unixdSettings; - 
clientConfigFile = toml.generate "kanidm-config.toml" clientSettings; - - defaultServiceConfig = { - BindReadOnlyPaths = [ - "/nix/store" - "-/etc/resolv.conf" - "-/etc/nsswitch.conf" - "-/etc/hosts" - "-/etc/localtime" - ]; - CapabilityBoundingSet = []; - # ProtectClock= adds DeviceAllow=char-rtc r - DeviceAllow = ""; - # Implies ProtectSystem=strict, which re-mounts all paths - # DynamicUser = true; - LockPersonality = true; - MemoryDenyWriteExecute = true; - NoNewPrivileges = true; - PrivateDevices = true; - PrivateMounts = true; - PrivateNetwork = true; - PrivateTmp = true; - PrivateUsers = true; - ProcSubset = "pid"; - ProtectClock = true; - ProtectHome = true; - ProtectHostname = true; - # Would re-mount paths ignored by temporary root - #ProtectSystem = "strict"; - ProtectControlGroups = true; - ProtectKernelLogs = true; - ProtectKernelModules = true; - ProtectKernelTunables = true; - ProtectProc = "invisible"; - RestrictAddressFamilies = []; - RestrictNamespaces = true; - RestrictRealtime = true; - RestrictSUIDSGID = true; - SystemCallArchitectures = "native"; - SystemCallFilter = ["@system-service" "~@privileged @resources @setuid @keyring"]; - # Does not work well with the temporary root - #UMask = "0066"; - }; - in { - # kanidm - - config = { - # we need a mechanism to get the certificates that caddy provisions for us - systemd.timers."copy-kanidm-certificates-from-caddy" = { - wantedBy = ["timers.target"]; - timerConfig = { - OnBootSec = "10m"; # 10 minutes after booting - OnUnitActiveSec = "5m"; # every 5 minutes afterwards - Unit = "copy-kanidm-certificates-from-caddy.service"; + systemd.tmpfiles.settings = { + "10-audiobookshelf" = { + "/storage/audiobookshelf" = { + "d" = { + mode = "0770"; + user = "audiobookshelf"; + group = "wheel"; + }; + }; + "/storage/audiobookshelf/audiobooks" = { + "d" = { + mode = "0770"; + user = "audiobookshelf"; + group = "wheel"; + }; + }; + "/storage/audiobookshelf/podcasts" = { + "d" = { + mode = "0770"; + user = "audiobookshelf"; + group = "wheel"; + }; }; }; - - systemd.services."copy-kanidm-certificates-from-caddy" = { - script = '' - umask 077 - install -d -m 0700 -o "${user}" -g "${group}" "${storage}/data" "${storage}/certs" - cd /var/lib/caddy/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/idm.h.lyte.dev - install -m 0700 -o "${user}" -g "${group}" idm.h.lyte.dev.key idm.h.lyte.dev.crt "${storage}/certs" - ''; - path = with pkgs; [rsync]; - serviceConfig = { - Type = "oneshot"; - User = "root"; - }; - }; - - environment.systemPackages = [package]; - - # TODO: should I use this for /storage/kanidm/certs etc.? 
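If tmpfiles owned those directories (the "10-kanidm" stanza just below already declares both ${storage}/data and ${storage}/certs), the copy service above could likely drop its `install -d` step and keep only the copy itself. A minimal sketch, reusing the `user`, `group`, and `storage` bindings from the surrounding `let`:

    systemd.services."copy-kanidm-certificates-from-caddy".script = ''
      umask 077
      cd /var/lib/caddy/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/idm.h.lyte.dev
      install -m 0700 -o "${user}" -g "${group}" idm.h.lyte.dev.key idm.h.lyte.dev.crt "${storage}/certs"
    '';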
- systemd.tmpfiles.settings."10-kanidm" = { - "${serverSettings.online_backup.path}".d = { - inherit user group; - mode = "0700"; - }; - # "${builtins.dirOf unixdSettings.hsm_pin_path}".d = { - # user = "${user}-unixd"; - # group = "${group}-unixd"; - # mode = "0700"; - # }; - "${storage}/data".d = { - inherit user group; - mode = "0700"; - }; - "${storage}/certs".d = { - inherit user group; - mode = "0700"; - }; - }; - - users.groups = { - ${group} = {}; - "${group}-unixd" = {}; - }; - - users.users.${user} = { - inherit group; - description = "kanidm server"; - isSystemUser = true; - packages = [package]; - }; - users.users."${user}-unixd" = { - group = "${group}-unixd"; - description = lib.mkForce "kanidm PAM daemon"; - isSystemUser = true; - }; - - # the kanidm module in nixpkgs was not working for me, so I rolled my own - # loosely based off it - systemd.services.kanidm = { - enable = true; - path = with pkgs; [openssl] ++ [package]; - description = "kanidm identity management daemon"; - wantedBy = ["multi-user.target"]; - after = ["network.target"]; - requires = ["copy-kanidm-certificates-from-caddy.service"]; - script = '' - pwd - ls -la - ls -laR /storage/kanidm - ${package}/bin/kanidmd server -c ${serverConfigFile} - ''; - # environment.RUST_LOG = serverSettings.log_level; - serviceConfig = lib.mkMerge [ - defaultServiceConfig - { - StateDirectory = name; - StateDirectoryMode = "0700"; - RuntimeDirectory = "${name}d"; - User = user; - Group = group; - - AmbientCapabilities = ["CAP_NET_BIND_SERVICE"]; - CapabilityBoundingSet = ["CAP_NET_BIND_SERVICE"]; - PrivateUsers = lib.mkForce false; - PrivateNetwork = lib.mkForce false; - RestrictAddressFamilies = ["AF_INET" "AF_INET6" "AF_UNIX"]; - # TemporaryFileSystem = "/:ro"; - BindReadOnlyPaths = [ - "${storage}/certs" - ]; - BindPaths = [ - "${storage}/data" - - # socket - "/run/${name}d:/run/${name}d" - - # backups - serverSettings.online_backup.path - ]; - } - ]; - }; - - systemd.services.kanidm-unixd = { - description = "Kanidm PAM daemon"; - wantedBy = ["multi-user.target"]; - after = ["network.target"]; - restartTriggers = [unixdConfigFile clientConfigFile]; - serviceConfig = lib.mkMerge [ - defaultServiceConfig - { - CacheDirectory = "${name}-unixd"; - CacheDirectoryMode = "0700"; - RuntimeDirectory = "${name}-unixd"; - ExecStart = "${package}/bin/kanidm_unixd"; - User = "${user}-unixd"; - Group = "${group}-unixd"; - - BindReadOnlyPaths = [ - "-/etc/kanidm" - "-/etc/static/kanidm" - "-/etc/ssl" - "-/etc/static/ssl" - "-/etc/passwd" - "-/etc/group" - ]; - - BindPaths = [ - # socket - "/run/kanidm-unixd:/var/run/kanidm-unixd" - ]; - - # Needs to connect to kanidmd - PrivateNetwork = lib.mkForce false; - RestrictAddressFamilies = ["AF_INET" "AF_INET6" "AF_UNIX"]; - TemporaryFileSystem = "/:ro"; - } - ]; - environment.RUST_LOG = serverSettings.log_level; - }; - - systemd.services.kanidm-unixd-tasks = { - description = "Kanidm PAM home management daemon"; - wantedBy = ["multi-user.target"]; - after = ["network.target" "kanidm-unixd.service"]; - partOf = ["kanidm-unixd.service"]; - restartTriggers = [unixdConfigFile clientConfigFile]; - serviceConfig = { - ExecStart = "${package}/bin/kanidm_unixd_tasks"; - - BindReadOnlyPaths = [ - "/nix/store" - "-/etc/resolv.conf" - "-/etc/nsswitch.conf" - "-/etc/hosts" - "-/etc/localtime" - "-/etc/kanidm" - "-/etc/static/kanidm" - ]; - BindPaths = [ - # To manage home directories - "/home" - - # To connect to kanidm-unixd - "/run/kanidm-unixd:/var/run/kanidm-unixd" - ]; - # CAP_DAC_OVERRIDE is needed 
to ignore ownership of unixd socket - CapabilityBoundingSet = ["CAP_CHOWN" "CAP_FOWNER" "CAP_DAC_OVERRIDE" "CAP_DAC_READ_SEARCH"]; - IPAddressDeny = "any"; - # Need access to users - PrivateUsers = false; - # Need access to home directories - ProtectHome = false; - RestrictAddressFamilies = ["AF_UNIX"]; - TemporaryFileSystem = "/:ro"; - Restart = "on-failure"; - }; - environment.RUST_LOG = serverSettings.log_level; - }; - - environment.etc = { - "kanidm/server.toml".source = serverConfigFile; - "kanidm/config".source = clientConfigFile; - "kanidm/unixd".source = unixdConfigFile; - }; - - system.nssModules = [package]; - - system.nssDatabases.group = [name]; - system.nssDatabases.passwd = [name]; - - # environment.etc."kanidm/server.toml" = { - # mode = "0600"; - # group = "kanidm"; - # user = "kanidm"; - # }; - - # environment.etc."kanidm/config" = { - # mode = "0600"; - # group = "kanidm"; - # user = "kanidm"; - # }; - - services.caddy.virtualHosts."idm.h.lyte.dev" = { - extraConfig = ''reverse_proxy https://idm.h.lyte.dev:8443''; - }; - - networking = { - extraHosts = '' - ::1 idm.h.lyte.dev - 127.0.0.1 idm.h.lyte.dev - ''; - }; }; - }) - { services.audiobookshelf = { enable = true; - # dataDir = "/storage/audiobookshelf"; + dataDir = "/storage/audiobookshelf"; port = 8523; }; services.caddy.virtualHosts."audio.lyte.dev" = { @@ -1331,42 +1437,20 @@ sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00 # or # users.users.example-user.extraGroups = [ config.users.groups.keys.name ]; - # TODO: directory attributes for /storage subdirectories? - # example: user daniel should be able to write to /storage/files.lyte.dev and - # caddy should be able to serve it - # TODO: declarative directory quotas? for storage/$USER and /home/$USER - # TODO: would be nice to get ALL the storage stuff declared in here - # should I be using btrfs subvolumes? can I capture file ownership, permissions, and ACLs?
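The ownership half of those TODOs is expressible today with the same tmpfiles mechanism the audiobookshelf directories use above; quotas are not, and would need btrfs/zfs tooling. A minimal sketch for the files.lyte.dev example, assuming caddy runs under its default `caddy` group:

    systemd.tmpfiles.settings."10-files-lyte-dev" = {
      "/storage/files.lyte.dev".d = {
        user = "daniel"; # daniel can write
        group = "caddy"; # caddy can traverse and serve
        mode = "0750";
      };
    };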
- - virtualisation.oci-containers.backend = "podman"; - virtualisation.podman = { - # autoPrune.enable = true; - # defaultNetwork.settings = { - # driver = "host"; - # }; - }; environment.systemPackages = with pkgs; [ - linuxquota + restic + btrfs-progs + zfs + smartmontools htop bottom curl xh ]; + services.tailscale.useRoutingFeatures = "server"; - services.openssh = { - listenAddresses = [ - { - addr = "0.0.0.0"; - port = 64022; - } - { - addr = "0.0.0.0"; - port = 22; - } - ]; - }; # https://github.com/NixOS/nixpkgs/blob/04af42f3b31dba0ef742d254456dc4c14eedac86/nixos/modules/services/misc/lidarr.nix#L72 # services.lidarr = { @@ -1389,12 +1473,12 @@ sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00 # listenPort = 6767; # }; - networking.firewall.allowedTCPPorts = [9876 9877]; - networking.firewall.allowedUDPPorts = [9876 9877]; - networking.firewall.allowedUDPPortRanges = [ - { - from = 27000; - to = 27100; - } - ]; + # networking.firewall.allowedTCPPorts = [9876 9877]; + # networking.firewall.allowedUDPPorts = [9876 9877]; + # networking.firewall.allowedUDPPortRanges = [ + # { + # from = 27000; + # to = 27100; + # } + # ]; } diff --git a/nixos/htpc.nix b/nixos/htpc.nix index b0d8072..e07eaf1 100644 --- a/nixos/htpc.nix +++ b/nixos/htpc.nix @@ -27,6 +27,17 @@ swapDevices = []; + hardware.bluetooth = { + enable = true; + # package = pkgs.bluez; + settings = { + General = { + AutoConnect = true; + MultiProfile = "multiple"; + }; + }; + }; + nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux"; hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware; } diff --git a/nixos/rascal.nix b/nixos/rascal.nix index 69ac0e3..bdb3a09 100644 --- a/nixos/rascal.nix +++ b/nixos/rascal.nix @@ -28,11 +28,13 @@ users.users = { beefcake = { # used for restic backups + # TODO: can this be a system user? 
isNormalUser = true; openssh.authorizedKeys.keys = config.users.users.daniel.openssh.authorizedKeys.keys ++ [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIK7HrojwoyHED+A/FzRjYmIL0hzofwBd9IYHH6yV0oPO root@beefcake" + "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAOEI82VdbyR1RYqSnFtlffHBtHFdXO0v9RmQH7GkfXo restic@beefcake" ]; }; @@ -60,5 +62,7 @@ }; }; + services.tailscale.useRoutingFeatures = "server"; + system.stateVersion = "24.05"; } diff --git a/nixos/router.nix b/nixos/router.nix index 2263075..fdd730e 100644 --- a/nixos/router.nix +++ b/nixos/router.nix @@ -337,7 +337,7 @@ in { ConfigureWithoutCarrier = true; # IPv6AcceptRA = false; IPv6SendRA = true; - DHCPPrefixDelegation = true; + DHCPv6PrefixDelegation = true; }; }; @@ -406,7 +406,10 @@ in { cache-size = "10000"; - dhcp-range = with dhcp_lease_space; ["${interfaces.lan.name},${min},${max},${netmask},24h"]; + dhcp-range = with dhcp_lease_space; [ + "${interfaces.lan.name},${min},${max},${netmask},24h" + "::,constructor:${interfaces.lan.name},ra-stateless,ra-names,4h" + ]; except-interface = interfaces.wan.name; interface = interfaces.lan.name; dhcp-host = diff --git a/readme.md b/readme.md index a10040f..337385e 100644 --- a/readme.md +++ b/readme.md @@ -1,6 +1,7 @@ # Nix -[![build status](https://git.lyte.dev/lytedev/nix/badges/workflows/nix-flake-check.yaml/badge.svg)](https://git.lyte.dev/lytedev/nix/actions?workflow=nix-flake-check.yaml) +[![flake check status](https://git.lyte.dev/lytedev/nix/badges/workflows/nix-flake-check.yaml/badge.svg)](https://git.lyte.dev/lytedev/nix/actions?workflow=nix-flake-check.yaml) +[![build status](https://git.lyte.dev/lytedev/nix/badges/workflows/nix-build.yaml/badge.svg)](https://git.lyte.dev/lytedev/nix/actions?workflow=nix-build.yaml) My grand, declarative, and unified application, service, environment, and machine configuration, secret, and package management in a single flake. 
❤️ ❄️ diff --git a/secrets/beefcake/secrets.yml b/secrets/beefcake/secrets.yml index 411e745..315fdd9 100644 --- a/secrets/beefcake/secrets.yml +++ b/secrets/beefcake/secrets.yml @@ -1,5 +1,6 @@ hello: ENC[AES256_GCM,data:zFcid19gJKCNO6uThYyDzQ+KCxsBC/Fjma9AhyddOraK9siZtcpBWyPhnIkq9Q==,iv:1j1sEZcZS5+NUbIRHNE5L41lDMuLGAqWw9QJNOmtxuE=,tag:dDPq3rGesiA7khX/GPMVhQ==,type:str] example_key: ENC[AES256_GCM,data:EyQzVVXEgm20i62hFA==,iv:Z/gQF3lUcg7Ox66yWgBhi9aJqkN9nwIhcprSbC+fbdI=,tag:enULK/yFVQjNpRk0u4RFAg==,type:str] +more: ENC[AES256_GCM,data:wO/uSxU=,iv:eaLvLUWwyntTMkWrRMlOEpxGCffZy0VxPCizVD0Rmrk=,tag:xr9gwa2Jz1cF0XYUNzoA9g==,type:str] #ENC[AES256_GCM,data:S7g4kg1/4oztGaattpyo1Q==,iv:/JYp8w/ONJLIRXfiyhc7us4BZ+eg6UZeMWYHWSYXiGE=,tag:Ec02qXNPU+TsKf55cV/nlA==,type:comment] example_array: - ENC[AES256_GCM,data:ava5NqrxDX3u3Tr8vZQ=,iv:Q+c2aZx3buUKNUf8NeMxWsSsXtqk4PLbYM0PzVrgyKs=,tag:kVCv9FMQTkQwvGfH4t3HCg==,type:str] @@ -8,15 +9,21 @@ example_number: ENC[AES256_GCM,data:AifVPuuPnEw2lQ==,iv:/L/vG2znNlM35u4ZGM31bweT example_booleans: - ENC[AES256_GCM,data:GD3U7Q==,iv:ahTK9d6m8lQkjd2sS9Yo6V3EyFWoyEbeQG6Uke4hF40=,tag:rykfnfaLz39V+SJbomu5Zw==,type:bool] - ENC[AES256_GCM,data:hK/CtTQ=,iv:EFXdBumvMKdaXdd97vUBIMKIaw1rMfUt+/irkRZGc4Y=,tag:JofhZ5SS+jzRe6WJmP34Xg==,type:bool] +nix-cache-priv-key: ENC[AES256_GCM,data:ClVXffaK6MPQGAizjY7WcQ/PWmihkFgudLzVdWVnnp9R/GcgHjDB5RBBKqxa7pBlEM+Bvh6VrK/2AXxAC73JUhJxK44s7PaJBgBvdLk04c1abAgIT1idC0DL1izIbsGOqB+SweQ=,iv:KU2o20Vv0Ob3D+WIpJNRHCBd+FhuCKiGKaiTkGXJfKI=,tag:ZG+WF2YBeI+ZnCNIEWUXTQ==,type:str] +nix-cache-pub-key: ENC[AES256_GCM,data:E03CllQyoFO1/Ts6RCEuHZlHqLpd4OZ4nLDs+TlLEbY16mEBG3lFJnqAattmiJb41EjDUmiv1RqU,iv:SZbSMvRU2PC8/t4PS24EU9nVhYgrgKvJ0dfYTtW7YkM=,tag:5rmu6a0wPPkcB3JGnFF+7w==,type:str] plausible-admin-password: ENC[AES256_GCM,data:dC9olypZgMLdPOsmjthOaa/fMLtbGBlF9A==,iv:GU2ccj10TKQ0KW9b9X9AgYnvhS/wMVqYTyxr6Xt50Gk=,tag:ypQ0VtutVD8wgdfm40QZkw==,type:str] plausible-erlang-cookie: ENC[AES256_GCM,data:zhmC+D6EjIE8Rw91lIrMqY0QIazTX1e1jBzcZJP/76B9VvHWZ5bCkP1+KdfCY0lk3wIEq5vRfb8=,iv:RNNjlV3OFtXn1N0a5fEb/3FWzcHX19wtCLMdaVlKNJ0=,tag:8iU5oFVbzd0eMe5Mo1PiAw==,type:str] plausible-secret-key-base: ENC[AES256_GCM,data:ylakPGzY4S9640krl0fxYgm0Getf0+I7zthyTqTD/IpVhz5xgYBYx3Y2lSNa9Oi9yQ7+f9OdOBC6nc7n6MuUBg==,iv:YLPax/cRjMdIFti26gJd8COKr+3jXNZ7HCA5VvQVyAo=,tag:LHqYi590oEIp1IihLcFTtw==,type:str] nextcloud-admin-password: ENC[AES256_GCM,data:QaoSZyommeGED3nWNru92UVO2tjk24HE9fWX7ExYT101o4ZL411TmV1TXHSyfwjmE7yLIm1K/j4xpEbIY3zvFg==,iv:xC5EZVPHumVPOob5jiiXMFAmdFQcFSUPtZgioAgGDDs=,tag:Q/kY38XWkGsqcmCkd2lodg==,type:str] +netlify-ddns-password: ENC[AES256_GCM,data:mz9MS93ZPbtziwo56DP27q5ZgA1rgCptQpgTPrq2Ihc3KjSxSACJ6p6t8NjRPr4lSDLPzDa47OnRct/N4fcm5Q==,iv:upOh9S0wvTXBwfso3GhQzpl5befY0T0hTW/LGNcvv0k=,tag:/LNP0wIaxtExulV0blVkXA==,type:str] #ENC[AES256_GCM,data:IDauOj95sPt6LQkNWOaAV3AR7XPHJljX7Gef/IgtzC227ln7aKpVLCbhxD6pNTwd9/KhIXJp3vagCjfgkO/utA==,iv:Pn5jIPsFMBA2xnp3SUBgBug1NN8d3h3zy1pGVzO2hO0=,tag:NzhLA7nqE7SRRMV+rKgCjQ==,type:comment] -forgejo-runner.env: ENC[AES256_GCM,data:10wKRImXKS7ezcWnkwz7ak194snQ4wG8GBePeHXN1I23JfOvuD00427fOJ4jbCY=,iv:8jrmcXa2yqFTSf4fFnZXCuyGft90RzUO3S4rZGXaTDI=,tag:EGDqTK8GKBGfogkqkCODxg==,type:str] +forgejo-runner.env: ENC[AES256_GCM,data:x4EaDzK4W34ZEZ/Inakore2YABZf8e7TBBjoC6xTPZ9GBrSZCE85FOcHAmMXPDo=,iv:bNGOsLnhxnlC/opCKT1DSsGoWdmgJ8NgEPY3ySlN108=,tag:Ijp3qHBSdv6EDaZdomJhAA==,type:str] jland.env: 
ENC[AES256_GCM,data:u+QKwKWG9NFduuofhe3aatof3KoC0N4ZpNOD8E/7l0BTSoTe5Tqmz5/33EOcBUw99+YLFR4kTJwdUmLWHk4UD87aGsJ4liPCtXnBsToAzBGg0I3mhGQ/QM8iKXMW9oKb3ciapitQBuJa1WIp5/bHNtCXWQ==,iv:iZDET5EWM4DnAoQqLP9+Ll4S+mFHt2wZ3ENtN79Dbqw=,tag:qVpocN3FxlHfte2hAmtGPA==,type:str] dawncraft.env: ENC[AES256_GCM,data:8n1ymQZpMeVwTyoHhccV+W5diMLcsZw5zZQy4Z4eaMcLFk8ey3SeXkCf9+GnqpIU5xIZfCP1ZqeSxR03kJx3TPbQeBLZeN/QAYBxHOg/tjXIE6jdIGv0INkVLkExKPlvGN8F+ijwYkwgfqlhKPBf+Q==,iv:EMGlqUxcfvxqn1G1NohrAtJP/fLdolP++zcvaxIvVR4=,tag:1+ueIDCJTxmM586Z7i0aUA==,type:str] api.lyte.dev: ENC[AES256_GCM,data:14C5GQ41m/g7qHPzxlYoWjKWDOcm7MEDkuSofiuLfRNc/nji61t1eDbKX3d+SQL1UBchJFoBrWrUxnf0mUERhED1196z8vUq2jKEkcqKCAUS3soECInlb8zcxTcxaTFjYSjp1vUBdAn05AqLsF+hh9Bsm4fMQYjnHEZke9EmPZhuTlUdZa4eLv3+L3xAPHk2QIHQhdsjcTjGAZRMZOgTEcCvtGlb5pQuo11XmR2JzwzOXMC51WFDeOIWMAdO80yQBAdILso7rp1Nts/lwF0Bc9t7bNdHyoVTOA==,iv:jWGqUpXOTb/O972qXOqeX0EMFQLDKwaNHBqlpuGrZOk=,tag:uwB/jlAgESkLZ+vJ/OeV0A==,type:str] +restic-rascal-passphrase: ENC[AES256_GCM,data:yonKbBh4riGwxc/qcj8F/qrgAtA1sWhYejw9rdOTdCNW3a7zL/Ny1+XCI/P3bMOsY6UTmg/gxA2itp4cSbvqjg==,iv:5GwaEExn7b3dIkCVehLxaBXW+nUuSexY/bcqfCUwF5Q=,tag:dinyyw2XeVoSnw/IsYfK0w==,type:str] +restic-rascal-ssh-private-key: ENC[AES256_GCM,data:ddsOs0XsayyQI9qc6LzwQpdDnfwNpbj8PbBJ5fyuqtlVNYndeLxaYcbZI2ULSUhgR1tN0FS+ggGTHQhVvjwksNvpskUGHNKkSLKH3D/mn5N9tsoeAblN4gZsloZdqXBVzEehumcQMdhh6iy6NkNbuinKrVKDhLV25PrFKuSBEYw9VHU7HAMW5Tfop3RzBXjZWETCDAR2OQa7d1dXsJ0Kw6b9RFmRe5MGQ0J7YhjdTg26JGMMVSeHvr5UbiUJkGA5RvOLEDM2Dfai7Lf8yRPZVxUl+rdRsNvNYEoYGu5rGLUFcuqIbQ+s40dP2uXwWauwkIvHUjEahkbP0httj4Kg3qIJBRPg7OuS+MOwAnLEAs3hl5zeBV396yA9qjWW8nhnbml58/uFFbfXbJWTM3r8cMpFbHKD+Ojo/99fm5Vy3pAMzNzEsHOaT+iyDYyNkV5OH1GyKK9n7kIRLdqmWe7GmaKXlwVvNUPi3RvLX9VXq83a4BuupFyTmaNfPGMs/17830aleV674+QVgKh3VyFtuJy6KBpMXDv16wFo,iv:S2I3h6pmKLxEc29E0zn2b8lscqA//5/ZMTV9q+/tdvs=,tag:ALeCT+nrVPDfS21xC555sA==,type:str] +restic-ssh-priv-key-benland: ENC[AES256_GCM,data:G+uiYZTvqXhpJb66j6Q6S+otlXeRX0CdYeMHzSMjIbvbI0AVm0yCU7COO5/O8i47NpvrKKS1kVxVEK8ixLRUowkl3hgRXhxsBIPFnpkMD0ENmJttm4HOpi0qIWMwzPYTjkz/slY4HcTFnCfYy1ZpURQdWwZsr1EdAA05bUMTtM22R3uOMzjO8uf72PCWX7yffo8MxsLmWvNVAOhVlrb2H5KQNR/IquFK3TFoZitq5nVDG9tcEFkX+lgA3zsmCHU/2DvvodgeRoltaAFvgjVznNGf4e5p8owHUtSzX52HwGZRiUlMuhpre2gm1r73n8AyZe41II+LX/85fMfZDdyayIGv3AAMBib8H0/AoChexRcdLQEmzOgRrXsgucDJrWSWP6WMBVyamUm79m5ep0fvL1lJftuJqN0uuq9dBrispdso4x+6jk/pDf5pEM/FE6s1rY832BEb7q0PnjyvVogOez+cIihmMpDdnS0A/8TFzg29i3C+93x5vrt3k7atNzR/jN+/GqX2FKLzxWrrIw2d,iv:IP+N8JQu+XRvwTtBnxu54ujzU5UliltXG3mk9HfJaN8=,tag:4oinE9QMaSh8IfUd/ttM3Q==,type:str] sops: kms: [] gcp_kms: [] @@ -26,23 +33,23 @@ sops: - recipient: age1stdue5q5teskee057ced6rh9pzzr93xsy66w4sc3zu49rgxl7cjshztt45 enc: | -----BEGIN AGE ENCRYPTED FILE----- - YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBDOHpnQlJkTWlUNXlxNzVY - WkF4ci9hTzg3S0tJM2RZMGlIcC9nNlgrdjEwCjRvaDBpb1ZoOWNtNkE1NDVXQVJY - UGZyZ2FpalQyUlpSU056TFRpUXlBNTgKLS0tIFNCSWdiQ25yNDdsdUtlUGZLS0h1 - N3Z4NWRvcXN2a2xKMjlRM2lPZEhhekEKtolJt3EAZXlqq6UKV43Z2EJW4hkfZMJ8 - 06Se+Eim/PS3H1gjRdZ9SV45ghRmLy2OSMKTJxN78HFcJeDpp5CQnA== + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAybllGUkNqb3JBMFI2UHpL + R1BVNlRRSkFLYTJzUnRLZktNcVdJN1BPZVdFClNOQnRjOWh2Nk0xcHROQTlTQ1VF + RXJhdUpYS1hyZ2RHRnhrcU5IQ2VwakUKLS0tIFMyN3VNWkpmTTVkT05HK2hjbzBK + U1lZeUVmSHkxTjNsUVF4OGRYZklva2sKMKFZ0ohdeMGl3HamOjlccaFCrhtIpxGH + 44wCRW1zIjOLrieTbUba/ejdQoMb7GgSXBzHZqxy/sE4CvgHLS/iBw== -----END AGE ENCRYPTED FILE----- - - recipient: age1k8s590x34ghz7yrjyrgzkd24j252srf0mhfy34halp4frwr065csrlt2ev + - recipient: age1etv56f7kf78a55lxqtydrdd32dpmsjnxndf4u28qezxn6p7xt9esqvqdq7 enc: | 
-----BEGIN AGE ENCRYPTED FILE----- - YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBJTittdVRqRTRWSlBpRnpY - NmlIKzdoOFNxSnNoTFpwRVN3UGdJaHhRMldjCmRrRlo5V1luN0dabFBCWDhZaU9V - c05VeUxMQi9oM3czaDFFUEw3aHp4T1EKLS0tIHFqTVlXTnE5ZkoxRk9ESGo3MzAr - b0lTRjVCMU9ELzdvbFBJZ0tHbGtsYkEKLEcXCEikC3T3hfVOYKtWcNSGmfg28y+f - nGC4dQh9EciEbk1ZBbN3i6YSNULDoMSH172KBmRyt1ogr1ZPyCNqtg== + YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBQY3EyYUlMSmZYNlpZUU9u + QW5hL1RZajJ2MmJ6WTJ5YWVEZ0g4ZEMzcjFNCkNRSlRTT2FyTUlSMkVNNU1adjdL + dkpGS3ZwVDhlVkduVC82TlFiWHZ5RG8KLS0tIEgxcENVMS8rTFAzejE2bjJGOTIx + bGpacHFRSkJYUUMwOEh4cVBXZ1NESmsKa5EhZ7148ojCqZldukLcPLr93HqnpNgq + rMI0Nyz4Z4lkTVMRpA94zyNTkNwJ02/CYcKi8EJi6jGZnNPUTcnTwg== -----END AGE ENCRYPTED FILE----- - lastmodified: "2024-07-24T16:34:28Z" - mac: ENC[AES256_GCM,data:/zOixu65MHMRj5hermm6mmkpS5q97yEwALP+LwC6j9NIXxL2nIFB+jqQtiyMwlErB1Vf5cZvH3PA1sOqHnPOsv5p58S5Ww7eIHb4ElPXufGLqhA6sTiz1RrlWwUqtDtR42V3kql6Hro57PXV+NZ6NEnvzHKct9S30OCOWWtGwTs=,iv:JTF5u4rva9PgLAG2ysTz+pA4wTRq5WJR7xJZNGbciUA=,tag:0X0NlvxBoaELANxp/vwnnw==,type:str] + lastmodified: "2024-09-06T21:22:57Z" + mac: ENC[AES256_GCM,data:suoBGuZnfZpo55g+sq6MXDvecwhhWRS9gtTlCvnWmSvWT+K8TFXHcz9cLZT5U2N4ueSYJovRoKPoAv9rKgtLHSSg+JKI0b0cErQge75970bTbeMKMl+SJmYF0T0ht5+8n5zjhnQjVo2mHmJJI1IekumsoNJ9+F6USPBidiK0uNU=,iv:7dMsEnXylvn0vVfmU9pQt1BgrqfKdSyoBbNTUZ782Uo=,tag:E3u9LVcdTKa7mjAxQ/m9rw==,type:str] pgp: [] unencrypted_suffix: _unencrypted - version: 3.8.1 + version: 3.9.0
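For context on how the new restic entries above get consumed on the NixOS side: with sops-nix, each key in secrets.yml becomes a `sops.secrets` entry whose decrypted path services reference at runtime. A minimal sketch (only the secret names come from this file; the repository URL, host, and paths are hypothetical):

    sops.defaultSopsFile = ./secrets/beefcake/secrets.yml;
    sops.secrets."restic-rascal-passphrase" = {};
    sops.secrets."restic-rascal-ssh-private-key" = {};
    services.restic.backups.rascal = {
      passwordFile = config.sops.secrets."restic-rascal-passphrase".path;
      repository = "sftp:beefcake@rascal:/storage/backups/beefcake";
      paths = ["/storage"];
      extraOptions = [
        # restic reaches the sftp repo over ssh using the decrypted key
        "sftp.command='ssh beefcake@rascal -i ${config.sops.secrets."restic-rascal-ssh-private-key".path} -s sftp'"
      ];
    };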