/*
if ur fans get loud:
# enable manual fan control
sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x01 0x00

# set fan speed to last byte as decimal
sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00
*/
{
  # inputs,
  # outputs,
  lib,
  config,
  pkgs,
  ...
}: {
  system.stateVersion = "24.05";
  home-manager.users.daniel.home.stateVersion = "24.05";
  networking.hostName = "beefcake";

  imports = [
    {
      # hardware and boot module
      networking.hostId = "541ede55";
      boot = {
        zfs = {
          extraPools = ["zstorage"];
        };
        supportedFilesystems = {
          zfs = true;
        };
        initrd.supportedFilesystems = {
          zfs = true;
        };
        kernelPackages = config.boot.zfs.package.latestCompatibleLinuxPackages;
        initrd.availableKernelModules = ["ehci_pci" "mpt3sas" "usbhid" "sd_mod"];
        kernelModules = ["kvm-intel"];
        kernelParams = ["nohibernate"];
        loader.systemd-boot.enable = true;
        loader.efi.canTouchEfiVariables = true;
      };

      fileSystems."/" = {
        device = "/dev/disk/by-uuid/992ce55c-7507-4d6b-938c-45b7e891f395";
        fsType = "ext4";
      };

      fileSystems."/boot" = {
        device = "/dev/disk/by-uuid/B6C4-7CF4";
        fsType = "vfat";
        options = ["fmask=0022" "dmask=0022"];
      };

      # should be mounted by auto-import; see boot.zfs.extraPools
      # fileSystems."/storage" = {
      #   device = "zstorage/storage";
      #   fsType = "zfs";
      # };

      fileSystems."/nix" = {
        device = "zstorage/nix";
        fsType = "zfs";
      };

      services.zfs.autoScrub.enable = true;
      services.zfs.autoSnapshot.enable = true;

      # TODO: nfs with zfs? (see the sketch below)
      # services.nfs.server.enable = true;
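      # A hedged sketch for the NFS TODO above (untested; assumes the
      # "zstorage/storage" pool data is mounted at /storage):
      # services.nfs.server = {
      #   enable = true;
      #   exports = ''
      #     /storage/family 192.168.0.0/16(rw,sync,no_subtree_check)
      #   '';
      # };
      # Alternatively, ZFS can manage exports natively via the sharenfs
      # dataset property, e.g. `zfs set sharenfs=on zstorage/storage`.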
    }
    ({
      options,
      config,
      ...
    }: let
      inherit (lib) mkOption types;
    in {
      options.services.restic.commonPaths = mkOption {
        type = types.nullOr (types.listOf types.str);
        default = [];
        description = ''
          Which paths to back up, in addition to ones specified via
          `dynamicFilesFrom`. If null or an empty array and
          `dynamicFilesFrom` is also null, no backup command will be run.
          This can be used to create a prune-only job.
        '';
        example = [
          "/var/lib/postgresql"
          "/home/user/backup"
        ];
      };
    })
    {
      # sops secrets config
      sops = {
        defaultSopsFile = ../secrets/beefcake/secrets.yml;
        age = {
          sshKeyPaths = ["/etc/ssh/ssh_host_ed25519_key"];
          keyFile = "/var/lib/sops-nix/key.txt";
          generateKey = true;
        };
      };
    }
    {
      sops.secrets = {
        netlify-ddns-password = {mode = "0400";};
      };
      services.deno-netlify-ddns-client = {
        passwordFile = config.sops.secrets.netlify-ddns-password.path;
      };
    }
    {
      # nix binary cache
      sops.secrets = {
        nix-cache-priv-key = {mode = "0400";};
      };
      services.nix-serve = {
        enable = true;
        secretKeyFile = config.sops.secrets.nix-cache-priv-key.path;
      };
      services.caddy.virtualHosts."nix.h.lyte.dev" = {
        extraConfig = ''
          reverse_proxy :${toString config.services.nix-serve.port}
        '';
      };
      networking.firewall.allowedTCPPorts = [
        80
        443
      ];
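
      # For reference, a consuming machine would point at this cache with
      # something like the following (sketch; the public key string is a
      # placeholder, not the real signing key):
      # nix.settings = {
      #   extra-substituters = ["https://nix.h.lyte.dev"];
      #   extra-trusted-public-keys = ["nix.h.lyte.dev:AAAA...placeholder"];
      # };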

      # regularly build this flake so we have stuff in the cache
      # TODO: schedule this for nightly builds instead of intervals based on boot time (see the sketch below)
      systemd.timers."build-lytedev-flake" = {
        wantedBy = ["timers.target"];
        timerConfig = {
          OnBootSec = "30m"; # 30 minutes after booting
          OnUnitActiveSec = "1d"; # every day afterwards
          Unit = "build-lytedev-flake.service";
        };
      };
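
      # One way to address the nightly-build TODO above (sketch, untested):
      # swap the monotonic timers for a calendar schedule, e.g.
      # timerConfig = {
      #   OnCalendar = "*-*-* 02:00:00";
      #   Persistent = true; # run on next boot if the machine was off at 2am
      #   Unit = "build-lytedev-flake.service";
      # };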

      systemd.services."build-lytedev-flake" = {
        # TODO: might want to add root for the most recent results?
        script = ''
          # build self (main server) configuration
          nixos-rebuild build --flake git+https://git.lyte.dev/lytedev/nix.git --accept-flake-config
          # build desktop configuration
          nixos-rebuild build --flake git+https://git.lyte.dev/lytedev/nix.git#dragon --accept-flake-config
          # build main laptop configuration
          nixos-rebuild build --flake git+https://git.lyte.dev/lytedev/nix.git#foxtrot --accept-flake-config
        '';
        path = with pkgs; [openssh git nixos-rebuild];
        serviceConfig = {
          # TODO: mkdir -p...? (see the sketch below)
          WorkingDirectory = "/home/daniel/.home/nightly-flake-builds";
          Type = "oneshot";
          User = "daniel";
        };
      };
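
      # Sketch for the mkdir TODO above (untested): systemd can create the
      # working directory before the script runs, e.g.
      # systemd.services."build-lytedev-flake".serviceConfig.ExecStartPre =
      #   "${pkgs.coreutils}/bin/mkdir -p /home/daniel/.home/nightly-flake-builds";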

      networking = {
        extraHosts = ''
          ::1 nix.h.lyte.dev
          127.0.0.1 nix.h.lyte.dev
        '';
      };
    }
    {
      services.headscale = {
        enable = false; # TODO: setup headscale?
        address = "127.0.0.1";
        port = 7777;
        settings = {
          server_url = "https://tailscale.vpn.h.lyte.dev";
          db_type = "sqlite3";
          db_path = "/var/lib/headscale/db.sqlite";
          derp.server = {
            enable = true;
            region_id = 999;
            stun_listen_addr = "0.0.0.0:3478";
          };

          dns_config = {
            magic_dns = true;
            base_domain = "vpn.h.lyte.dev";
            domains = [
              "ts.vpn.h.lyte.dev"
            ];
            nameservers = [
              "1.1.1.1"
              # "192.168.0.1"
            ];
            override_local_dns = true;
          };
        };
      };
      services.caddy.virtualHosts."tailscale.vpn.h.lyte.dev" = lib.mkIf config.services.headscale.enable {
        extraConfig = ''
          reverse_proxy http://localhost:${toString config.services.headscale.port}
        '';
      };
      networking.firewall.allowedUDPPorts = lib.mkIf config.services.headscale.enable [3478];
    }
    {
      # TODO: I think I need to set up my account? wondering if this can be done in nix as well (see the sketch below)
      services.restic.commonPaths = ["/var/lib/soju" "/var/lib/private/soju"];
      services.soju = {
        enable = true;
        listen = ["irc+insecure://:6667"];
      };
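      # Sketch for the account TODO above (assumption: soju's bundled sojuctl
      # tool and its default config location apply; untested):
      #   sojuctl create-user daniel -admin
      # This is imperative; a oneshot systemd service guarded by a marker file
      # could make it roughly declarative.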
      networking.firewall.allowedTCPPorts = [
        6667
      ];
    }
# {
# # samba
# users.users.guest = {
# # used for anonymous samba access
# isSystemUser = true;
# group = "users";
# createHome = true;
# };
# users.users.scannerupload = {
# # used for scanner samba access
# isSystemUser = true;
# group = "users";
# createHome = true;
# };
# systemd.tmpfiles.rules = [
# "d /var/spool/samba 1777 root root -"
# ];
# services.samba-wsdd = {
# enable = true;
# };
# services.samba = {
# enable = true;
# openFirewall = true;
# securityType = "user";
# # not needed since I don't think I use printer sharing?
# # https://nixos.wiki/wiki/Samba#Printer_sharing
# # package = pkgs.sambaFull; # broken last I checked in nixpkgs?
# extraConfig = ''
# workgroup = WORKGROUP
# server string = beefcake
# netbios name = beefcake
# security = user
# #use sendfile = yes
# #max protocol = smb2
# # note: localhost is the ipv6 localhost ::1
# hosts allow = 100.64.0.0/10 192.168.0.0/16 127.0.0.1 localhost
# hosts deny = 0.0.0.0/0
# guest account = guest
# map to guest = never
# # load printers = yes
# # printing = cups
# # printcap name = cups
# '';
# shares = {
# libre = {
# path = "/storage/libre";
# browseable = "yes";
# "read only" = "no";
# "guest ok" = "yes";
# "create mask" = "0666";
# "directory mask" = "0777";
# # "force user" = "nobody";
# # "force group" = "users";
# };
# public = {
# path = "/storage/public";
# browseable = "yes";
# "read only" = "no";
# "guest ok" = "yes";
# "create mask" = "0664";
# "directory mask" = "0775";
# # "force user" = "nobody";
# # "force group" = "users";
# };
# family = {
# path = "/storage/family";
# browseable = "yes";
# "read only" = "no";
# "guest ok" = "no";
# "create mask" = "0660";
# "directory mask" = "0770";
# # "force user" = "nobody";
# # "force group" = "family";
# };
# scannerdocs = {
# path = "/storage/scannerdocs";
# browseable = "yes";
# "read only" = "no";
# "guest ok" = "no";
# "create mask" = "0600";
# "directory mask" = "0700";
# "valid users" = "scannerupload";
# "force user" = "scannerupload";
# "force group" = "users";
# };
# daniel = {
# path = "/storage/daniel";
# browseable = "yes";
# "read only" = "no";
# "guest ok" = "no";
# "create mask" = "0600";
# "directory mask" = "0700";
# # "force user" = "daniel";
# # "force group" = "users";
# };
# # printers = {
# # comment = "All Printers";
# # path = "/var/spool/samba";
# # public = "yes";
# # browseable = "yes";
# # # to allow user 'guest account' to print.
# # "guest ok" = "yes";
# # writable = "no";
# # printable = "yes";
# # "create mode" = 0700;
# # };
# };
# };
# }
    {
      # nextcloud
      # TODO: investigate https://carlosvaz.com/posts/the-holy-grail-nextcloud-setup-made-easy-by-nixos/
      # services.postgresql = {
      #   ensureDatabases = [
      #     "nextcloud"
      #   ];
      #   ensureUsers = [
      #     {
      #       name = "nextcloud";
      #       ensureDBOwnership = true;
      #     }
      #   ];
      # };
      # users.users.nextcloud = {
      #   isSystemUser = true;
      #   createHome = false;
      #   group = "nextcloud";
      # };
    }
    {
      # plausible
      services.postgresql = {
        ensureDatabases = ["plausible"];
        ensureUsers = [
          {
            name = "plausible";
            ensureDBOwnership = true;
          }
        ];
      };
      users.users.plausible = {
        isSystemUser = true;
        createHome = false;
        group = "plausible";
      };
      users.extraGroups = {
        "plausible" = {};
      };
      services.plausible = {
        enable = true;
        database = {
          clickhouse.setup = true;
          postgres = {
            setup = false;
            dbname = "plausible";
          };
        };
        server = {
          baseUrl = "https://a.lyte.dev";
          disableRegistration = true;
          port = 8899;
          secretKeybaseFile = config.sops.secrets.plausible-secret-key-base.path;
        };
        adminUser = {
          activate = false;
          email = "daniel@lyte.dev";
          passwordFile = config.sops.secrets.plausible-admin-password.path;
        };
      };
      sops.secrets = {
        plausible-secret-key-base = {
          owner = "plausible";
          group = "plausible";
        };
        plausible-admin-password = {
          owner = "plausible";
          group = "plausible";
        };
      };
      systemd.services.plausible = {
        serviceConfig.User = "plausible";
        serviceConfig.Group = "plausible";
      };
      services.caddy.virtualHosts."a.lyte.dev" = {
        extraConfig = ''
          reverse_proxy :${toString config.services.plausible.server.port}
        '';
      };
    }
    {
      # clickhouse
      environment.etc = {
        "clickhouse-server/users.d/disable-logging-query.xml" = {
          text = ''
            <clickhouse>
              <profiles>
                <default>
                  <log_queries>0</log_queries>
                  <log_query_threads>0</log_query_threads>
                </default>
              </profiles>
            </clickhouse>
          '';
        };
        "clickhouse-server/config.d/reduce-logging.xml" = {
          text = ''
            <clickhouse>
              <logger>
                <level>warning</level>
                <console>true</console>
              </logger>
              <query_thread_log remove="remove"/>
              <query_log remove="remove"/>
              <text_log remove="remove"/>
              <trace_log remove="remove"/>
              <metric_log remove="remove"/>
              <asynchronous_metric_log remove="remove"/>
              <session_log remove="remove"/>
              <part_log remove="remove"/>
            </clickhouse>
          '';
        };
      };
      services.restic.commonPaths = [
        # "/var/lib/clickhouse"
      ];
    }
    {
      # family storage
      systemd.tmpfiles.settings = {
        "10-family" = {
          "/storage/family" = {
            "d" = {
              mode = "0770";
              user = "root";
              group = "family";
            };
          };
          "/storage/family/valerie" = {
            "d" = {
              mode = "0750";
              user = "valerie";
              group = "family";
            };
          };
          "/storage/family/daniel" = {
            "d" = {
              mode = "0750";
              user = "daniel";
              group = "family";
            };
          };
        };
      };
      services.restic.commonPaths = [
        "/storage/family"
      ];
    }
    {
      # daniel augments
      systemd.tmpfiles.settings = {
        "10-daniel" = {
          "/storage/daniel" = {
            "d" = {
              mode = "0700";
              user = "daniel";
              group = "nogroup";
            };
          };
          "/storage/daniel/critical" = {
            "d" = {
              mode = "0700";
              user = "daniel";
              group = "nogroup";
            };
          };
        };
      };

      users.groups.daniel.members = ["daniel"];
      users.groups.nixadmin.members = ["daniel"];
      users.users.daniel = {
        extraGroups = [
          # "nixadmin" # write access to /etc/nixos/ files
          "wheel" # sudo access
          "caddy" # write access to public static files
          "users" # general users group
          "jellyfin" # write access to jellyfin files
          "audiobookshelf" # write access to audiobookshelf files
          "flanilla" # minecraft server manager
          "forgejo"
        ];
      };

      services.restic.commonPaths = [
        "/storage/daniel"
      ];

      services.postgresql = {
        ensureDatabases = ["daniel"];
        ensureUsers = [
          {
            name = "daniel";
            ensureDBOwnership = true;
          }
        ];
      };
    }
    {
      systemd.tmpfiles.settings = {
        "10-jellyfin" = {
          "/storage/jellyfin" = {
            "d" = {
              mode = "0770";
              user = "jellyfin";
              group = "wheel";
            };
          };
          "/storage/jellyfin/movies" = {
            "d" = {
              mode = "0770";
              user = "jellyfin";
              group = "wheel";
            };
          };
          "/storage/jellyfin/tv" = {
            "d" = {
              mode = "0770";
              user = "jellyfin";
              group = "wheel";
            };
          };
          "/storage/jellyfin/music" = {
            "d" = {
              mode = "0770";
              user = "jellyfin";
              group = "wheel";
            };
          };
        };
      };
      services.jellyfin = {
        enable = true;
        openFirewall = false;
        # uses port 8096 by default, configurable from admin UI
      };
      services.caddy.virtualHosts."video.lyte.dev" = {
        extraConfig = ''reverse_proxy :8096'';
      };
      # NOTE: this server's xeon chips DO NOT seem to support quicksync or graphics in general
      # but I can probably throw in a crappy GPU (or a big, cheap ebay GPU for ML
      # stuff, too?) and get good transcoding performance
      # jellyfin hardware encoding
      # hardware.graphics = {
      #   enable = true;
      #   extraPackages = with pkgs; [
      #     intel-media-driver
      #     vaapiIntel
      #     vaapiVdpau
      #     libvdpau-va-gl
      #     intel-compute-runtime
      #   ];
      # };
      # nixpkgs.config.packageOverrides = pkgs: {
      #   vaapiIntel = pkgs.vaapiIntel.override {enableHybridCodec = true;};
      # };
    }
    {
      systemd.tmpfiles.settings = {
        "10-postgres" = {
          "/storage/postgres" = {
            "d" = {
              mode = "0750";
              user = "postgres";
              group = "postgres";
            };
          };
        };
      };

      services.postgresql = {
        enable = true;
        dataDir = "/storage/postgres";
        enableTCPIP = true;
        package = pkgs.postgresql_15;
        # https://www.postgresql.org/docs/current/auth-pg-hba-conf.html
        # TODO: give the "daniel" user access to all databases (see the sketch below)
        authentication = pkgs.lib.mkOverride 10 ''
          #type database  user     auth-method auth-options
          local all       postgres peer        map=superuser_map
          local all       daniel   peer        map=superuser_map
          local sameuser  all      peer        map=superuser_map
          # lan ipv4
          host  all       daniel   192.168.0.0/16 trust
          host  all       daniel   10.0.0.0/24    trust
          # tailnet ipv4
          host  all       daniel   100.64.0.0/10  trust
        '';

        identMap = ''
          # map          system_user db_user
          superuser_map  root        postgres
          superuser_map  postgres    postgres
          superuser_map  daniel      postgres

          # let other names log in as themselves
          superuser_map  /^(.*)$     \1
        '';
      };
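
      # Sketch for the "daniel" TODO above (assumption: full superuser rights
      # are acceptable): run once as the postgres superuser, e.g.
      #   psql -c 'ALTER USER daniel WITH SUPERUSER;'
      # services.postgresql.initialScript could do this declaratively, but it
      # only runs when the data directory is first created.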

      services.postgresqlBackup = {
        enable = true;
        backupAll = true;
        compression = "none"; # hoping for restic deduplication here?
        location = "/storage/postgres-backups";
        startAt = "*-*-* 03:00:00";
      };

      services.restic.commonPaths = [
        "/storage/postgres-backups"
      ];
    }
    {
      # friends
      users.users.ben = {
        isNormalUser = true;
        packages = [pkgs.vim];
        openssh.authorizedKeys.keys = [
          "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKUfLZ+IX85p9355Po2zP1H2tAxiE0rE6IYb8Sf+eF9T ben@benhany.com"
        ];
      };

      users.users.alan = {
        isNormalUser = true;
        packages = [pkgs.vim];
        # openssh.authorizedKeys.keys = [];
      };
    }
    {
      # restic backups
      sops.secrets = {
        restic-ssh-priv-key-benland = {mode = "0400";};
        restic-rascal-passphrase = {
          mode = "0400";
        };
        restic-rascal-ssh-private-key = {
          mode = "0400";
        };
      };
      users.groups.restic = {};
      users.users.restic = {
        # used for other machines to back up to
        isSystemUser = true;
        group = "restic";
        openssh.authorizedKeys.keys = [] ++ config.users.users.daniel.openssh.authorizedKeys.keys;
      };

      systemd.tmpfiles.settings = {
        "10-backups-local" = {
          "/storage/backups/local" = {
            "d" = {
              mode = "0750";
              user = "root";
              group = "wheel";
            };
          };
        };
      };

      services.restic.backups = let
        defaults = {
          passwordFile = config.sops.secrets.restic-rascal-passphrase.path;
          paths =
            config.services.restic.commonPaths
            ++ [
            ];
          initialize = true;
          exclude = [];
          timerConfig = {
            OnCalendar = ["04:45" "17:45"];
          };
        };
      in {
        local =
          defaults
          // {
            repository = "/storage/backups/local";
          };
        rascal =
          defaults
          // {
            extraOptions = [
              "sftp.command='ssh beefcake@rascal -i /root/.ssh/id_ed25519 -s sftp'"
            ];
            repository = "sftp://beefcake@rascal://storage/backups/beefcake";
          };
        # TODO: add ruby?
        benland =
          defaults
          // {
            passwordFile = config.sops.secrets.restic-ssh-priv-key-benland.path;
            extraOptions = [
              "sftp.command='ssh daniel@n.benhaney.com -p 10022 -i /root/.ssh/id_ed25519 -s sftp'"
            ];
            repository = "sftp://daniel@n.benhaney.com://storage/backups/beefcake";
          };
      };
    }
    {
      systemd.tmpfiles.settings = {
        "10-caddy" = {
          "/storage/files.lyte.dev" = {
            "d" = {
              mode = "2775";
              user = "root";
              group = "wheel";
            };
          };
        };
      };

      services.restic.commonPaths = [
        "/storage/files.lyte.dev"
      ];

      services.caddy = {
        # TODO: 502 and other error pages (see the sketch below)
        enable = true;
        email = "daniel@lyte.dev";
        adapter = "caddyfile";
        virtualHosts = {
          "files.lyte.dev" = {
            # TODO: customize the files.lyte.dev template?
            extraConfig = ''
              header {
                Access-Control-Allow-Origin "{http.request.header.Origin}"
                Access-Control-Allow-Credentials true
                Access-Control-Allow-Methods *
                Access-Control-Allow-Headers *
                Vary Origin
                defer
              }
              file_server browse {
                # browse template
                # hide .*
                root /storage/files.lyte.dev
              }
            '';
          };
        };
        # acmeCA = "https://acme-staging-v02.api.letsencrypt.org/directory";
      };
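
      # Sketch for the error-page TODO above (untested Caddyfile snippet): each
      # virtual host could add a handler along these lines:
      # extraConfig = ''
      #   handle_errors {
      #     respond "{err.status_code} {err.status_text}"
      #   }
      # '';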
    }
    {
      systemd.tmpfiles.settings = {
        "10-forgejo" = {
          "/storage/forgejo" = {
            "d" = {
              mode = "0700";
              user = "forgejo";
              group = "nogroup";
            };
          };
        };
      };
      services.forgejo = {
        enable = true;
        stateDir = "/storage/forgejo";
        settings = {
          DEFAULT = {
            APP_NAME = "git.lyte.dev";
          };
          server = {
            ROOT_URL = "https://git.lyte.dev";
            HTTP_ADDR = "127.0.0.1";
            HTTP_PORT = 3088;
            DOMAIN = "git.lyte.dev";
          };
          actions = {
            ENABLED = true;
          };
          service = {
            DISABLE_REGISTRATION = true;
          };
          session = {
            COOKIE_SECURE = true;
          };
          log = {
            # LEVEL = "Debug";
          };
          ui = {
            THEMES = "forgejo-auto,forgejo-light,forgejo-dark";
            DEFAULT_THEME = "forgejo-auto";
          };
          indexer = {
            REPO_INDEXER_ENABLED = "true";
            REPO_INDEXER_PATH = "indexers/repos.bleve";
            MAX_FILE_SIZE = "1048576";
            # REPO_INDEXER_INCLUDE =
            REPO_INDEXER_EXCLUDE = "resources/bin/**";
          };
        };
        lfs = {
          enable = true;
        };
        dump = {
          enable = false;
        };
        database = {
          # TODO: move to postgres? (see the sketch below)
          type = "sqlite3";
        };
      };
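
      # Sketch for the postgres TODO above (untested): the module can provision
      # a local postgres database itself, e.g. the database block above could
      # become:
      # database = {
      #   type = "postgres";
      #   createDatabase = true;
      # };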

      services.restic.commonPaths = [
        config.services.forgejo.stateDir
      ];
      sops.secrets = {
        "forgejo-runner.env" = {mode = "0400";};
      };
      systemd.services.gitea-runner-beefcake.after = ["sops-nix.service"];
      services.gitea-actions-runner = {
        # TODO: simple git-based automation would be dope? maybe especially for
        # mirroring to github super easy?
        package = pkgs.forgejo-runner;
        instances."beefcake" = {
          enable = true;
          name = "beefcake";
          url = "https://git.lyte.dev";
          settings = {
            container = {
              # use the shared network which is bridged by default
              # this lets us hit git.lyte.dev just fine
              network = "podman";
            };
          };
          labels = [
            # type ":host" does not depend on docker/podman/lxc
            "podman"
            "nix:docker://git.lyte.dev/lytedev/nix:latest"
            "beefcake:host"
            "nixos-host:host"
          ];
          tokenFile = config.sops.secrets."forgejo-runner.env".path;
          hostPackages = with pkgs; [
            nix
            bash
            coreutils
            curl
            gawk
            gitMinimal
            gnused
            nodejs
            gnutar # needed for cache action
            wget
          ];
        };
      };
      # environment.systemPackages = with pkgs; [nodejs];
      services.caddy.virtualHosts."git.lyte.dev" = {
        extraConfig = ''
          reverse_proxy :${toString config.services.forgejo.settings.server.HTTP_PORT}
        '';
      };
      services.caddy.virtualHosts."http://git.beefcake.lan" = {
        extraConfig = ''
          reverse_proxy :${toString config.services.forgejo.settings.server.HTTP_PORT}
        '';
      };
    }
    {
      services.restic.commonPaths = [
        config.services.vaultwarden.backupDir
      ];
      services.vaultwarden = {
        enable = true;
        backupDir = "/storage/vaultwarden/backups";
        config = {
          DOMAIN = "https://bw.lyte.dev";
          SIGNUPS_ALLOWED = "false";
          ROCKET_ADDRESS = "127.0.0.1";
          ROCKET_PORT = 8222;
        };
      };
      services.caddy.virtualHosts."bw.lyte.dev" = {
        extraConfig = ''reverse_proxy :${toString config.services.vaultwarden.config.ROCKET_PORT}'';
      };
    }
    {
      services.postgresql = {
        ensureDatabases = ["atuin"];
        ensureUsers = [
          {
            name = "atuin";
            ensureDBOwnership = true;
          }
        ];
      };
      services.atuin = {
        enable = true;
        database = {
          createLocally = false;
          # NOTE: this uses postgres over the unix domain socket by default
          # uri = "postgresql://atuin@localhost:5432/atuin";
        };
        openRegistration = false;
      };
      services.caddy.virtualHosts."atuin.h.lyte.dev" = {
        extraConfig = ''reverse_proxy :${toString config.services.atuin.port}'';
      };
    }
# {
# # jland minecraft server
# users.groups.jland = {
# gid = 982;
# };
# users.users.jland = {
# uid = 986;
# isSystemUser = true;
# createHome = false;
# group = "jland";
# };
# virtualisation.oci-containers.containers.minecraft-jland = {
# autoStart = false;
# # sending commands: https://docker-minecraft-server.readthedocs.io/en/latest/commands/
# image = "docker.io/itzg/minecraft-server";
# # user = "${toString config.users.users.jland.uid}:${toString config.users.groups.jland.gid}";
# extraOptions = [
# "--tty"
# "--interactive"
# ];
# environment = {
# EULA = "true";
# # UID = toString config.users.users.jland.uid;
# # GID = toString config.users.groups.jland.gid;
# STOP_SERVER_ANNOUNCE_DELAY = "20";
# TZ = "America/Chicago";
# VERSION = "1.20.1";
# MEMORY = "8G";
# MAX_MEMORY = "16G";
# TYPE = "FORGE";
# FORGE_VERSION = "47.1.3";
# ALLOW_FLIGHT = "true";
# ENABLE_QUERY = "true";
# MODPACK = "/data/origination-files/Server-Files-0.2.14.zip";
# # TYPE = "AUTO_CURSEFORGE";
# # CF_SLUG = "monumental-experience";
# # CF_FILE_ID = "4826863"; # 2.2.53
# # due to
# # Nov 02 13:45:22 beefcake minecraft-jland[2738672]: me.itzg.helpers.errors.GenericException: The modpack authors have indicated this file is not allowed for project distribution. Please download the client zip file from https://www.curseforge.com/minecraft/modpacks/monumental-experience and pass via CF_MODPACK_ZIP environment variable or place indownloads repo directory.
# # we must upload manually
# # CF_MODPACK_ZIP = "/data/origination-files/Monumental+Experience-2.2.53.zip";
# # ENABLE_AUTOPAUSE = "true"; # TODO: must increate or disable max-tick-time
# # May also have mod/loader incompatibilities?
# # https://docker-minecraft-server.readthedocs.io/en/latest/misc/autopause-autostop/autopause/
# };
# environmentFiles = [
# # config.sops.secrets."jland.env".path
# ];
# ports = ["26965:25565"];
# volumes = [
# "/storage/jland/data:/data"
# "/storage/jland/worlds:/worlds"
# ];
# };
# networking.firewall.allowedTCPPorts = [
# 26965
# ];
# }
# {
# # dawncraft minecraft server
# systemd.tmpfiles.rules = [
# "d /storage/dawncraft/ 0770 1000 1000 -"
# "d /storage/dawncraft/data/ 0770 1000 1000 -"
# "d /storage/dawncraft/worlds/ 0770 1000 1000 -"
# "d /storage/dawncraft/downloads/ 0770 1000 1000 -"
# ];
# virtualisation.oci-containers.containers.minecraft-dawncraft = {
# autoStart = false;
# # sending commands: https://docker-minecraft-server.readthedocs.io/en/latest/commands/
# image = "docker.io/itzg/minecraft-server";
# extraOptions = [
# "--tty"
# "--interactive"
# ];
# environment = {
# EULA = "true";
# STOP_SERVER_ANNOUNCE_DELAY = "20";
# TZ = "America/Chicago";
# VERSION = "1.18.2";
# MEMORY = "8G";
# MAX_MEMORY = "32G";
# ALLOW_FLIGHT = "true";
# ENABLE_QUERY = "true";
# SERVER_PORT = "26968";
# QUERY_PORT = "26968";
# TYPE = "AUTO_CURSEFORGE";
# CF_SLUG = "dawn-craft";
# CF_EXCLUDE_MODS = "368398";
# CF_FORCE_SYNCHRONIZE = "true";
# # CF_FILE_ID = "5247696"; # 2.0.7 server
# };
# environmentFiles = [
# config.sops.secrets."dawncraft.env".path
# ];
# ports = ["26968:26968/tcp" "26968:26968/udp"];
# volumes = [
# "/storage/dawncraft/data:/data"
# "/storage/dawncraft/worlds:/worlds"
# "/storage/dawncraft/downloads:/downloads"
# ];
# };
# networking.firewall.allowedTCPPorts = [
# 26968
# ];
# }
# {
# # flanilla family minecraft server
# users.groups.flanilla = {};
# users.users.flanilla = {
# isSystemUser = true;
# createHome = false;
# group = "flanilla";
# };
# virtualisation.oci-containers.containers.minecraft-flanilla = {
# autoStart = true;
# image = "docker.io/itzg/minecraft-server";
# user = "${toString config.users.users.flanilla.uid}:${toString config.users.groups.flanilla.gid}";
# extraOptions = ["--tty" "--interactive"];
# environment = {
# EULA = "true";
# UID = toString config.users.users.flanilla.uid;
# GID = toString config.users.groups.flanilla.gid;
# STOP_SERVER_ANNOUNCE_DELAY = "20";
# TZ = "America/Chicago";
# VERSION = "1.20.4";
# OPS = "lytedev";
# MODE = "creative";
# DIFFICULTY = "peaceful";
# ONLINE_MODE = "false";
# MEMORY = "8G";
# MAX_MEMORY = "16G";
# ALLOW_FLIGHT = "true";
# ENABLE_QUERY = "true";
# ENABLE_COMMAND_BLOCK = "true";
# };
# environmentFiles = [
# # config.sops.secrets."flanilla.env".path
# ];
# ports = ["26966:25565"];
# volumes = [
# "/storage/flanilla/data:/data"
# "/storage/flanilla/worlds:/worlds"
# ];
# };
# networking.firewall.allowedTCPPorts = [
# 26966
# ];
# }
# ({options, ...}: let
# toml = pkgs.formats.toml {};
# package = pkgs.kanidm;
# domain = "idm.h.lyte.dev";
# name = "kanidm";
# storage = "/storage/${name}";
# cert = "${storage}/certs/idm.h.lyte.dev.crt";
# key = "${storage}/certs/idm.h.lyte.dev.key";
# serverSettings = {
# inherit domain;
# bindaddress = "127.0.0.1:8443";
# # ldapbindaddress
# tls_chain = cert;
# tls_key = key;
# origin = "https://${domain}";
# db_path = "${storage}/data/kanidm.db";
# log_level = "info";
# online_backup = {
# path = "${storage}/backups/";
# schedule = "00 22 * * *";
# # versions = 7;
# };
# };
# unixdSettings = {
# hsm_pin_path = "/var/cache/${name}-unixd/hsm-pin";
# pam_allowed_login_groups = [];
# };
# clientSettings = {
# uri = "https://idm.h.lyte.dev";
# };
# user = name;
# group = name;
# serverConfigFile = toml.generate "server.toml" serverSettings;
# unixdConfigFile = toml.generate "kanidm-unixd.toml" unixdSettings;
# clientConfigFile = toml.generate "kanidm-config.toml" clientSettings;
# defaultServiceConfig = {
# BindReadOnlyPaths = [
# "/nix/store"
# "-/etc/resolv.conf"
# "-/etc/nsswitch.conf"
# "-/etc/hosts"
# "-/etc/localtime"
# ];
# CapabilityBoundingSet = [];
# # ProtectClock= adds DeviceAllow=char-rtc r
# DeviceAllow = "";
# # Implies ProtectSystem=strict, which re-mounts all paths
# # DynamicUser = true;
# LockPersonality = true;
# MemoryDenyWriteExecute = true;
# NoNewPrivileges = true;
# PrivateDevices = true;
# PrivateMounts = true;
# PrivateNetwork = true;
# PrivateTmp = true;
# PrivateUsers = true;
# ProcSubset = "pid";
# ProtectClock = true;
# ProtectHome = true;
# ProtectHostname = true;
# # Would re-mount paths ignored by temporary root
# #ProtectSystem = "strict";
# ProtectControlGroups = true;
# ProtectKernelLogs = true;
# ProtectKernelModules = true;
# ProtectKernelTunables = true;
# ProtectProc = "invisible";
# RestrictAddressFamilies = [];
# RestrictNamespaces = true;
# RestrictRealtime = true;
# RestrictSUIDSGID = true;
# SystemCallArchitectures = "native";
# SystemCallFilter = ["@system-service" "~@privileged @resources @setuid @keyring"];
# # Does not work well with the temporary root
# #UMask = "0066";
# };
# in {
# # kanidm
# config = {
# # we need a mechanism to get the certificates that caddy provisions for us
# systemd.timers."copy-kanidm-certificates-from-caddy" = {
# wantedBy = ["timers.target"];
# timerConfig = {
# OnBootSec = "10m"; # 10 minutes after booting
# OnUnitActiveSec = "5m"; # every 5 minutes afterwards
# Unit = "copy-kanidm-certificates-from-caddy.service";
# };
# };
# systemd.services."copy-kanidm-certificates-from-caddy" = {
# script = ''
# umask 077
# install -d -m 0700 -o "${user}" -g "${group}" "${storage}/data" "${storage}/certs"
# cd /var/lib/caddy/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/idm.h.lyte.dev
# install -m 0700 -o "${user}" -g "${group}" idm.h.lyte.dev.key idm.h.lyte.dev.crt "${storage}/certs"
# '';
# path = with pkgs; [rsync];
# serviceConfig = {
# Type = "oneshot";
# User = "root";
# };
# };
# environment.systemPackages = [package];
# # TODO: should I use this for /storage/kanidm/certs etc.?
# systemd.tmpfiles.settings."10-kanidm" = {
# "${serverSettings.online_backup.path}".d = {
# inherit user group;
# mode = "0700";
# };
# # "${builtins.dirOf unixdSettings.hsm_pin_path}".d = {
# # user = "${user}-unixd";
# # group = "${group}-unixd";
# # mode = "0700";
# # };
# "${storage}/data".d = {
# inherit user group;
# mode = "0700";
# };
# "${storage}/certs".d = {
# inherit user group;
# mode = "0700";
# };
# };
# users.groups = {
# ${group} = {};
# "${group}-unixd" = {};
# };
# users.users.${user} = {
# inherit group;
# description = "kanidm server";
# isSystemUser = true;
# packages = [package];
# };
# users.users."${user}-unixd" = {
# group = "${group}-unixd";
# description = lib.mkForce "kanidm PAM daemon";
# isSystemUser = true;
# };
# # the kanidm module in nixpkgs was not working for me, so I rolled my own
# # loosely based off it
# systemd.services.kanidm = {
# enable = true;
# path = with pkgs; [openssl] ++ [package];
# description = "kanidm identity management daemon";
# wantedBy = ["multi-user.target"];
# after = ["network.target"];
# requires = ["copy-kanidm-certificates-from-caddy.service"];
# script = ''
# pwd
# ls -la
# ls -laR /storage/kanidm
# ${package}/bin/kanidmd server -c ${serverConfigFile}
# '';
# # environment.RUST_LOG = serverSettings.log_level;
# serviceConfig = lib.mkMerge [
# defaultServiceConfig
# {
# StateDirectory = name;
# StateDirectoryMode = "0700";
# RuntimeDirectory = "${name}d";
# User = user;
# Group = group;
# AmbientCapabilities = ["CAP_NET_BIND_SERVICE"];
# CapabilityBoundingSet = ["CAP_NET_BIND_SERVICE"];
# PrivateUsers = lib.mkForce false;
# PrivateNetwork = lib.mkForce false;
# RestrictAddressFamilies = ["AF_INET" "AF_INET6" "AF_UNIX"];
# # TemporaryFileSystem = "/:ro";
# BindReadOnlyPaths = [
# "${storage}/certs"
# ];
# BindPaths = [
# "${storage}/data"
# # socket
# "/run/${name}d:/run/${name}d"
# # backups
# serverSettings.online_backup.path
# ];
# }
# ];
# };
# systemd.services.kanidm-unixd = {
# description = "Kanidm PAM daemon";
# wantedBy = ["multi-user.target"];
# after = ["network.target"];
# restartTriggers = [unixdConfigFile clientConfigFile];
# serviceConfig = lib.mkMerge [
# defaultServiceConfig
# {
# CacheDirectory = "${name}-unixd";
# CacheDirectoryMode = "0700";
# RuntimeDirectory = "${name}-unixd";
# ExecStart = "${package}/bin/kanidm_unixd";
# User = "${user}-unixd";
# Group = "${group}-unixd";
# BindReadOnlyPaths = [
# "-/etc/kanidm"
# "-/etc/static/kanidm"
# "-/etc/ssl"
# "-/etc/static/ssl"
# "-/etc/passwd"
# "-/etc/group"
# ];
# BindPaths = [
# # socket
# "/run/kanidm-unixd:/var/run/kanidm-unixd"
# ];
# # Needs to connect to kanidmd
# PrivateNetwork = lib.mkForce false;
# RestrictAddressFamilies = ["AF_INET" "AF_INET6" "AF_UNIX"];
# TemporaryFileSystem = "/:ro";
# }
# ];
# environment.RUST_LOG = serverSettings.log_level;
# };
# systemd.services.kanidm-unixd-tasks = {
# description = "Kanidm PAM home management daemon";
# wantedBy = ["multi-user.target"];
# after = ["network.target" "kanidm-unixd.service"];
# partOf = ["kanidm-unixd.service"];
# restartTriggers = [unixdConfigFile clientConfigFile];
# serviceConfig = {
# ExecStart = "${package}/bin/kanidm_unixd_tasks";
# BindReadOnlyPaths = [
# "/nix/store"
# "-/etc/resolv.conf"
# "-/etc/nsswitch.conf"
# "-/etc/hosts"
# "-/etc/localtime"
# "-/etc/kanidm"
# "-/etc/static/kanidm"
# ];
# BindPaths = [
# # To manage home directories
# "/home"
# # To connect to kanidm-unixd
# "/run/kanidm-unixd:/var/run/kanidm-unixd"
# ];
# # CAP_DAC_OVERRIDE is needed to ignore ownership of unixd socket
# CapabilityBoundingSet = ["CAP_CHOWN" "CAP_FOWNER" "CAP_DAC_OVERRIDE" "CAP_DAC_READ_SEARCH"];
# IPAddressDeny = "any";
# # Need access to users
# PrivateUsers = false;
# # Need access to home directories
# ProtectHome = false;
# RestrictAddressFamilies = ["AF_UNIX"];
# TemporaryFileSystem = "/:ro";
# Restart = "on-failure";
# };
# environment.RUST_LOG = serverSettings.log_level;
# };
# environment.etc = {
# "kanidm/server.toml".source = serverConfigFile;
# "kanidm/config".source = clientConfigFile;
# "kanidm/unixd".source = unixdConfigFile;
# };
# system.nssModules = [package];
# system.nssDatabases.group = [name];
# system.nssDatabases.passwd = [name];
# # environment.etc."kanidm/server.toml" = {
# # mode = "0600";
# # group = "kanidm";
# # user = "kanidm";
# # };
# # environment.etc."kanidm/config" = {
# # mode = "0600";
# # group = "kanidm";
# # user = "kanidm";
# # };
# services.caddy.virtualHosts."idm.h.lyte.dev" = {
# extraConfig = ''reverse_proxy https://idm.h.lyte.dev:8443'';
# };
# networking = {
# extraHosts = ''
# ::1 idm.h.lyte.dev
# 127.0.0.1 idm.h.lyte.dev
# '';
# };
# };
# })
    {
      systemd.tmpfiles.settings = {
        "10-audiobookshelf" = {
          "/storage/audiobookshelf" = {
            "d" = {
              mode = "0770";
              user = "audiobookshelf";
              group = "wheel";
            };
          };
          "/storage/audiobookshelf/audiobooks" = {
            "d" = {
              mode = "0770";
              user = "audiobookshelf";
              group = "wheel";
            };
          };
          "/storage/audiobookshelf/podcasts" = {
            "d" = {
              mode = "0770";
              user = "audiobookshelf";
              group = "wheel";
            };
          };
        };
      };

      users.groups.audiobookshelf = {};
      users.users.audiobookshelf = {
        isSystemUser = true;
        group = "audiobookshelf";
      };

      services.audiobookshelf = {
        enable = true;
        dataDir = "/storage/audiobookshelf";
        port = 8523;
      };

      systemd.services.audiobookshelf.serviceConfig = {
        WorkingDirectory = lib.mkForce config.services.audiobookshelf.dataDir;
        StateDirectory = lib.mkForce config.services.audiobookshelf.dataDir;
        Group = "audiobookshelf";
        User = "audiobookshelf";
      };

      services.caddy.virtualHosts."audio.lyte.dev" = {
        extraConfig = ''reverse_proxy :${toString config.services.audiobookshelf.port}'';
      };
    }
    {
      # prometheus
      services.restic.commonPaths = [
        # TODO: do I want this backed up?
        # "/var/lib/prometheus"
      ];
      services.prometheus = {
        enable = true;
        checkConfig = true;
        listenAddress = "127.0.0.1";
        port = 9090;
        exporters = {
          postgres = {
            enable = true;
            # runAsLocalSuperUser = true;
          };
        };
        # alertmanager.enable = true; # grafana for alerts?
      };
      # services.node-exporter.enable = true; # TODO: node-exporter? (see the sketch below)
      # TODO: exporters.zfs?
      # TODO: promtail?
      # idrac exporter?
      # restic exporter?
      # smartctl exporter?
      # systemd exporter?
      # NOTE: we probably don't want this exposed
      # services.caddy.virtualHosts."prometheus.h.lyte.dev" = {
      #   extraConfig = ''reverse_proxy :${toString config.services.prometheus.port}'';
      # };
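
      # Sketch for the node-exporter TODO above (untested; note it lives under
      # the prometheus module rather than services.node-exporter):
      # services.prometheus.exporters.node = {
      #   enable = true;
      #   enabledCollectors = ["systemd"];
      # };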
    }
    {
      # grafana
      systemd.tmpfiles.settings = {
        "10-grafana" = {
          "/storage/grafana" = {
            "d" = {
              mode = "0750";
              user = "root";
              group = "family";
            };
          };
        };
      };
      services.restic.commonPaths = [
        "/storage/grafana"
      ];
      sops.secrets = {
        grafana-admin-password = {
          owner = "grafana";
          group = "grafana";
          mode = "0400";
        };
      };
      services.grafana = {
        enable = true;
        dataDir = "/storage/grafana";
        provision = {
          enable = true;
        };
        settings = {
          server = {
            http_port = 3814;
          };
          security = {
            admin_email = "daniel@lyte.dev";
            admin_user = "lytedev";
            admin_password = ''$__file{${config.sops.secrets.grafana-admin-password.path}}'';
          };
          # database = {
          # };
        };
      };
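
      # With provisioning enabled above, the local prometheus could be wired in
      # declaratively (sketch, untested):
      # services.grafana.provision.datasources.settings.datasources = [
      #   {
      #     name = "Prometheus";
      #     type = "prometheus";
      #     url = "http://127.0.0.1:${toString config.services.prometheus.port}";
      #   }
      # ];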
      networking.firewall.allowedTCPPorts = [
        9000
      ];
      services.caddy.virtualHosts."grafana.h.lyte.dev" = {
        extraConfig = ''reverse_proxy :${toString config.services.grafana.settings.server.http_port}'';
      };
    }
  ];

  # TODO: non-root processes and services that access secrets need to be part
  # of the 'keys' group
  # maybe this will fix plausible?
  # systemd.services.some-service = {
  #   serviceConfig.SupplementaryGroups = [config.users.groups.keys.name];
  # };
  # or
  # users.users.example-user.extraGroups = [config.users.groups.keys.name];

  # TODO: declarative directory quotas? for storage/$USER and /home/$USER
  environment.systemPackages = with pkgs; [
    aria2
    restic
    btrfs-progs
    zfs
    smartmontools
    htop
    bottom
    curl
    xh
  ];

  services.tailscale.useRoutingFeatures = "server";

  # https://github.com/NixOS/nixpkgs/blob/04af42f3b31dba0ef742d254456dc4c14eedac86/nixos/modules/services/misc/lidarr.nix#L72
  # services.lidarr = {
  #   enable = true;
  #   dataDir = "/storage/lidarr";
  # };

  # services.radarr = {
  #   enable = true;
  #   dataDir = "/storage/radarr";
  # };

  # services.sonarr = {
  #   enable = true;
  #   dataDir = "/storage/sonarr";
  # };

  # services.bazarr = {
  #   enable = true;
  #   listenPort = 6767;
  # };

  # networking.firewall.allowedTCPPorts = [9876 9877];
  # networking.firewall.allowedUDPPorts = [9876 9877];
  # networking.firewall.allowedUDPPortRanges = [
  #   {
  #     from = 27000;
  #     to = 27100;
  #   }
  # ];
}