/*
if ur fans get loud:

# enable manual fan control
sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x01 0x00

# set fan speed to last byte as decimal
sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00
*/
2023-10-03 11:52:44 -05:00
{
2024-09-12 11:58:24 -05:00
/*
inputs ,
outputs ,
* /
2024-02-21 22:15:41 -06:00
lib ,
2024-02-21 20:39:10 -06:00
config ,
2023-10-03 11:52:44 -05:00
pkgs ,
. . .
2024-03-13 21:12:14 -05:00
} : {
2024-09-04 10:31:06 -05:00
system . stateVersion = " 2 4 . 0 5 " ;
2024-07-29 10:01:59 -05:00
home-manager . users . daniel . home . stateVersion = " 2 4 . 0 5 " ;
2024-03-13 21:12:14 -05:00
networking . hostName = " b e e f c a k e " ;
2023-11-02 13:14:43 -05:00
2024-03-13 21:12:14 -05:00
imports = [
{
  # hardware and boot module

  # required for ZFS; must be a unique 8-hex-digit machine id
  networking.hostId = "541ede55";

  boot = {
    zfs = {
      # import this pool at boot in addition to the root pool(s)
      extraPools = ["zstorage"];
    };
    supportedFilesystems = {
      zfs = true;
    };
    initrd.supportedFilesystems = {
      zfs = true;
    };
    # pin the kernel to one the ZFS module is known to build against
    kernelPackages = config.boot.zfs.package.latestCompatibleLinuxPackages;
    initrd.availableKernelModules = ["ehci_pci" "mpt3sas" "usbhid" "sd_mod"];
    kernelModules = ["kvm-intel"];
    # hibernation is unsafe with ZFS and can corrupt pools
    kernelParams = ["nohibernate"];
    loader.systemd-boot.enable = true;
    loader.efi.canTouchEfiVariables = true;
  };

  fileSystems."/" = {
    device = "/dev/disk/by-uuid/992ce55c-7507-4d6b-938c-45b7e891f395";
    fsType = "ext4";
  };

  fileSystems."/boot" = {
    device = "/dev/disk/by-uuid/B6C4-7CF4";
    fsType = "vfat";
    options = ["fmask=0022" "dmask=0022"];
  };

  /*
  # should be mounted by auto-import; see boot.zfs.extraPools
  fileSystems."/storage" = {
    device = "zstorage/storage";
    fsType = "zfs";
  };
  */

  fileSystems."/nix" = {
    device = "zstorage/nix";
    fsType = "zfs";
  };

  services.zfs.autoScrub.enable = true;
  services.zfs.autoSnapshot.enable = true;

  # TODO: nfs with zfs?
  # services.nfs.server.enable = true;
}
2024-09-11 11:57:27 -05:00
({
  options,
  config,
  ...
}: let
  inherit (lib) mkOption types;
in {
  # Extra option merged into the restic module: a shared list of paths that
  # every backup job defined later in this file includes in addition to its
  # own paths (see services.restic.backups below).
  options.services.restic.commonPaths = mkOption {
    type = types.nullOr (types.listOf types.str);
    default = [];
    description = ''
      Which paths to backup, in addition to ones specified via
      `dynamicFilesFrom`. If null or an empty array and
      `dynamicFilesFrom` is also null, no backup command will be run.
      This can be used to create a prune-only job.
    '';
    example = [
      "/var/lib/postgresql"
      "/home/user/backup"
    ];
  };
})
2024-09-04 10:31:06 -05:00
{
  # sops secrets config
  sops = {
    defaultSopsFile = ../secrets/beefcake/secrets.yml;
    age = {
      # derive the age decryption key from the host's SSH key,
      # generating it on first activation if it does not exist yet
      sshKeyPaths = ["/etc/ssh/ssh_host_ed25519_key"];
      keyFile = "/var/lib/sops-nix/key.txt";
      generateKey = true;
    };
  };
}
2024-09-06 15:34:18 -05:00
{
  # dynamic DNS client (updates Netlify-managed records)
  sops.secrets = {
    netlify-ddns-password = {mode = "0400";};
  };
  services.deno-netlify-ddns-client = {
    passwordFile = config.sops.secrets.netlify-ddns-password.path;
  };
}
2024-03-13 21:12:14 -05:00
{
  # nix binary cache
  sops.secrets = {
    nix-cache-priv-key = {mode = "0400";};
  };
  services.nix-serve = {
    enable = true; # TODO: true
    secretKeyFile = config.sops.secrets.nix-cache-priv-key.path;
  };
  services.caddy.virtualHosts."nix.h.lyte.dev" = {
    extraConfig = ''
      reverse_proxy :${toString config.services.nix-serve.port}
    '';
  };
  networking.firewall.allowedTCPPorts = [
    80
    443
  ];

  # regularly build this flake so we have stuff in the cache
  # TODO: schedule this for nightly builds instead of intervals based on boot time
  systemd.timers."build-lytedev-flake" = {
    wantedBy = ["timers.target"];
    timerConfig = {
      OnBootSec = "30m"; # 30 minutes after booting
      OnUnitActiveSec = "1d"; # every day afterwards
      Unit = "build-lytedev-flake.service";
    };
  };

  # working directory for the nightly build service below
  systemd.tmpfiles.settings = {
    "10-daniel-nightly-flake-build" = {
      "/home/daniel/.home/.cache/nightly-flake-builds" = {
        "d" = {
          mode = "0750";
          user = "daniel";
          group = "daniel";
        };
      };
    };
  };

  systemd.services."build-lytedev-flake" = {
    # TODO: might want to add root for the most recent results?
    script = ''
      # build self (main server) configuration
      nixos-rebuild build --flake git+https://git.lyte.dev/lytedev/nix.git --accept-flake-config
      # build desktop configuration
      nixos-rebuild build --flake git+https://git.lyte.dev/lytedev/nix.git#dragon --accept-flake-config
      # build main laptop configuration
      nixos-rebuild build --flake git+https://git.lyte.dev/lytedev/nix.git#foxtrot --accept-flake-config
    '';
    path = with pkgs; [openssh git nixos-rebuild];
    serviceConfig = {
      # TODO: mkdir -p...?
      WorkingDirectory = "/home/daniel/.home/.cache/nightly-flake-builds";
      Type = "oneshot";
      User = "daniel";
    };
  };

  # resolve our own cache hostname locally so builds don't hairpin through the router
  networking = {
    extraHosts = ''
      ::1 nix.h.lyte.dev
      127.0.0.1 nix.h.lyte.dev
    '';
  };
}
2024-09-06 16:36:53 -05:00
{
  # headscale (self-hosted tailscale control plane)
  services.headscale = {
    enable = false; # TODO: setup headscale?
    address = "127.0.0.1";
    port = 7777;
    settings = {
      server_url = "https://tailscale.vpn.h.lyte.dev";
      db_type = "sqlite3";
      db_path = "/var/lib/headscale/db.sqlite";
      derp.server = {
        enable = true;
        region_id = 999;
        stun_listen_addr = "0.0.0.0:3478";
      };

      dns_config = {
        magic_dns = true;
        base_domain = "vpn.h.lyte.dev";
        domains = [
          "ts.vpn.h.lyte.dev"
        ];
        nameservers = [
          "1.1.1.1"
          # "192.168.0.1"
        ];
        override_local_dns = true;
      };
    };
  };
  # only expose the reverse proxy and STUN port while headscale is enabled
  services.caddy.virtualHosts."tailscale.vpn.h.lyte.dev" = lib.mkIf config.services.headscale.enable {
    extraConfig = ''
      reverse_proxy http://localhost:${toString config.services.headscale.port}
    '';
  };
  networking.firewall.allowedUDPPorts = lib.mkIf config.services.headscale.enable [3478];
}
2024-03-13 21:12:14 -05:00
{
  # soju IRC bouncer
  # TODO: I think I need to setup my account? wondering if this can be done in nix as well
  services.restic.commonPaths = ["/var/lib/soju" "/var/lib/private/soju"];
  services.soju = {
    enable = true;
    listen = ["irc+insecure://:6667"];
  };
  networking.firewall.allowedTCPPorts = [
    6667
  ];
}
{
  # nextcloud
  users.users.nextcloud = {
    isSystemUser = true;
    createHome = false;
    group = "nextcloud";
  };
  users.groups.nextcloud = {};
  sops.secrets = {
    nextcloud-admin-password = {
      owner = "nextcloud";
      group = "nextcloud";
      mode = "400";
    };
  };
  systemd.tmpfiles.settings = {
    "10-nextcloud" = {
      "/storage/nextcloud" = {
        "d" = {
          mode = "0750";
          user = "nextcloud";
          group = "nextcloud";
        };
      };
    };
  };
  services.restic.commonPaths = [
    "/storage/nextcloud"
  ];

  services.postgresql = {
    ensureDatabases = ["nextcloud"];
    ensureUsers = [
      {
        name = "nextcloud";
        ensureDBOwnership = true;
      }
    ];
  };

  services.nextcloud = {
    enable = true;
    hostName = "nextcloud.h.lyte.dev";
    maxUploadSize = "100G";
    extraAppsEnable = true;
    autoUpdateApps.enable = true;
    extraApps = with config.services.nextcloud.package.packages.apps; {
      inherit calendar contacts notes onlyoffice tasks;
    };
    package = pkgs.nextcloud28;
    home = "/storage/nextcloud";
    configureRedis = true;
    caching.redis = true;
    settings = {
      # TODO: SMTP
      maintenance_window_start = 1;
    };
    config = {
      adminpassFile = config.sops.secrets.nextcloud-admin-password.path;
      adminuser = "daniel";
      dbtype = "pgsql";
      # connect to postgres over the unix socket
      dbhost = "/run/postgresql";
    };
    phpOptions = {
      "xdebug.mode" = "debug";
      "xdebug.client_host" = "10.0.2.2";
      "xdebug.client_port" = "9000";
      "xdebug.start_with_request" = "yes";
      "xdebug.idekey" = "ECLIPSE";
    };
  };

  # serve via caddy below instead of the module's default nginx;
  # nginx stays disabled but its virtualHost entry is still read for the docroot
  services.nginx.enable = false;
  systemd.services.nextcloud = {
    serviceConfig.User = "nextcloud";
    serviceConfig.Group = "nextcloud";
  };
  services.phpfpm.pools.nextcloud.settings = {
    # caddy must be able to reach the php-fpm socket
    "listen.owner" = "caddy";
    "listen.group" = "caddy";
  };
  services.caddy.virtualHosts."nextcloud.h.lyte.dev" = let
    fpm-nextcloud-pool = config.services.phpfpm.pools.nextcloud;
    root = config.services.nginx.virtualHosts.${config.services.nextcloud.hostName}.root;
  in
    lib.mkIf config.services.nextcloud.enable {
      extraConfig = ''
        encode zstd gzip

        root * ${root}

        redir /.well-known/carddav /remote.php/dav 301
        redir /.well-known/caldav /remote.php/dav 301
        redir /.well-known/* /index.php{uri} 301
        redir /remote/* /remote.php{uri} 301

        header {
          Strict-Transport-Security max-age=31536000
          Permissions-Policy interest-cohort=()
          X-Content-Type-Options nosniff
          X-Frame-Options SAMEORIGIN
          Referrer-Policy no-referrer
          X-XSS-Protection "1; mode=block"
          X-Permitted-Cross-Domain-Policies none
          X-Robots-Tag "noindex, nofollow"
          X-Forwarded-Host nextcloud.h.lyte.dev
          -X-Powered-By
        }

        php_fastcgi unix/${fpm-nextcloud-pool.socket} {
          root ${root}
          env front_controller_active true
          env modHeadersAvailable true
        }

        @forbidden {
          path /build/* /tests/* /config/* /lib/* /3rdparty/* /templates/* /data/*
          path /.* /autotest* /occ* /issue* /indie* /db_* /console*
          not path /.well-known/*
        }
        error @forbidden 404

        @immutable {
          path *.css *.js *.mjs *.svg *.gif *.png *.jpg *.ico *.wasm *.tflite
          query v=*
        }
        header @immutable Cache-Control "max-age=15778463, immutable"

        @static {
          path *.css *.js *.mjs *.svg *.gif *.png *.jpg *.ico *.wasm *.tflite
          not query v=*
        }
        header @static Cache-Control "max-age=15778463"

        @woff2 path *.woff2
        header @woff2 Cache-Control "max-age=604800"

        file_server
      '';
    };
}
2024-09-06 16:44:15 -05:00
{
  # plausible (web analytics)
  services.postgresql = {
    ensureDatabases = ["plausible"];
    ensureUsers = [
      {
        name = "plausible";
        ensureDBOwnership = true;
      }
    ];
  };
  users.users.plausible = {
    isSystemUser = true;
    createHome = false;
    group = "plausible";
  };
  users.extraGroups = {
    "plausible" = {};
  };
  services.plausible = {
    enable = true;
    database = {
      # clickhouse is configured by this module; postgres is managed above
      clickhouse.setup = true;
      postgres = {
        setup = false;
        dbname = "plausible";
      };
    };
    server = {
      baseUrl = "https://a.lyte.dev";
      disableRegistration = true;
      port = 8899;
      secretKeybaseFile = config.sops.secrets.plausible-secret-key-base.path;
    };
    adminUser = {
      activate = false;
      email = "daniel@lyte.dev";
      passwordFile = config.sops.secrets.plausible-admin-password.path;
    };
  };
  sops.secrets = {
    plausible-secret-key-base = {
      owner = "plausible";
      group = "plausible";
    };
    plausible-admin-password = {
      owner = "plausible";
      group = "plausible";
    };
  };
  systemd.services.plausible = {
    serviceConfig.User = "plausible";
    serviceConfig.Group = "plausible";
  };
  services.caddy.virtualHosts."a.lyte.dev" = {
    extraConfig = ''
      reverse_proxy :${toString config.services.plausible.server.port}
    '';
  };
}
{
  # clickhouse (used by plausible) — quiet down its very chatty defaults
  environment.etc = {
    "clickhouse-server/users.d/disable-logging-query.xml" = {
      text = ''
        <clickhouse>
          <profiles>
            <default>
              <log_queries>0</log_queries>
              <log_query_threads>0</log_query_threads>
            </default>
          </profiles>
        </clickhouse>
      '';
    };
    "clickhouse-server/config.d/reduce-logging.xml" = {
      text = ''
        <clickhouse>
          <logger>
            <level>warning</level>
            <console>true</console>
          </logger>
          <query_thread_log remove="remove"/>
          <query_log remove="remove"/>
          <text_log remove="remove"/>
          <trace_log remove="remove"/>
          <metric_log remove="remove"/>
          <asynchronous_metric_log remove="remove"/>
          <session_log remove="remove"/>
          <part_log remove="remove"/>
        </clickhouse>
      '';
    };
  };
  services.restic.commonPaths = [
    # "/var/lib/clickhouse"
  ];
}
{
  # family storage
  users.extraGroups = {
    "family" = {};
  };
  systemd.tmpfiles.settings = {
    "10-family" = {
      # shared area, group-writable by the family group
      "/storage/family" = {
        "d" = {
          mode = "0770";
          user = "root";
          group = "family";
        };
      };
      # valerie's private area
      "/storage/valerie" = {
        "d" = {
          mode = "0700";
          user = "valerie";
          group = "family";
        };
      };
    };
  };
  services.restic.commonPaths = [
    "/storage/family"
    "/storage/valerie"
  ];
}
2024-03-13 21:12:14 -05:00
{
  # daniel augments
  systemd.tmpfiles.settings = {
    "10-daniel" = {
      "/storage/daniel" = {
        "d" = {
          mode = "0700";
          user = "daniel";
          group = "nogroup";
        };
      };
      "/storage/daniel/critical" = {
        "d" = {
          mode = "0700";
          user = "daniel";
          group = "nogroup";
        };
      };
    };
  };

  users.groups.daniel.members = ["daniel"];
  users.groups.nixadmin.members = ["daniel"];
  users.users.daniel = {
    extraGroups = [
      # "nixadmin" # write access to /etc/nixos/ files
      "wheel" # sudo access
      "caddy" # write access to public static files
      "users" # general users group
      "jellyfin" # write access to jellyfin files
      "audiobookshelf" # write access to audiobookshelf files
      "flanilla" # minecraft server manager
      "forgejo"
    ];
  };

  services.restic.commonPaths = [
    "/storage/daniel"
  ];

  services.postgresql = {
    ensureDatabases = ["daniel"];
    ensureUsers = [
      {
        name = "daniel";
        ensureClauses = {
          # superuser = true;
          # createrole = true;
          # createdb = true;
          # bypassrls = true;
        };
        ensureDBOwnership = true;
      }
    ];
  };
}
2024-09-06 16:36:53 -05:00
{
  # jellyfin media server
  systemd.tmpfiles.settings = {
    "10-jellyfin" = {
      "/storage/jellyfin" = {
        "d" = {
          mode = "0770";
          user = "jellyfin";
          group = "wheel";
        };
      };
      "/storage/jellyfin/movies" = {
        "d" = {
          mode = "0770";
          user = "jellyfin";
          group = "wheel";
        };
      };
      "/storage/jellyfin/tv" = {
        "d" = {
          mode = "0770";
          user = "jellyfin";
          group = "wheel";
        };
      };
      "/storage/jellyfin/music" = {
        "d" = {
          mode = "0770";
          user = "jellyfin";
          group = "wheel";
        };
      };
    };
  };
  services.jellyfin = {
    enable = true;
    openFirewall = false;
    # uses port 8096 by default, configurable from admin UI
  };
  services.caddy.virtualHosts."video.lyte.dev" = {
    extraConfig = ''reverse_proxy :8096'';
  };

  /*
  NOTE: this server's xeon chips DO NOT seem to support quicksync or graphics in general
  but I can probably throw in a crappy GPU (or a big, cheap ebay GPU for ML
  stuff, too?) and get good transcoding performance
  */

  # jellyfin hardware encoding
  /*
  hardware.graphics = {
    enable = true;
    extraPackages = with pkgs; [
      intel-media-driver
      vaapiIntel
      vaapiVdpau
      libvdpau-va-gl
      intel-compute-runtime
    ];
  };
  nixpkgs.config.packageOverrides = pkgs: {
    vaapiIntel = pkgs.vaapiIntel.override {enableHybridCodec = true;};
  };
  */
}
{
  # postgres
  systemd.tmpfiles.settings = {
    "10-postgres" = {
      "/storage/postgres" = {
        "d" = {
          mode = "0750";
          user = "postgres";
          group = "postgres";
        };
      };
    };
  };

  services.postgresql = {
    enable = true;
    dataDir = "/storage/postgres";
    enableTCPIP = true;
    package = pkgs.postgresql_15;
    # https://www.postgresql.org/docs/current/auth-pg-hba-conf.html
    # TODO: give the "daniel" user access to all databases
    /*
    authentication = pkgs.lib.mkOverride 10 ''
      #type database user auth-method auth-options
      local all postgres peer map=superuser_map
      local all daniel peer map=superuser_map
      local sameuser all peer map=superuser_map

      # lan ipv4
      host all daniel 192.168.0.0/16 trust
      host all daniel 10.0.0.0/24 trust

      # tailnet ipv4
      host all daniel 100.64.0.0/10 trust
    '';
    */

    /*
    identMap = ''
      # map system_user db_user
      superuser_map root postgres
      superuser_map postgres postgres
      superuser_map daniel postgres

      # Let other names login as themselves
      superuser_map /^(.*)$ \1
    '';
    */
  };

  services.postgresqlBackup = {
    enable = true;
    backupAll = true;
    compression = "none"; # hoping for restic deduplication here?
    location = "/storage/postgres-backups";
    # daily at 03:00
    startAt = "*-*-* 03:00:00";
  };

  services.restic.commonPaths = [
    "/storage/postgres-backups"
  ];
}
2024-09-11 11:57:27 -05:00
{
  # friends
  users.users.ben = {
    isNormalUser = true;
    packages = [pkgs.vim];
    openssh.authorizedKeys.keys = [
      # NOTE(review): key reconstructed from garbled source — verify against the original
      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKUfLZ+IX85p9355Po2zP1H2tAxiE0rE6IYb8Sf+eF9T ben@benhany.com"
    ];
  };

  users.users.alan = {
    isNormalUser = true;
    packages = [pkgs.vim];
    # openssh.authorizedKeys.keys = [];
  };
}
2024-09-06 15:34:18 -05:00
{
  # restic backups
  sops.secrets = {
    restic-ssh-priv-key-benland = {mode = "0400";};
    restic-rascal-passphrase = {
      mode = "0400";
    };
    restic-rascal-ssh-private-key = {
      mode = "0400";
    };
  };

  users.groups.restic = {};
  users.users.restic = {
    # used for other machines to backup to
    isSystemUser = true;
    createHome = true;
    home = "/storage/backups/restic";
    group = "restic";
    extraGroups = ["sftponly"];
    openssh.authorizedKeys.keys = [] ++ config.users.users.daniel.openssh.authorizedKeys.keys;
  };

  # jail sftponly users into their backup directory; no shell, no forwarding
  services.openssh.extraConfig = ''
    Match Group sftponly
      ChrootDirectory /storage/backups/%u
      ForceCommand internal-sftp
      AllowTcpForwarding no
  '';

  systemd.tmpfiles.settings = {
    "10-backups-local" = {
      "/storage/backups/local" = {
        "d" = {
          mode = "0750";
          user = "root";
          group = "wheel";
        };
      };
    };
  };

  services.restic.backups = let
    # TODO: How do I set things up so that a compromised server doesn't have access to my backups so that it can corrupt or ransomware them?
    defaults = {
      passwordFile = config.sops.secrets.restic-rascal-passphrase.path;
      paths =
        config.services.restic.commonPaths
        ++ [
        ];
      initialize = true;
      exclude = [];
      timerConfig = {
        OnCalendar = ["04:45" "17:45"];
      };
    };
  in {
    # on-disk repository on this machine
    local =
      defaults
      // {
        repository = "/storage/backups/local";
      };
    # off-site repository over tailscale sftp
    rascal =
      defaults
      // {
        extraOptions = [
          ''sftp.command="ssh beefcake@rascal.hare-cod.ts.net -i ${config.sops.secrets.restic-rascal-ssh-private-key.path} -s sftp"''
        ];
        repository = "sftp://beefcake@rascal.hare-cod.ts.net://storage/backups/beefcake";
      };
    # TODO: add ruby?
    # off-site repository at a friend's machine
    benland =
      defaults
      // {
        extraOptions = [
          ''sftp.command="ssh daniel@n.benhaney.com -p 10022 -i ${config.sops.secrets.restic-ssh-priv-key-benland.path} -s sftp"''
        ];
        repository = "sftp://daniel@n.benhaney.com://storage/backups/beefcake";
      };
  };
}
{
  # caddy web server + public file share
  systemd.tmpfiles.settings = {
    "10-caddy" = {
      # setgid dir so uploads inherit the wheel group
      "/storage/files.lyte.dev" = {
        "d" = {
          mode = "2775";
          user = "root";
          group = "wheel";
        };
      };
    };
  };
  services.restic.commonPaths = [
    "/storage/files.lyte.dev"
  ];
  services.caddy = {
    # TODO: 502 and other error pages
    enable = true;
    email = "daniel@lyte.dev";
    adapter = "caddyfile";
    virtualHosts = {
      "files.lyte.dev" = {
        # TODO: customize the files.lyte.dev template?
        extraConfig = ''
          header {
            Access-Control-Allow-Origin "{http.request.header.Origin}"
            Access-Control-Allow-Credentials true
            Access-Control-Allow-Methods *
            Access-Control-Allow-Headers *
            Vary Origin
            defer
          }

          file_server browse {
            # browse template
            # hide .*
            root /storage/files.lyte.dev
          }
        '';
      };
    };
    # acmeCA = "https://acme-staging-v02.api.letsencrypt.org/directory";
  };
}
2024-09-06 15:39:26 -05:00
{
  # forgejo (git forge) + actions runner
  # systemd.tmpfiles.settings = {
  #   "10-forgejo" = {
  #     "/storage/forgejo" = {
  #       "d" = {
  #         mode = "0700";
  #         user = "forgejo";
  #         group = "nogroup";
  #       };
  #     };
  #   };
  # };
  services.forgejo = {
    enable = true;
    stateDir = "/storage/forgejo";
    settings = {
      DEFAULT = {
        APP_NAME = "git.lyte.dev";
      };
      server = {
        ROOT_URL = "https://git.lyte.dev";
        HTTP_ADDR = "127.0.0.1";
        HTTP_PORT = 3088;
        DOMAIN = "git.lyte.dev";
      };
      migrations = {
        ALLOWED_DOMAINS = "*.github.com,github.com,gitlab.com,*.gitlab.com";
      };
      actions = {
        ENABLED = true;
      };
      service = {
        DISABLE_REGISTRATION = true;
      };
      session = {
        COOKIE_SECURE = true;
      };
      log = {
        # LEVEL = "Debug";
      };
      ui = {
        THEMES = "forgejo-auto,forgejo-light,forgejo-dark";
        DEFAULT_THEME = "forgejo-auto";
      };
      indexer = {
        REPO_INDEXER_ENABLED = "true";
        REPO_INDEXER_PATH = "indexers/repos.bleve";
        MAX_FILE_SIZE = "1048576";
        # REPO_INDEXER_INCLUDE =
        REPO_INDEXER_EXCLUDE = "resources/bin/**";
      };
    };
    lfs = {
      enable = true;
    };
    dump = {
      enable = false;
    };
    database = {
      # TODO: move to postgres?
      type = "sqlite3";
    };
  };

  services.restic.commonPaths = [
    config.services.forgejo.stateDir
  ];
  sops.secrets = {
    "forgejo-runner.env" = {mode = "0400";};
  };
  # the runner token secret must exist before the runner starts
  systemd.services.gitea-runner-beefcake.after = ["sops-nix.service"];

  services.gitea-actions-runner = {
    # TODO: simple git-based automation would be dope? maybe especially for
    # mirroring to github super easy?
    package = pkgs.forgejo-runner;
    instances."beefcake" = {
      enable = true;
      name = "beefcake";
      url = "https://git.lyte.dev";
      settings = {
        container = {
          # use the shared network which is bridged by default
          # this lets us hit git.lyte.dev just fine
          network = "podman";
        };
      };
      labels = [
        # type ":host" does not depend on docker/podman/lxc
        "podman"
        "nix:docker://git.lyte.dev/lytedev/nix:latest"
        "beefcake:host"
        "nixos-host:host"
      ];
      tokenFile = config.sops.secrets."forgejo-runner.env".path;
      hostPackages = with pkgs; [
        nix
        bash
        coreutils
        curl
        gawk
        gitMinimal
        gnused
        nodejs
        gnutar # needed for cache action
        wget
      ];
    };
  };
  # environment.systemPackages = with pkgs; [nodejs];

  services.caddy.virtualHosts."git.lyte.dev" = {
    extraConfig = ''
      reverse_proxy :${toString config.services.forgejo.settings.server.HTTP_PORT}
    '';
  };
  services.caddy.virtualHosts."http://git.beefcake.lan" = {
    extraConfig = ''
      reverse_proxy :${toString config.services.forgejo.settings.server.HTTP_PORT}
    '';
  };
}
2024-09-06 16:05:29 -05:00
{
  # vaultwarden (bitwarden-compatible password manager)
  services.restic.commonPaths = [
    config.services.vaultwarden.backupDir
  ];
  services.vaultwarden = {
    enable = true;
    backupDir = "/storage/vaultwarden/backups";
    config = {
      DOMAIN = "https://bw.lyte.dev";
      SIGNUPS_ALLOWED = "false";
      ROCKET_ADDRESS = "127.0.0.1";
      ROCKET_PORT = 8222;
      /*
      TODO: smtp setup?
      right now, I think I configured this manually by temporarily setting ADMIN_TOKEN
      and then configuring in https://bw.lyte.dev/admin
      */
    };
  };
  services.caddy.virtualHosts."bw.lyte.dev" = {
    extraConfig = ''reverse_proxy :${toString config.services.vaultwarden.config.ROCKET_PORT}'';
  };
}
2024-09-06 16:44:15 -05:00
{
  # atuin (shell history sync server)
  users.users.atuin = {
    isSystemUser = true;
    createHome = false;
    group = "atuin";
  };
  users.extraGroups = {
    "atuin" = {};
  };
  services.postgresql = {
    ensureDatabases = ["atuin"];
    ensureUsers = [
      {
        name = "atuin";
        ensureDBOwnership = true;
      }
    ];
  };
  services.atuin = {
    enable = true;
    database = {
      createLocally = false;
      # NOTE: this uses postgres over the unix domain socket by default
      # uri = "postgresql://atuin@localhost:5432/atuin";
    };
    openRegistration = false;
    # TODO: would be neat to have a way to "force" a registration on the server
  };
  systemd.services.atuin.serviceConfig = {
    Group = "atuin";
    User = "atuin";
  };
  services.caddy.virtualHosts."atuin.h.lyte.dev" = {
    extraConfig = ''reverse_proxy :${toString config.services.atuin.port}'';
  };
}
2024-09-12 11:58:24 -05:00
{
  # jland minecraft server (currently disabled; kept for reference)
  # NOTE(review): the closing position of this comment block was inferred from
  # a garbled source — confirm it wraps everything up to the firewall rule.
  /*
  users.groups.jland = {
    gid = 982;
  };
  users.users.jland = {
    uid = 986;
    isSystemUser = true;
    createHome = false;
    group = "jland";
  };
  virtualisation.oci-containers.containers.minecraft-jland = {
    autoStart = false;

    # sending commands: https://docker-minecraft-server.readthedocs.io/en/latest/commands/
    image = "docker.io/itzg/minecraft-server";
    # user = "${toString config.users.users.jland.uid}:${toString config.users.groups.jland.gid}";

    extraOptions = [
      "--tty"
      "--interactive"
    ];
    environment = {
      EULA = "true";
      # UID = toString config.users.users.jland.uid;
      # GID = toString config.users.groups.jland.gid;
      STOP_SERVER_ANNOUNCE_DELAY = "20";
      TZ = "America/Chicago";
      VERSION = "1.20.1";
      MEMORY = "8G";
      MAX_MEMORY = "16G";
      TYPE = "FORGE";
      FORGE_VERSION = "47.1.3";
      ALLOW_FLIGHT = "true";
      ENABLE_QUERY = "true";
      MODPACK = "/data/origination-files/Server-Files-0.2.14.zip";

      # TYPE = "AUTO_CURSEFORGE";
      # CF_SLUG = "monumental-experience";
      # CF_FILE_ID = "4826863"; # 2.2.53

      # due to
      # Nov 02 13:45:22 beefcake minecraft-jland[2738672]: me.itzg.helpers.errors.GenericException: The modpack authors have indicated this file is not allowed for project distribution. Please download the client zip file from https://www.curseforge.com/minecraft/modpacks/monumental-experience and pass via CF_MODPACK_ZIP environment variable or place indownloads repo directory.
      # we must upload manually
      # CF_MODPACK_ZIP = "/data/origination-files/Monumental+Experience-2.2.53.zip";

      # ENABLE_AUTOPAUSE = "true"; # TODO: must increate or disable max-tick-time
      # May also have mod/loader incompatibilities?
      # https://docker-minecraft-server.readthedocs.io/en/latest/misc/autopause-autostop/autopause/
    };

    environmentFiles = [
      # config.sops.secrets."jland.env".path
    ];

    ports = ["26965:25565"];

    volumes = [
      "/storage/jland/data:/data"
      "/storage/jland/worlds:/worlds"
    ];
  };
  networking.firewall.allowedTCPPorts = [
    26965
  ];
  */
}
{
# dawncraft minecraft server
systemd . tmpfiles . rules = [
" d / s t o r a g e / d a w n c r a f t / 0 7 7 0 1 0 0 0 1 0 0 0 - "
" d / s t o r a g e / d a w n c r a f t / d a t a / 0 7 7 0 1 0 0 0 1 0 0 0 - "
" d / s t o r a g e / d a w n c r a f t / w o r l d s / 0 7 7 0 1 0 0 0 1 0 0 0 - "
" d / s t o r a g e / d a w n c r a f t / d o w n l o a d s / 0 7 7 0 1 0 0 0 1 0 0 0 - "
] ;
virtualisation . oci-containers . containers . minecraft-dawncraft = {
autoStart = false ;
# sending commands: https://docker-minecraft-server.readthedocs.io/en/latest/commands/
image = " d o c k e r . i o / i t z g / m i n e c r a f t - s e r v e r " ;
extraOptions = [
" - - t t y "
" - - i n t e r a c t i v e "
] ;
environment = {
EULA = " t r u e " ;
STOP_SERVER_ANNOUNCE_DELAY = " 2 0 " ;
TZ = " A m e r i c a / C h i c a g o " ;
VERSION = " 1 . 1 8 . 2 " ;
MEMORY = " 8 G " ;
MAX_MEMORY = " 3 2 G " ;
ALLOW_FLIGHT = " t r u e " ;
ENABLE_QUERY = " t r u e " ;
SERVER_PORT = " 2 6 9 6 8 " ;
QUERY_PORT = " 2 6 9 6 8 " ;
TYPE = " A U T O _ C U R S E F O R G E " ;
CF_SLUG = " d a w n - c r a f t " ;
CF_EXCLUDE_MODS = " 3 6 8 3 9 8 " ;
CF_FORCE_SYNCHRONIZE = " t r u e " ;
# CF_FILE_ID = "5247696"; # 2.0.7 server
} ;
environmentFiles = [
config . sops . secrets . " d a w n c r a f t . e n v " . path
] ;
ports = [ " 2 6 9 6 8 : 2 6 9 6 8 / t c p " " 2 6 9 6 8 : 2 6 9 6 8 / u d p " ] ;
volumes = [
" / s t o r a g e / d a w n c r a f t / d a t a : / d a t a "
" / s t o r a g e / d a w n c r a f t / w o r l d s : / w o r l d s "
" / s t o r a g e / d a w n c r a f t / d o w n l o a d s : / d o w n l o a d s "
] ;
} ;
networking . firewall . allowedTCPPorts = [
26968
] ;
* /
}
{
  # flanilla family minecraft server (disabled — the entire definition below
  # is commented out and kept for future re-enablement)
  /*
  users.groups.flanilla = {};
  users.users.flanilla = {
    isSystemUser = true;
    createHome = false;
    group = "flanilla";
  };
  virtualisation.oci-containers.containers.minecraft-flanilla = {
    autoStart = true;
    image = "docker.io/itzg/minecraft-server";
    user = "${toString config.users.users.flanilla.uid}:${toString config.users.groups.flanilla.gid}";
    extraOptions = ["--tty" "--interactive"];
    environment = {
      EULA = "true";
      UID = toString config.users.users.flanilla.uid;
      GID = toString config.users.groups.flanilla.gid;
      STOP_SERVER_ANNOUNCE_DELAY = "20";
      TZ = "America/Chicago";
      VERSION = "1.20.4";
      OPS = "lytedev";
      MODE = "creative";
      DIFFICULTY = "peaceful";
      ONLINE_MODE = "false";
      MEMORY = "8G";
      MAX_MEMORY = "16G";
      ALLOW_FLIGHT = "true";
      ENABLE_QUERY = "true";
      ENABLE_COMMAND_BLOCK = "true";
    };
    environmentFiles = [
      # config.sops.secrets."flanilla.env".path
    ];
    ports = ["26966:25565"];
    volumes = [
      "/storage/flanilla/data:/data"
      "/storage/flanilla/worlds:/worlds"
    ];
  };
  networking.firewall.allowedTCPPorts = [
    26966
  ];
  */
}
({options, ...}: let
  # kanidm identity management (disabled — both the `let` bindings and the
  # module config below are commented out, kept for future re-enablement)
  /*
  toml = pkgs.formats.toml {};
  package = pkgs.kanidm;
  domain = "idm.h.lyte.dev";
  name = "kanidm";
  storage = "/storage/${name}";
  cert = "${storage}/certs/idm.h.lyte.dev.crt";
  key = "${storage}/certs/idm.h.lyte.dev.key";
  serverSettings = {
    inherit domain;
    bindaddress = "127.0.0.1:8443";
    # ldapbindaddress
    tls_chain = cert;
    tls_key = key;
    origin = "https://${domain}";
    db_path = "${storage}/data/kanidm.db";
    log_level = "info";
    online_backup = {
      path = "${storage}/backups/";
      schedule = "00 22 * * *";
      # versions = 7;
    };
  };
  unixdSettings = {
    hsm_pin_path = "/var/cache/${name}-unixd/hsm-pin";
    pam_allowed_login_groups = [];
  };
  clientSettings = {
    uri = "https://idm.h.lyte.dev";
  };
  user = name;
  group = name;
  serverConfigFile = toml.generate "server.toml" serverSettings;
  unixdConfigFile = toml.generate "kanidm-unixd.toml" unixdSettings;
  clientConfigFile = toml.generate "kanidm-config.toml" clientSettings;
  defaultServiceConfig = {
    BindReadOnlyPaths = [
      "/nix/store"
      "-/etc/resolv.conf"
      "-/etc/nsswitch.conf"
      "-/etc/hosts"
      "-/etc/localtime"
    ];
    CapabilityBoundingSet = [];
    # ProtectClock= adds DeviceAllow=char-rtc r
    DeviceAllow = "";
    # Implies ProtectSystem=strict, which re-mounts all paths
    # DynamicUser = true;
    LockPersonality = true;
    MemoryDenyWriteExecute = true;
    NoNewPrivileges = true;
    PrivateDevices = true;
    PrivateMounts = true;
    PrivateNetwork = true;
    PrivateTmp = true;
    PrivateUsers = true;
    ProcSubset = "pid";
    ProtectClock = true;
    ProtectHome = true;
    ProtectHostname = true;
    # Would re-mount paths ignored by temporary root
    # ProtectSystem = "strict";
    ProtectControlGroups = true;
    ProtectKernelLogs = true;
    ProtectKernelModules = true;
    ProtectKernelTunables = true;
    ProtectProc = "invisible";
    RestrictAddressFamilies = [];
    RestrictNamespaces = true;
    RestrictRealtime = true;
    RestrictSUIDSGID = true;
    SystemCallArchitectures = "native";
    # NOTE(review): filter groups reconstructed from a garbled source — confirm
    # the exact SystemCallFilter string before re-enabling
    SystemCallFilter = ["@system-service" "~@privileged @resources @setuid @keyring"];
    # Does not work well with the temporary root
    # UMask = "0066";
  };
  */
in {
  # kanidm
  /*
  config = {
    # we need a mechanism to get the certificates that caddy provisions for us
    systemd.timers."copy-kanidm-certificates-from-caddy" = {
      wantedBy = ["timers.target"];
      timerConfig = {
        OnBootSec = "10m"; # 10 minutes after booting
        OnUnitActiveSec = "5m"; # every 5 minutes afterwards
        Unit = "copy-kanidm-certificates-from-caddy.service";
      };
    };
    systemd.services."copy-kanidm-certificates-from-caddy" = {
      script = ''
        umask 077
        install -d -m 0700 -o "${user}" -g "${group}" "${storage}/data" "${storage}/certs"
        cd /var/lib/caddy/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/idm.h.lyte.dev
        install -m 0700 -o "${user}" -g "${group}" idm.h.lyte.dev.key idm.h.lyte.dev.crt "${storage}/certs"
      '';
      path = with pkgs; [rsync];
      serviceConfig = {
        Type = "oneshot";
        User = "root";
      };
    };
    environment.systemPackages = [package];
    # TODO: should I use this for /storage/kanidm/certs etc.?
    systemd.tmpfiles.settings."10-kanidm" = {
      "${serverSettings.online_backup.path}".d = {
        inherit user group;
        mode = "0700";
      };
      # "${builtins.dirOf unixdSettings.hsm_pin_path}".d = {
      #   user = "${user}-unixd";
      #   group = "${group}-unixd";
      #   mode = "0700";
      # };
      "${storage}/data".d = {
        inherit user group;
        mode = "0700";
      };
      "${storage}/certs".d = {
        inherit user group;
        mode = "0700";
      };
    };
    users.groups = {
      ${group} = {};
      "${group}-unixd" = {};
    };
    users.users.${user} = {
      inherit group;
      description = "kanidm server";
      isSystemUser = true;
      packages = [package];
    };
    users.users."${user}-unixd" = {
      group = "${group}-unixd";
      description = lib.mkForce "kanidm PAM daemon";
      isSystemUser = true;
    };
    # the kanidm module in nixpkgs was not working for me, so I rolled my own
    # loosely based off it
    systemd.services.kanidm = {
      enable = true;
      path = with pkgs; [openssl] ++ [package];
      description = "kanidm identity management daemon";
      wantedBy = ["multi-user.target"];
      after = ["network.target"];
      requires = ["copy-kanidm-certificates-from-caddy.service"];
      script = ''
        pwd
        ls -la
        ls -laR /storage/kanidm
        ${package}/bin/kanidmd server -c ${serverConfigFile}
      '';
      # environment.RUST_LOG = serverSettings.log_level;
      serviceConfig = lib.mkMerge [
        defaultServiceConfig
        {
          StateDirectory = name;
          StateDirectoryMode = "0700";
          RuntimeDirectory = "${name}d";
          User = user;
          Group = group;
          AmbientCapabilities = ["CAP_NET_BIND_SERVICE"];
          CapabilityBoundingSet = ["CAP_NET_BIND_SERVICE"];
          PrivateUsers = lib.mkForce false;
          PrivateNetwork = lib.mkForce false;
          RestrictAddressFamilies = ["AF_INET" "AF_INET6" "AF_UNIX"];
          # TemporaryFileSystem = "/:ro";
          BindReadOnlyPaths = [
            "${storage}/certs"
          ];
          BindPaths = [
            "${storage}/data"
            # socket
            "/run/${name}d:/run/${name}d"
            # backups
            serverSettings.online_backup.path
          ];
        }
      ];
    };
    systemd.services.kanidm-unixd = {
      description = "Kanidm PAM daemon";
      wantedBy = ["multi-user.target"];
      after = ["network.target"];
      restartTriggers = [unixdConfigFile clientConfigFile];
      serviceConfig = lib.mkMerge [
        defaultServiceConfig
        {
          CacheDirectory = "${name}-unixd";
          CacheDirectoryMode = "0700";
          RuntimeDirectory = "${name}-unixd";
          ExecStart = "${package}/bin/kanidm_unixd";
          User = "${user}-unixd";
          Group = "${group}-unixd";
          BindReadOnlyPaths = [
            "-/etc/kanidm"
            "-/etc/static/kanidm"
            "-/etc/ssl"
            "-/etc/static/ssl"
            "-/etc/passwd"
            "-/etc/group"
          ];
          BindPaths = [
            # socket
            "/run/kanidm-unixd:/var/run/kanidm-unixd"
          ];
          # Needs to connect to kanidmd
          PrivateNetwork = lib.mkForce false;
          RestrictAddressFamilies = ["AF_INET" "AF_INET6" "AF_UNIX"];
          TemporaryFileSystem = "/:ro";
        }
      ];
      environment.RUST_LOG = serverSettings.log_level;
    };
    systemd.services.kanidm-unixd-tasks = {
      description = "Kanidm PAM home management daemon";
      wantedBy = ["multi-user.target"];
      after = ["network.target" "kanidm-unixd.service"];
      partOf = ["kanidm-unixd.service"];
      restartTriggers = [unixdConfigFile clientConfigFile];
      serviceConfig = {
        ExecStart = "${package}/bin/kanidm_unixd_tasks";
        BindReadOnlyPaths = [
          "/nix/store"
          "-/etc/resolv.conf"
          "-/etc/nsswitch.conf"
          "-/etc/hosts"
          "-/etc/localtime"
          "-/etc/kanidm"
          "-/etc/static/kanidm"
        ];
        BindPaths = [
          # To manage home directories
          "/home"
          # To connect to kanidm-unixd
          "/run/kanidm-unixd:/var/run/kanidm-unixd"
        ];
        # CAP_DAC_OVERRIDE is needed to ignore ownership of unixd socket
        CapabilityBoundingSet = ["CAP_CHOWN" "CAP_FOWNER" "CAP_DAC_OVERRIDE" "CAP_DAC_READ_SEARCH"];
        IPAddressDeny = "any";
        # Need access to users
        PrivateUsers = false;
        # Need access to home directories
        ProtectHome = false;
        RestrictAddressFamilies = ["AF_UNIX"];
        TemporaryFileSystem = "/:ro";
        Restart = "on-failure";
      };
      environment.RUST_LOG = serverSettings.log_level;
    };
    environment.etc = {
      "kanidm/server.toml".source = serverConfigFile;
      "kanidm/config".source = clientConfigFile;
      "kanidm/unixd".source = unixdConfigFile;
    };
    system.nssModules = [package];
    system.nssDatabases.group = [name];
    system.nssDatabases.passwd = [name];
    # environment.etc."kanidm/server.toml" = {
    #   mode = "0600";
    #   group = "kanidm";
    #   user = "kanidm";
    # };
    # environment.etc."kanidm/config" = {
    #   mode = "0600";
    #   group = "kanidm";
    #   user = "kanidm";
    # };
    services.caddy.virtualHosts."idm.h.lyte.dev" = {
      extraConfig = ''reverse_proxy https://idm.h.lyte.dev:8443'';
    };
    networking = {
      extraHosts = ''
        ::1 idm.h.lyte.dev
        127.0.0.1 idm.h.lyte.dev
      '';
    };
  };
  */
})
{
  # audiobookshelf — audiobook & podcast server, served at audio.lyte.dev

  # Pre-create the data directories; the three entries share identical
  # ownership/permissions, so generate them from a list.
  systemd.tmpfiles.settings."10-audiobookshelf" = lib.genAttrs [
    "/storage/audiobookshelf"
    "/storage/audiobookshelf/audiobooks"
    "/storage/audiobookshelf/podcasts"
  ] (_dir: {
    "d" = {
      mode = "0770";
      user = "audiobookshelf";
      group = "wheel";
    };
  });

  users.groups.audiobookshelf = {};
  users.users.audiobookshelf = {
    isSystemUser = true;
    group = "audiobookshelf";
  };

  services.audiobookshelf = {
    enable = true;
    dataDir = "/storage/audiobookshelf";
    port = 8523;
  };

  # Run under the dedicated user/group above and keep all state under
  # /storage instead of the module's default location.
  systemd.services.audiobookshelf.serviceConfig = {
    WorkingDirectory = lib.mkForce config.services.audiobookshelf.dataDir;
    StateDirectory = lib.mkForce config.services.audiobookshelf.dataDir;
    Group = "audiobookshelf";
    User = "audiobookshelf";
  };

  services.caddy.virtualHosts."audio.lyte.dev" = {
    extraConfig = ''reverse_proxy :${toString config.services.audiobookshelf.port}'';
  };
}
{
  # prometheus — local-only metrics collection (scraped by grafana)
  services.restic.commonPaths = [
    # TODO: do I want this backed up?
    # "/var/lib/prometheus"
  ];

  services.prometheus = {
    enable = true;
    checkConfig = true;
    listenAddress = "127.0.0.1";
    port = 9090;

    # One scrape job covering every local exporter; each exporter contributes
    # its own listenAddress:port target.
    scrapeConfigs = [
      {
        job_name = "beefcake";
        static_configs =
          map ({
            listenAddress,
            port,
            ...
          }: {
            targets = ["${listenAddress}:${toString port}"];
          }) (with config.services.prometheus.exporters; [node zfs postgres]);
      }
    ];

    exporters = {
      postgres = {
        enable = true;
        listenAddress = "127.0.0.1";
        runAsLocalSuperUser = true;
      };

      node = {
        enable = true;
        listenAddress = "127.0.0.1";
        enabledCollectors = [
          "systemd"
        ];
      };

      zfs = {
        enable = true;
        listenAddress = "127.0.0.1";
      };
    };
  };

  /*
  TODO: promtail?
  idrac exporter?
  restic exporter?
  smartctl exporter?
  systemd exporter?
  NOTE: we probably don't want this exposed
  services.caddy.virtualHosts."prometheus.h.lyte.dev" = {
    extraConfig = ''reverse_proxy :${toString config.services.prometheus.port}'';
  };
  */
}
{
  # grafana — dashboards at grafana.h.lyte.dev, backed by the local prometheus
  systemd.tmpfiles.settings = {
    "10-grafana" = {
      "/storage/grafana" = {
        "d" = {
          mode = "0750";
          user = "grafana";
          group = "grafana";
        };
      };
    };
  };

  services.restic.commonPaths = [
    "/storage/grafana"
  ];

  sops.secrets = {
    grafana-admin-password = {
      owner = "grafana";
      group = "grafana";
      mode = "0400";
    };
    grafana-smtp-password = {
      owner = "grafana";
      group = "grafana";
      mode = "0400";
    };
  };

  services.grafana = {
    enable = true;
    dataDir = "/storage/grafana";
    provision = {
      enable = true;
      datasources = {
        settings = {
          datasources = [
            {
              name = "Prometheus";
              type = "prometheus";
              access = "proxy";
              url = "http://localhost:${toString config.services.prometheus.port}";
              isDefault = true;
            }
          ];
        };
      };
    };
    settings = {
      server = {
        http_port = 3814;
        root_url = "https://grafana.h.lyte.dev";
      };

      smtp = {
        enabled = true;
        from_address = "grafana@lyte.dev";
        user = "grafana@lyte.dev";
        host = "smtp.mailgun.org:587";
        password = ''$__file{${config.sops.secrets.grafana-smtp-password.path}}'';
      };

      security = {
        admin_email = "daniel@lyte.dev";
        admin_user = "lytedev";
        # FIX: `admin_file` is not a Grafana [security] option; the $__file
        # provider value must be assigned to `admin_password` (as is done for
        # smtp.password above) or the sops secret is silently ignored.
        admin_password = ''$__file{${config.sops.secrets.grafana-admin-password.path}}'';
      };
      # database = {
      # };
    };
  };

  # NOTE(review): 9000 matches neither grafana's http_port (3814, proxied by
  # caddy) nor prometheus (9090) — confirm what this port is actually for.
  networking.firewall.allowedTCPPorts = [
    9000
  ];

  services.caddy.virtualHosts."grafana.h.lyte.dev" = {
    extraConfig = ''reverse_proxy :${toString config.services.grafana.settings.server.http_port}'';
  };
}
{
  # paperless-ngx — document management at paperless.h.lyte.dev
  systemd.tmpfiles.settings = {
    "10-paperless" = {
      "/storage/paperless" = {
        "d" = {
          mode = "0750";
          user = "paperless";
          group = "paperless";
        };
      };
    };
  };

  services.restic.commonPaths = [
    "/storage/paperless"
  ];

  sops.secrets.paperless-superuser-password = {
    owner = "paperless";
    group = "paperless";
    # FIX: was "400"; use the 4-digit octal form for consistency with every
    # other secret in this file (the value is equivalent).
    mode = "0400";
  };

  services.paperless = {
    enable = true;
    package = pkgs.paperless-ngx;
    dataDir = "/storage/paperless";
    # Initial superuser password, provisioned via sops.
    passwordFile = config.sops.secrets.paperless-superuser-password.path;
  };

  services.caddy.virtualHosts."paperless.h.lyte.dev" = {
    extraConfig = ''reverse_proxy :${toString config.services.paperless.port}'';
  };
}
{
  # actual budget — OCI container with its data on /storage, served at
  # finances.h.lyte.dev
  systemd.tmpfiles.settings."10-actual"."/storage/actual"."d" = {
    mode = "0750";
    user = "root";
    group = "family";
  };

  services.restic.commonPaths = ["/storage/actual"];

  virtualisation.oci-containers.containers.actual = {
    image = "docker.io/actualbudget/actual-server:24.9.0";
    autoStart = true;
    ports = ["5006:5006"];
    volumes = ["/storage/actual:/data"];
  };

  services.caddy.virtualHosts."finances.h.lyte.dev" = {
    extraConfig = ''reverse_proxy :5006'';
  };
}
] ;
/*
TODO: non-root processes and services that access secrets need to be part of
the 'keys' group.

Maybe this will fix plausible?

  systemd.services.some-service = {
    serviceConfig.SupplementaryGroups = [config.users.groups.keys.name];
  };

or

  users.users.example-user.extraGroups = [config.users.groups.keys.name];

TODO: declarative directory quotas? for /storage/$USER and /home/$USER
*/
environment.systemPackages = with pkgs; [
  # downloads & backups
  aria2
  restic

  # filesystem / disk-health tooling
  btrfs-progs
  zfs
  smartmontools

  # interactive process monitors
  htop
  bottom

  # command-line HTTP clients
  curl
  xh
];
# Allow this host to advertise routes / act as an exit node on the tailnet.
services.tailscale.useRoutingFeatures = "server";
/*
# https://github.com/NixOS/nixpkgs/blob/04af42f3b31dba0ef742d254456dc4c14eedac86/nixos/modules/services/misc/lidarr.nix#L72
services.lidarr = {
  enable = true;
  dataDir = "/storage/lidarr";
};
services.radarr = {
  enable = true;
  dataDir = "/storage/radarr";
};
services.sonarr = {
  enable = true;
  dataDir = "/storage/sonarr";
};
services.bazarr = {
  enable = true;
  listenPort = 6767;
};
networking.firewall.allowedTCPPorts = [9876 9877];
networking.firewall.allowedUDPPorts = [9876 9877];
networking.firewall.allowedUDPPortRanges = [
  {
    from = 27000;
    to = 27100;
  }
];
*/
}