2023-10-12 22:54:05 -05:00
/*
if ur fans get loud:
# enable manual fan control
sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x01 0x00
# set fan speed to last byte as decimal
sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00
*/
2023-10-03 11:52:44 -05:00
{
2024-02-21 21:14:46 -06:00
# inputs,
2024-02-16 16:52:58 -06:00
# outputs,
2024-02-21 22:15:41 -06:00
lib ,
2024-02-21 20:39:10 -06:00
config ,
2023-10-03 11:52:44 -05:00
pkgs ,
. . .
2024-03-13 21:12:14 -05:00
} : {
# NOTE: stateVersion pins on-disk data formats; do not bump casually.
system.stateVersion = "22.05";
home-manager.users.daniel.home.stateVersion = "24.05";

networking.hostName = "beefcake";

imports = [
{
  # hardware: boot modules, bootloader, and filesystems
  boot = {
    initrd.availableKernelModules = ["ehci_pci" "megaraid_sas" "usbhid" "uas" "sd_mod"];
    kernelModules = ["kvm-intel"];
    loader.systemd-boot.enable = true;
    loader.efi.canTouchEfiVariables = true;
  };

  fileSystems."/" = {
    device = "/dev/disk/by-uuid/0747dcba-f590-42e6-89c8-6cb2f9114d64";
    fsType = "ext4";
    options = [
      # per-user disk quotas on the root filesystem
      "usrquota"
    ];
  };

  fileSystems."/boot" = {
    device = "/dev/disk/by-uuid/7E3C-9018";
    fsType = "vfat";
  };

  fileSystems."/storage" = {
    device = "/dev/disk/by-uuid/ea8258d7-54d1-430e-93b3-e15d33231063";
    fsType = "btrfs";
    options = [
      "compress=zstd:5"
      "space_cache=v2"
    ];
  };
}
{
  # sops secrets stuff
  sops = {
    defaultSopsFile = ../secrets/beefcake/secrets.yml;
    age = {
      sshKeyPaths = ["/etc/ssh/ssh_host_ed25519_key"];
      keyFile = "/var/lib/sops-nix/key.txt";
      generateKey = true;
    };
    secrets = {
      # example-key = {
      #   # see these and other options' documentation here:
      #   # https://github.com/Mic92/sops-nix#set-secret-permissionowner-and-allow-services-to-access-it
      #   # set permissions:
      #   # mode = "0440";
      #   # owner = config.users.users.nobody.name;
      #   # group = config.users.users.nobody.group;
      #   # restart service when a secret changes or is newly initialized
      #   # restartUnits = [ "home-assistant.service" ];
      #   # symlink to certain directories
      #   path = "/var/lib/my-example-key/secrets.yaml";
      #   # for use as a user password
      #   # neededForUsers = true;
      # };
      # subdirectory
      # "myservice/my_subdir/my_secret" = { };
      "jland.env" = {
        path = "/var/lib/jland/jland.env";
        # TODO: would be cool to assert that it's correctly-formatted JSON? probably should be done in a pre-commit hook?
        mode = "0440";
        owner = config.users.users.daniel.name;
        group = config.users.groups.daniel.name;
      };
      "dawncraft.env" = {
        path = "/var/lib/dawncraft/dawncraft.env";
        # TODO: would be cool to assert that it's correctly-formatted JSON? probably should be done in a pre-commit hook?
        mode = "0440";
        owner = config.users.users.daniel.name;
        group = config.users.groups.daniel.name;
      };
      plausible-admin-password = {
        # TODO: path = "${config.systemd.services.plausible.serviceConfig.WorkingDirectory}/plausible-admin-password.txt";
        path = "/var/lib/plausible/plausible-admin-password";
        mode = "0440";
        owner = config.systemd.services.plausible.serviceConfig.User;
        group = config.systemd.services.plausible.serviceConfig.Group;
      };
      plausible-secret-key-base = {
        path = "/var/lib/plausible/plausible-secret-key-base";
        mode = "0440";
        owner = config.systemd.services.plausible.serviceConfig.User;
        group = config.systemd.services.plausible.serviceConfig.Group;
      };
      nextcloud-admin-password.path = "/var/lib/nextcloud/admin-password";
      "forgejo-runner.env" = {mode = "0400";};
    };
  };

  # make sure the runner only starts after its secret is in place
  systemd.services.gitea-runner-beefcake.after = ["sops-nix.service"];
}
{
  # nix binary cache
  services.nix-serve = {
    enable = true;
    secretKeyFile = "/var/cache-priv-key.pem";
  };
  services.caddy.virtualHosts."nix.h.lyte.dev" = {
    extraConfig = ''
      reverse_proxy :${toString config.services.nix-serve.port}
    '';
  };
  networking.firewall.allowedTCPPorts = [
    80
    443
  ];

  # regularly build this flake so we have stuff in the cache
  # TODO: schedule this for nightly builds instead of intervals based on boot time
  systemd.timers."build-lytedev-flake" = {
    wantedBy = ["timers.target"];
    timerConfig = {
      OnBootSec = "30m"; # 30 minutes after booting
      OnUnitActiveSec = "1d"; # every day afterwards
      Unit = "build-lytedev-flake.service";
    };
  };
  systemd.services."build-lytedev-flake" = {
    script = ''
      # build self (main server) configuration
      nixos-rebuild build --flake git+https://git.lyte.dev/lytedev/nix.git --accept-flake-config
      # build desktop configuration
      nixos-rebuild build --flake git+https://git.lyte.dev/lytedev/nix.git#dragon --accept-flake-config
      # build main laptop configuration
      nixos-rebuild build --flake git+https://git.lyte.dev/lytedev/nix.git#foxtrot --accept-flake-config
    '';
    path = with pkgs; [openssh git nixos-rebuild];
    serviceConfig = {
      # TODO: mkdir -p...?
      WorkingDirectory = "/home/daniel/.home/nightly-flake-builds";
      Type = "oneshot";
      User = "daniel"; # might have to run as me for git ssh access to the repo
    };
  };

  networking = {
    # keep cache + idm lookups on-host
    extraHosts = ''
      ::1 nix.h.lyte.dev
      127.0.0.1 nix.h.lyte.dev
      ::1 idm.h.lyte.dev
      127.0.0.1 idm.h.lyte.dev
    '';
  };
}
{
  # headscale: self-hosted tailscale coordination server
  services.headscale = {
    enable = true;
    address = "127.0.0.1";
    port = 7777;
    settings = {
      server_url = "https://tailscale.vpn.h.lyte.dev";
      db_type = "sqlite3";
      db_path = "/var/lib/headscale/db.sqlite";
      derp.server = {
        enable = true;
        region_id = 999;
        stun_listen_addr = "0.0.0.0:3478";
      };
      dns_config = {
        magic_dns = true;
        base_domain = "vpn.h.lyte.dev";
        domains = [
          "ts.vpn.h.lyte.dev"
        ];
        nameservers = [
          "1.1.1.1"
          # "192.168.0.1"
        ];
        override_local_dns = true;
      };
    };
  };
  services.caddy.virtualHosts."tailscale.vpn.h.lyte.dev" = {
    extraConfig = ''
      reverse_proxy http://localhost:${toString config.services.headscale.port}
    '';
  };
  # STUN (embedded DERP server)
  networking.firewall.allowedUDPPorts = [3478];
}
{
  # soju IRC bouncer
  services.soju = {
    enable = true;
    listen = ["irc+insecure://:6667"];
  };
  networking.firewall.allowedTCPPorts = [
    6667
  ];
}
{
  # samba
  users.users.guest = {
    # used for anonymous samba access
    isSystemUser = true;
    group = "users";
    createHome = true;
  };
  users.users.scannerupload = {
    # used for scanner samba access
    isSystemUser = true;
    group = "users";
    createHome = true;
  };
  systemd.tmpfiles.rules = [
    "d /var/spool/samba 1777 root root -"
  ];
  services.samba-wsdd = {
    enable = true;
  };
  services.samba = {
    enable = true;
    openFirewall = true;
    securityType = "user";
    # not needed since I don't think I use printer sharing?
    # https://nixos.wiki/wiki/Samba#Printer_sharing
    # package = pkgs.sambaFull; # broken last I checked in nixpkgs?
    extraConfig = ''
      workgroup = WORKGROUP
      server string = beefcake
      netbios name = beefcake
      security = user
      #use sendfile = yes
      #max protocol = smb2
      # note: localhost is the ipv6 localhost ::1
      hosts allow = 100.64.0.0/10 192.168.0.0/16 127.0.0.1 localhost
      hosts deny = 0.0.0.0/0
      guest account = guest
      map to guest = never
      # load printers = yes
      # printing = cups
      # printcap name = cups
    '';
    shares = {
      libre = {
        path = "/storage/libre";
        browseable = "yes";
        "read only" = "no";
        "guest ok" = "yes";
        "create mask" = "0666";
        "directory mask" = "0777";
        # "force user" = "nobody";
        # "force group" = "users";
      };
      public = {
        path = "/storage/public";
        browseable = "yes";
        "read only" = "no";
        "guest ok" = "yes";
        "create mask" = "0664";
        "directory mask" = "0775";
        # "force user" = "nobody";
        # "force group" = "users";
      };
      family = {
        path = "/storage/family";
        browseable = "yes";
        "read only" = "no";
        "guest ok" = "no";
        "create mask" = "0660";
        "directory mask" = "0770";
        # "force user" = "nobody";
        # "force group" = "family";
      };
      scannerdocs = {
        path = "/storage/scannerdocs";
        browseable = "yes";
        "read only" = "no";
        "guest ok" = "no";
        "create mask" = "0600";
        "directory mask" = "0700";
        "valid users" = "scannerupload";
        "force user" = "scannerupload";
        "force group" = "users";
      };
      daniel = {
        path = "/storage/daniel";
        browseable = "yes";
        "read only" = "no";
        "guest ok" = "no";
        "create mask" = "0600";
        "directory mask" = "0700";
        # "force user" = "daniel";
        # "force group" = "users";
      };
      # printers = {
      #   comment = "All Printers";
      #   path = "/var/spool/samba";
      #   public = "yes";
      #   browseable = "yes";
      #   # to allow user 'guest account' to print.
      #   "guest ok" = "yes";
      #   writable = "no";
      #   printable = "yes";
      #   "create mode" = 0700;
      # };
    };
  };
}
{
  # nextcloud
  # NOTE(review): service not enabled yet; only this commented user stub exists.
  # users.users.nextcloud = {
  #   isSystemUser = true;
  #   createHome = false;
  #   group = "nextcloud";
  # };
}
{
  # plausible analytics
  users.users.plausible = {
    isSystemUser = true;
    createHome = false;
    group = "plausible";
  };
  users.extraGroups = {
    "plausible" = {};
  };
  services.plausible = {
    enable = true;
    database = {
      clickhouse.setup = true;
      postgres = {
        setup = false;
        dbname = "plausible";
      };
    };
    server = {
      baseUrl = "https://a.lyte.dev";
      disableRegistration = true;
      port = 8899;
      secretKeybaseFile = config.sops.secrets.plausible-secret-key-base.path;
    };
    adminUser = {
      activate = false;
      email = "daniel@lyte.dev";
      passwordFile = config.sops.secrets.plausible-admin-password.path;
    };
  };

  systemd.services.plausible = let
    cfg = config.services.plausible;
  in {
    serviceConfig.User = "plausible";
    serviceConfig.Group = "plausible";
    # since createdb is not gated behind postgres.setup, this breaks
    script = lib.mkForce ''
      # Elixir does not start up if `RELEASE_COOKIE` is not set,
      # even though we set `RELEASE_DISTRIBUTION=none` so the cookie should be unused.
      # Thus, make a random one, which should then be ignored.
      export RELEASE_COOKIE=$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 20)
      export ADMIN_USER_PWD="$(<$CREDENTIALS_DIRECTORY/ADMIN_USER_PWD )"
      export SECRET_KEY_BASE="$(<$CREDENTIALS_DIRECTORY/SECRET_KEY_BASE )"
      ${lib.optionalString (cfg.mail.smtp.passwordFile != null)
        ''export SMTP_USER_PWD="$(<$CREDENTIALS_DIRECTORY/SMTP_USER_PWD )"''}

      # setup
      ${
        if cfg.database.postgres.setup
        then "${cfg.package}/createdb.sh"
        else ""
      }
      ${cfg.package}/migrate.sh
      export IP_GEOLOCATION_DB=${pkgs.dbip-country-lite}/share/dbip/dbip-country-lite.mmdb
      ${cfg.package}/bin/plausible eval "(Plausible.Release.prepare() ; Plausible.Auth.create_user(\"$ADMIN_USER_NAME\", \"$ADMIN_USER_EMAIL\", \"$ADMIN_USER_PWD\"))"
      ${lib.optionalString cfg.adminUser.activate ''
        psql -d plausible <<< "UPDATE users SET email_verified=true where email = '$ADMIN_USER_EMAIL';"
      ''}
      exec plausible start
    '';
  };

  services.caddy.virtualHosts."a.lyte.dev" = {
    extraConfig = ''
      reverse_proxy :${toString config.services.plausible.server.port}
    '';
  };
}
{
  # clickhouse: reduce logging noise and disk usage
  environment.etc = {
    "clickhouse-server/users.d/disable-logging-query.xml" = {
      text = ''
        <clickhouse>
          <profiles>
            <default>
              <log_queries>0</log_queries>
              <log_query_threads>0</log_query_threads>
            </default>
          </profiles>
        </clickhouse>
      '';
    };
    "clickhouse-server/config.d/reduce-logging.xml" = {
      text = ''
        <clickhouse>
          <logger>
            <level>warning</level>
            <console>true</console>
          </logger>
          <query_thread_log remove="remove"/>
          <query_log remove="remove"/>
          <text_log remove="remove"/>
          <trace_log remove="remove"/>
          <metric_log remove="remove"/>
          <asynchronous_metric_log remove="remove"/>
          <session_log remove="remove"/>
          <part_log remove="remove"/>
        </clickhouse>
      '';
    };
  };
}
{
  # daniel augments
  users.groups.daniel.members = ["daniel"];
  users.groups.nixadmin.members = ["daniel"];
  users.users.daniel = {
    extraGroups = [
      "nixadmin" # write access to /etc/nixos/ files
      "wheel" # sudo access
      "caddy" # write access to /storage/files.lyte.dev
      "users" # general users group
      "jellyfin" # write access to /storage/jellyfin
      "audiobookshelf" # write access to /storage/audiobookshelf
      "flanilla"
    ];
  };
}
{
  services.jellyfin = {
    enable = true;
    openFirewall = false;
    # uses port 8096 by default, configurable from admin UI
  };
  services.caddy.virtualHosts."video.lyte.dev" = {
    extraConfig = ''reverse_proxy :8096'';
  };
  # NOTE: this server's xeon chips DO NOT seem to support quicksync or graphics in general
  # but I can probably throw in a crappy GPU (or a big, cheap ebay GPU for ML
  # stuff, too?) and get good transcoding performance
  # jellyfin hardware encoding
  # hardware.graphics = {
  #   enable = true;
  #   extraPackages = with pkgs; [
  #     intel-media-driver
  #     vaapiIntel
  #     vaapiVdpau
  #     libvdpau-va-gl
  #     intel-compute-runtime
  #   ];
  # };
  # nixpkgs.config.packageOverrides = pkgs: {
  #   vaapiIntel = pkgs.vaapiIntel.override { enableHybridCodec = true; };
  # };
}
{
  services.postgresql = {
    enable = true;
    ensureDatabases = [
      "daniel"
      "plausible"
      "nextcloud"
      # "atuin"
    ];
    ensureUsers = [
      {
        name = "daniel";
        ensureDBOwnership = true;
      }
      {
        name = "plausible";
        ensureDBOwnership = true;
      }
      {
        name = "nextcloud";
        ensureDBOwnership = true;
      }
      # {
      #   name = "atuin";
      #   ensureDBOwnership = true;
      # }
    ];
    dataDir = "/storage/postgres";
    enableTCPIP = true;
    package = pkgs.postgresql_15;

    # https://www.postgresql.org/docs/current/auth-pg-hba-conf.html
    authentication = pkgs.lib.mkOverride 10 ''
      #type  database  user    auth-method  auth-options
      local  all       postgres  peer  map=superuser_map
      local  all       daniel    peer  map=superuser_map
      local  sameuser  all       peer  map=superuser_map
      # local plausible plausible peer
      # local nextcloud nextcloud peer
      # local atuin atuin peer

      # lan ipv4
      host  all  daniel  192.168.0.0/16  trust
      host  all  daniel  10.0.0.0/24     trust

      # tailnet ipv4
      host  all  daniel  100.64.0.0/10   trust
    '';
    identMap = ''
      # map            system_user  db_user
      superuser_map    root         postgres
      superuser_map    postgres     postgres
      superuser_map    daniel       postgres

      # Let other names login as themselves
      superuser_map    /^(.*)$      \1
    '';
  };

  services.postgresqlBackup = {
    enable = true;
    backupAll = true;
    compression = "none"; # hoping for deduplication here?
    location = "/storage/postgres-backups";
    startAt = "*-*-* 03:00:00";
  };
}
{
  # friends
  users.users.ben = {
    isNormalUser = true;
    packages = [pkgs.vim];
    openssh.authorizedKeys.keys = [
      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKUfLZ+IX85p9355Po2zP1H2tAxiE0rE6IYb8Sf+eF9T ben@benhany.com"
    ];
  };

  users.users.alan = {
    isNormalUser = true;
    packages = [pkgs.vim];
    openssh.authorizedKeys.keys = [
      # NOTE(review): empty key string — alan cannot log in until a real key is added
      ""
    ];
  };

  networking.firewall.allowedTCPPorts = [
    64022
  ];
  networking.firewall.allowedUDPPorts = [
    64020
  ];
}
{
  # flanilla family minecraft server: service user/group
  users.groups.flanilla = {};
  users.users.flanilla = {
    isSystemUser = true;
    createHome = false;
    group = "flanilla";
  };
}
{
  # restic backups
  users.users.restic = {
    # used for other machines to backup to
    isNormalUser = true;
    openssh.authorizedKeys.keys =
      [
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJbPqzKB09U+i4Kqu136yOjflLZ/J7pYsNulTAd4x903 root@chromebox.h.lyte.dev"
      ]
      ++ config.users.users.daniel.openssh.authorizedKeys.keys;
  };
  # TODO: move previous backups over and put here
  # clickhouse and plausible analytics once they're up and running?
  services.restic.backups = let
    defaults = {
      passwordFile = "/root/restic-remotebackup-password";
      paths = [
        "/storage/files.lyte.dev"
        "/storage/daniel"
        "/storage/forgejo" # TODO: should maybe use configuration.nix's services.forgejo.dump ?
        "/storage/postgres-backups"
        # https://github.com/dani-garcia/vaultwarden/wiki/Backing-up-your-vault
        # specifically, https://github.com/dani-garcia/vaultwarden/wiki/Backing-up-your-vault#sqlite-database-files
        "/var/lib/bitwarden_rs" # does this need any sqlite preprocessing?
        # TODO: backup *arr configs?
      ];
      initialize = true;
      exclude = [];
      timerConfig = {
        OnCalendar = ["04:45" "17:45"];
      };
    };
  in {
    local =
      defaults
      // {
        passwordFile = "/root/restic-localbackup-password";
        repository = "/storage/backups/local";
      };
    rascal =
      defaults
      // {
        extraOptions = [
          "sftp.command='ssh beefcake@rascal -i /root/.ssh/id_ed25519 -s sftp'"
        ];
        repository = "sftp://beefcake@rascal://storage/backups/beefcake";
      };
    # TODO: add ruby?
    benland =
      defaults
      // {
        extraOptions = [
          "sftp.command='ssh daniel@n.benhaney.com -p 10022 -i /root/.ssh/id_ed25519 -s sftp'"
        ];
        repository = "sftp://daniel@n.benhaney.com://storage/backups/beefcake";
      };
  };
}
{
  services.caddy = {
    # TODO: 502 and other error pages
    enable = true;
    email = "daniel@lyte.dev";
    adapter = "caddyfile";
    virtualHosts = {
      "dev.h.lyte.dev" = {
        extraConfig = ''
          reverse_proxy :8000
        '';
      };
      "files.lyte.dev" = {
        # TODO: customize the files.lyte.dev template?
        extraConfig = ''
          # @options {
          #   method OPTIONS
          # }
          # @corsOrigin {
          #   header_regexp Origin ^https?://([a-zA-Z0-9-]+\.)*lyte\.dev$
          # }
          header {
            Access-Control-Allow-Origin "{http.request.header.Origin}"
            Access-Control-Allow-Credentials true
            Access-Control-Allow-Methods *
            Access-Control-Allow-Headers *
            Vary Origin
            defer
          }
          # reverse_proxy shuwashuwa:8848 {
          #   header_down -Access-Control-Allow-Origin
          # }
          file_server browse {
            # browse template
            # hide .*
            root /storage/files.lyte.dev
          }
        '';
      };
    };
    # acmeCA = "https://acme-staging-v02.api.letsencrypt.org/directory";
  };
  networking.firewall.allowedTCPPorts = [
    8000 # random development stuff
  ];
}
{
  services.forgejo = {
    enable = true;
    stateDir = "/storage/forgejo";
    settings = {
      DEFAULT = {
        APP_NAME = "git.lyte.dev";
      };
      server = {
        ROOT_URL = "https://git.lyte.dev";
        HTTP_ADDR = "127.0.0.1";
        HTTP_PORT = 3088;
        DOMAIN = "git.lyte.dev";
      };
      actions = {
        ENABLED = true;
      };
      service = {
        DISABLE_REGISTRATION = true;
      };
      session = {
        COOKIE_SECURE = true;
      };
      log = {
        # TODO: raise the log level
        # LEVEL = "Debug";
      };
      ui = {
        THEMES = "forgejo-auto,forgejo-light,forgejo-dark,catppuccin-mocha-sapphire";
        DEFAULT_THEME = "forgejo-auto";
      };
      indexer = {
        REPO_INDEXER_ENABLED = "true";
        REPO_INDEXER_PATH = "indexers/repos.bleve";
        MAX_FILE_SIZE = "1048576";
        # REPO_INDEXER_INCLUDE =
        REPO_INDEXER_EXCLUDE = "resources/bin/**";
      };
    };
    lfs = {
      enable = true;
    };
    dump = {
      enable = true;
    };
    database = {
      # TODO: move to postgres?
      type = "sqlite3";
    };
  };

  services.gitea-actions-runner = {
    # TODO: simple git-based automation would be dope? maybe especially for
    # mirroring to github super easy?
    # enable = true;
    package = pkgs.forgejo-runner;
    instances."beefcake" = {
      enable = true;
      name = "beefcake";
      url = "https://git.lyte.dev";
      settings = {
        container = {
          # use the shared network which is bridged by default
          # this lets us hit git.lyte.dev just fine
          network = "podman";
        };
      };
      labels = [
        # type ":host" does not depend on docker/podman/lxc
        "podman"
        "nix:docker://git.lyte.dev/lytedev/nix:latest"
        "beefcake:host"
        "nixos-host:host"
      ];
      tokenFile = config.sops.secrets."forgejo-runner.env".path;
      hostPackages = with pkgs; [
        nix
        bash
        coreutils
        curl
        gawk
        gitMinimal
        gnused
        nodejs
        gnutar # needed for cache action
        wget
      ];
    };
  };

  # environment.systemPackages = with pkgs; [nodejs];
  services.caddy.virtualHosts."git.lyte.dev" = {
    extraConfig = ''
      reverse_proxy :${toString config.services.forgejo.settings.server.HTTP_PORT}
    '';
  };

  services.caddy.virtualHosts."http://git.beefcake.lan" = {
    extraConfig = ''
      reverse_proxy :${toString config.services.forgejo.settings.server.HTTP_PORT}
    '';
  };
}
{
  services.vaultwarden = {
    enable = true;
    config = {
      DOMAIN = "https://bw.lyte.dev";
      SIGNUPS_ALLOWED = "false";
      ROCKET_ADDRESS = "127.0.0.1";
      ROCKET_PORT = 8222;
    };
  };
  services.caddy.virtualHosts."bw.lyte.dev" = {
    extraConfig = ''reverse_proxy :${toString config.services.vaultwarden.config.ROCKET_PORT}'';
  };
}
{
  # TODO: make the client declarative? right now I think it's manually git
  # clone'd to /root
  systemd.services.deno-netlify-ddns-client = {
    serviceConfig.Type = "oneshot";
    path = with pkgs; [curl bash];
    environment = {
      NETLIFY_DDNS_RC_FILE = "/root/deno-netlify-ddns-client/.env";
    };
    script = ''
      bash /root/deno-netlify-ddns-client/netlify-ddns-client.sh
    '';
  };
  systemd.timers.deno-netlify-ddns-client = {
    wantedBy = ["timers.target"];
    partOf = ["deno-netlify-ddns-client.service"];
    timerConfig = {
      OnBootSec = "10sec";
      OnUnitActiveSec = "5min";
      Unit = "deno-netlify-ddns-client.service";
    };
  };
}
{
  # atuin shell history sync server
  services.atuin = {
    enable = true;
    database = {
      createLocally = true;
      # uri = "postgresql://atuin@localhost:5432/atuin";
    };
    openRegistration = false;
  };
  services.caddy.virtualHosts."atuin.h.lyte.dev" = {
    extraConfig = ''reverse_proxy :${toString config.services.atuin.port}'';
  };
}
{
  # jland minecraft server
  users.groups.jland = {
    gid = 982;
  };
  users.users.jland = {
    uid = 986;
    isSystemUser = true;
    createHome = false;
    group = "jland";
  };
  virtualisation.oci-containers.containers.minecraft-jland = {
    autoStart = false;

    # sending commands: https://docker-minecraft-server.readthedocs.io/en/latest/commands/
    image = "docker.io/itzg/minecraft-server";
    # user = "${toString config.users.users.jland.uid}:${toString config.users.groups.jland.gid}";
    extraOptions = [
      "--tty"
      "--interactive"
    ];
    environment = {
      EULA = "true";
      # UID = toString config.users.users.jland.uid;
      # GID = toString config.users.groups.jland.gid;
      STOP_SERVER_ANNOUNCE_DELAY = "20";
      TZ = "America/Chicago";
      VERSION = "1.20.1";
      MEMORY = "8G";
      MAX_MEMORY = "16G";
      TYPE = "FORGE";
      FORGE_VERSION = "47.1.3";
      ALLOW_FLIGHT = "true";
      ENABLE_QUERY = "true";
      MODPACK = "/data/origination-files/Server-Files-0.2.14.zip";
      # TYPE = "AUTO_CURSEFORGE";
      # CF_SLUG = "monumental-experience";
      # CF_FILE_ID = "4826863"; # 2.2.53
      # due to
      # Nov 02 13:45:22 beefcake minecraft-jland[2738672]: me.itzg.helpers.errors.GenericException: The modpack authors have indicated this file is not allowed for project distribution. Please download the client zip file from https://www.curseforge.com/minecraft/modpacks/monumental-experience and pass via CF_MODPACK_ZIP environment variable or place indownloads repo directory.
      # we must upload manually
      # CF_MODPACK_ZIP = "/data/origination-files/Monumental+Experience-2.2.53.zip";
      # ENABLE_AUTOPAUSE = "true"; # TODO: must increase or disable max-tick-time
      # May also have mod/loader incompatibilities?
      # https://docker-minecraft-server.readthedocs.io/en/latest/misc/autopause-autostop/autopause/
    };
    environmentFiles = [
      # config.sops.secrets."jland.env".path
    ];
    ports = ["26965:25565"];
    volumes = [
      "/storage/jland/data:/data"
      "/storage/jland/worlds:/worlds"
    ];
  };
  networking.firewall.allowedTCPPorts = [
    26965
  ];
}
{
  # dawncraft minecraft server
  systemd.tmpfiles.rules = [
    "d /storage/dawncraft/ 0770 1000 1000 -"
    "d /storage/dawncraft/data/ 0770 1000 1000 -"
    "d /storage/dawncraft/worlds/ 0770 1000 1000 -"
    "d /storage/dawncraft/downloads/ 0770 1000 1000 -"
  ];
  virtualisation.oci-containers.containers.minecraft-dawncraft = {
    autoStart = false;
    # sending commands: https://docker-minecraft-server.readthedocs.io/en/latest/commands/
    image = "docker.io/itzg/minecraft-server";
    extraOptions = [
      "--tty"
      "--interactive"
    ];
    environment = {
      EULA = "true";
      STOP_SERVER_ANNOUNCE_DELAY = "20";
      TZ = "America/Chicago";
      VERSION = "1.18.2";
      MEMORY = "8G";
      MAX_MEMORY = "32G";
      ALLOW_FLIGHT = "true";
      ENABLE_QUERY = "true";
      SERVER_PORT = "26968";
      QUERY_PORT = "26968";
      TYPE = "AUTO_CURSEFORGE";
      CF_SLUG = "dawn-craft";
      CF_EXCLUDE_MODS = "368398";
      CF_FORCE_SYNCHRONIZE = "true";
      # CF_FILE_ID = "5247696"; # 2.0.7 server
    };
    environmentFiles = [
      config.sops.secrets."dawncraft.env".path
    ];
    ports = ["26968:26968/tcp" "26968:26968/udp"];
    volumes = [
      "/storage/dawncraft/data:/data"
      "/storage/dawncraft/worlds:/worlds"
      "/storage/dawncraft/downloads:/downloads"
    ];
  };
  networking.firewall.allowedTCPPorts = [
    26968
  ];
}
{
  # flanilla family minecraft server (runs as the flanilla user/group)
  virtualisation.oci-containers.containers.minecraft-flanilla = {
    autoStart = true;
    image = "docker.io/itzg/minecraft-server";
    user = "${toString config.users.users.flanilla.uid}:${toString config.users.groups.flanilla.gid}";
    extraOptions = ["--tty" "--interactive"];
    environment = {
      EULA = "true";
      UID = toString config.users.users.flanilla.uid;
      GID = toString config.users.groups.flanilla.gid;
      STOP_SERVER_ANNOUNCE_DELAY = "20";
      TZ = "America/Chicago";
      VERSION = "1.20.4";
      OPS = "lytedev";
      MODE = "creative";
      DIFFICULTY = "peaceful";
      ONLINE_MODE = "false";
      MEMORY = "8G";
      MAX_MEMORY = "16G";
      ALLOW_FLIGHT = "true";
      ENABLE_QUERY = "true";
      ENABLE_COMMAND_BLOCK = "true";
    };
    environmentFiles = [
      # config.sops.secrets."flanilla.env".path
    ];

    ports = ["26966:25565"];

    volumes = [
      "/storage/flanilla/data:/data"
      "/storage/flanilla/worlds:/worlds"
    ];
  };
  networking.firewall.allowedTCPPorts = [
    26966
  ];
}
( { options , . . . }: let
toml = pkgs . formats . toml { } ;
package = pkgs . kanidm ;
domain = " i d m . h . l y t e . d e v " ;
name = " k a n i d m " ;
storage = " / s t o r a g e / ${ name } " ;
cert = " ${ storage } / c e r t s / i d m . h . l y t e . d e v . c r t " ;
key = " ${ storage } / c e r t s / i d m . h . l y t e . d e v . k e y " ;
2024-08-09 16:42:57 -05:00
2024-08-06 13:09:31 -05:00
serverSettings = {
inherit domain ;
2024-08-09 16:42:57 -05:00
bindaddress = " 1 2 7 . 0 . 0 . 1 : 8 4 4 3 " ;
2024-08-06 13:09:31 -05:00
# ldapbindaddress
tls_chain = cert ;
tls_key = key ;
origin = " h t t p s : / / ${ domain } " ;
db_path = " ${ storage } / d a t a / k a n i d m . d b " ;
2024-08-07 20:17:16 -05:00
log_level = " i n f o " ;
2024-08-06 13:09:31 -05:00
online_backup = {
path = " ${ storage } / b a c k u p s / " ;
schedule = " 0 0 2 2 * * * " ;
# versions = 7;
} ;
} ;
2024-08-09 16:42:57 -05:00
unixdSettings = {
hsm_pin_path = " / v a r / c a c h e / ${ name } - u n i x d / h s m - p i n " ;
pam_allowed_login_groups = [ ] ;
} ;
clientSettings = {
uri = " h t t p s : / / i d m . h . l y t e . d e v " ;
} ;
2024-08-06 13:09:31 -05:00
user = name ;
group = name ;
serverConfigFile = toml . generate " s e r v e r . t o m l " serverSettings ;
2024-08-09 16:42:57 -05:00
unixdConfigFile = toml . generate " k a n i d m - u n i x d . t o m l " unixdSettings ;
clientConfigFile = toml . generate " k a n i d m - c o n f i g . t o m l " clientSettings ;
defaultServiceConfig = {
BindReadOnlyPaths = [
" / n i x / s t o r e "
" - / e t c / r e s o l v . c o n f "
" - / e t c / n s s w i t c h . c o n f "
" - / e t c / h o s t s "
" - / e t c / l o c a l t i m e "
] ;
CapabilityBoundingSet = [ ] ;
# ProtectClock= adds DeviceAllow=char-rtc r
DeviceAllow = " " ;
# Implies ProtectSystem=strict, which re-mounts all paths
# DynamicUser = true;
LockPersonality = true ;
MemoryDenyWriteExecute = true ;
NoNewPrivileges = true ;
PrivateDevices = true ;
PrivateMounts = true ;
PrivateNetwork = true ;
PrivateTmp = true ;
PrivateUsers = true ;
ProcSubset = " p i d " ;
ProtectClock = true ;
ProtectHome = true ;
ProtectHostname = true ;
# Would re-mount paths ignored by temporary root
#ProtectSystem = "strict";
ProtectControlGroups = true ;
ProtectKernelLogs = true ;
ProtectKernelModules = true ;
ProtectKernelTunables = true ;
ProtectProc = " i n v i s i b l e " ;
RestrictAddressFamilies = [ ] ;
RestrictNamespaces = true ;
RestrictRealtime = true ;
RestrictSUIDSGID = true ;
SystemCallArchitectures = " n a t i v e " ;
SystemCallFilter = [ " @ s y s t e m - s e r v i c e " " ~ @ p r i v i l e g e d @ r e s o u r c e s @ s e t u i d @ k e y r i n g " ] ;
# Does not work well with the temporary root
#UMask = "0066";
} ;
2024-08-06 13:09:31 -05:00
in {
2024-08-05 20:42:50 -05:00
# kanidm
2024-08-06 13:09:31 -05:00
config = {
# we need a mechanism to get the certificates that caddy provisions for us
systemd . timers . " c o p y - k a n i d m - c e r t i f i c a t e s - f r o m - c a d d y " = {
wantedBy = [ " t i m e r s . t a r g e t " ] ;
timerConfig = {
OnBootSec = " 1 0 m " ; # 10 minutes after booting
OnUnitActiveSec = " 5 m " ; # every 5 minutes afterwards
Unit = " c o p y - k a n i d m - c e r t i f i c a t e s - f r o m - c a d d y . s e r v i c e " ;
} ;
} ;
systemd . services . " c o p y - k a n i d m - c e r t i f i c a t e s - f r o m - c a d d y " = {
script = ''
umask 077
install - d - m 0700 - o " ${ user } " - g " ${ group } " " ${ storage } / d a t a " " ${ storage } / c e r t s "
2024-08-09 16:42:57 -05:00
cd /var/lib/caddy/.local/share/caddy/certificates/acme-v02.api.letsencrypt.org-directory/idm.h.lyte.dev
install - m 0700 - o " ${ user } " - g " ${ group } " idm . h . lyte . dev . key idm . h . lyte . dev . crt " ${ storage } / c e r t s "
2024-08-06 13:09:31 -05:00
'' ;
path = with pkgs ; [ rsync ] ;
serviceConfig = {
Type = " o n e s h o t " ;
User = " r o o t " ;
2024-08-05 20:42:50 -05:00
} ;
} ;
2024-08-06 13:09:31 -05:00
environment . systemPackages = [ package ] ;
# TODO: should I use this for /storage/kanidm/certs etc.?
systemd . tmpfiles . settings . " 1 0 - k a n i d m " = {
" ${ serverSettings . online_backup . path } " . d = {
inherit user group ;
mode = " 0 7 0 0 " ;
} ;
2024-08-09 16:42:57 -05:00
# "${builtins.dirOf unixdSettings.hsm_pin_path}".d = {
# user = "${user}-unixd";
# group = "${group}-unixd";
# mode = "0700";
# };
2024-08-06 13:09:31 -05:00
" ${ storage } / d a t a " . d = {
inherit user group ;
mode = " 0 7 0 0 " ;
} ;
2024-08-06 13:59:56 -05:00
" ${ storage } / c e r t s " . d = {
inherit user group ;
mode = " 0 7 0 0 " ;
} ;
2024-08-06 10:33:09 -05:00
} ;
2024-08-06 13:09:31 -05:00
users . groups = {
$ { group } = { } ;
2024-08-09 16:42:57 -05:00
" ${ group } - u n i x d " = { } ;
2024-08-05 20:42:50 -05:00
} ;
2024-08-06 13:09:31 -05:00
users . users . ${ user } = {
inherit group ;
description = " k a n i d m s e r v e r " ;
isSystemUser = true ;
packages = [ package ] ;
} ;
2024-08-09 16:42:57 -05:00
users . users . " ${ user } - u n i x d " = {
group = " ${ group } - u n i x d " ;
2024-08-13 14:35:09 -05:00
description = lib . mkForce " k a n i d m P A M d a e m o n " ;
2024-08-09 16:42:57 -05:00
isSystemUser = true ;
} ;
2024-08-06 13:09:31 -05:00
# the kanidm module in nixpkgs was not working for me, so I rolled my own
# loosely based off it
systemd . services . kanidm = {
2024-08-06 13:59:56 -05:00
enable = true ;
2024-08-09 16:42:57 -05:00
path = with pkgs ; [ openssl ] ++ [ package ] ;
2024-08-06 13:09:31 -05:00
description = " k a n i d m i d e n t i t y m a n a g e m e n t d a e m o n " ;
wantedBy = [ " m u l t i - u s e r . t a r g e t " ] ;
after = [ " n e t w o r k . t a r g e t " ] ;
requires = [ " c o p y - k a n i d m - c e r t i f i c a t e s - f r o m - c a d d y . s e r v i c e " ] ;
2024-08-09 16:42:57 -05:00
script = ''
pwd
ls - la
ls - laR /storage/kanidm
$ { package } /bin/kanidmd server - c $ { serverConfigFile }
'' ;
2024-08-06 13:09:31 -05:00
# environment.RUST_LOG = serverSettings.log_level;
2024-08-09 16:42:57 -05:00
serviceConfig = lib . mkMerge [
defaultServiceConfig
{
StateDirectory = name ;
StateDirectoryMode = " 0 7 0 0 " ;
RuntimeDirectory = " ${ name } d " ;
User = user ;
Group = group ;
AmbientCapabilities = [ " C A P _ N E T _ B I N D _ S E R V I C E " ] ;
CapabilityBoundingSet = [ " C A P _ N E T _ B I N D _ S E R V I C E " ] ;
PrivateUsers = lib . mkForce false ;
PrivateNetwork = lib . mkForce false ;
RestrictAddressFamilies = [ " A F _ I N E T " " A F _ I N E T 6 " " A F _ U N I X " ] ;
# TemporaryFileSystem = "/:ro";
BindReadOnlyPaths = [
" ${ storage } / c e r t s "
] ;
BindPaths = [
" ${ storage } / d a t a "
# socket
" / r u n / ${ name } d : / r u n / ${ name } d "
# backups
serverSettings . online_backup . path
] ;
}
] ;
} ;
systemd . services . kanidm-unixd = {
description = " K a n i d m P A M d a e m o n " ;
wantedBy = [ " m u l t i - u s e r . t a r g e t " ] ;
after = [ " n e t w o r k . t a r g e t " ] ;
restartTriggers = [ unixdConfigFile clientConfigFile ] ;
serviceConfig = lib . mkMerge [
defaultServiceConfig
{
CacheDirectory = " ${ name } - u n i x d " ;
CacheDirectoryMode = " 0 7 0 0 " ;
RuntimeDirectory = " ${ name } - u n i x d " ;
ExecStart = " ${ package } / b i n / k a n i d m _ u n i x d " ;
User = " ${ user } - u n i x d " ;
Group = " ${ group } - u n i x d " ;
BindReadOnlyPaths = [
" - / e t c / k a n i d m "
" - / e t c / s t a t i c / k a n i d m "
" - / e t c / s s l "
" - / e t c / s t a t i c / s s l "
" - / e t c / p a s s w d "
" - / e t c / g r o u p "
] ;
BindPaths = [
# socket
" / r u n / k a n i d m - u n i x d : / v a r / r u n / k a n i d m - u n i x d "
] ;
# Needs to connect to kanidmd
PrivateNetwork = lib . mkForce false ;
RestrictAddressFamilies = [ " A F _ I N E T " " A F _ I N E T 6 " " A F _ U N I X " ] ;
TemporaryFileSystem = " / : r o " ;
}
] ;
environment . RUST_LOG = serverSettings . log_level ;
} ;
systemd . services . kanidm-unixd-tasks = {
description = " K a n i d m P A M h o m e m a n a g e m e n t d a e m o n " ;
wantedBy = [ " m u l t i - u s e r . t a r g e t " ] ;
after = [ " n e t w o r k . t a r g e t " " k a n i d m - u n i x d . s e r v i c e " ] ;
partOf = [ " k a n i d m - u n i x d . s e r v i c e " ] ;
restartTriggers = [ unixdConfigFile clientConfigFile ] ;
2024-08-06 13:09:31 -05:00
serviceConfig = {
2024-08-09 16:42:57 -05:00
ExecStart = " ${ package } / b i n / k a n i d m _ u n i x d _ t a s k s " ;
BindReadOnlyPaths = [
" / n i x / s t o r e "
" - / e t c / r e s o l v . c o n f "
" - / e t c / n s s w i t c h . c o n f "
" - / e t c / h o s t s "
" - / e t c / l o c a l t i m e "
" - / e t c / k a n i d m "
" - / e t c / s t a t i c / k a n i d m "
] ;
BindPaths = [
# To manage home directories
" / h o m e "
# To connect to kanidm-unixd
" / r u n / k a n i d m - u n i x d : / v a r / r u n / k a n i d m - u n i x d "
] ;
# CAP_DAC_OVERRIDE is needed to ignore ownership of unixd socket
CapabilityBoundingSet = [ " C A P _ C H O W N " " C A P _ F O W N E R " " C A P _ D A C _ O V E R R I D E " " C A P _ D A C _ R E A D _ S E A R C H " ] ;
IPAddressDeny = " a n y " ;
# Need access to users
PrivateUsers = false ;
# Need access to home directories
ProtectHome = false ;
RestrictAddressFamilies = [ " A F _ U N I X " ] ;
TemporaryFileSystem = " / : r o " ;
Restart = " o n - f a i l u r e " ;
2024-08-06 13:09:31 -05:00
} ;
2024-08-09 16:42:57 -05:00
environment . RUST_LOG = serverSettings . log_level ;
} ;
environment . etc = {
" k a n i d m / s e r v e r . t o m l " . source = serverConfigFile ;
" k a n i d m / c o n f i g " . source = clientConfigFile ;
" k a n i d m / u n i x d " . source = unixdConfigFile ;
2024-08-06 13:09:31 -05:00
} ;
2024-08-09 16:42:57 -05:00
system . nssModules = [ package ] ;
system . nssDatabases . group = [ name ] ;
system . nssDatabases . passwd = [ name ] ;
2024-08-06 13:09:31 -05:00
# environment.etc."kanidm/server.toml" = {
# mode = "0600";
# group = "kanidm";
# user = "kanidm";
# };
# environment.etc."kanidm/config" = {
# mode = "0600";
# group = "kanidm";
# user = "kanidm";
# };
services . caddy . virtualHosts . " i d m . h . l y t e . d e v " = {
2024-08-09 16:42:57 -05:00
extraConfig = '' r e v e r s e _ p r o x y h t t p s : / / i d m . h . l y t e . d e v : 8 4 4 3 '' ;
} ;
networking = {
extraHosts = ''
: : 1 idm . h . lyte . dev
127 .0 .0 .1 idm . h . lyte . dev
'' ;
2024-08-06 13:09:31 -05:00
} ;
2024-08-05 20:42:50 -05:00
} ;
2024-08-06 13:09:31 -05:00
} )
2024-08-13 14:35:09 -05:00
{
  # Audiobookshelf: self-hosted audiobook/podcast server, proxied by caddy.
  services.audiobookshelf = {
    enable = true;
    # dataDir = "/storage/audiobookshelf";
    port = 8523;
  };
  services.caddy.virtualHosts."audio.lyte.dev" = {
    extraConfig = ''reverse_proxy :8523'';
  };
}
2024-03-13 21:12:14 -05:00
] ;
2023-09-04 11:40:30 -05:00
# TODO: non-root processes and services that access secrets need to be part of
# the 'keys' group
2023-10-04 21:34:20 -05:00
# maybe this will fix plausible?
2023-09-04 11:40:30 -05:00
# systemd.services.some-service = {
# serviceConfig.SupplementaryGroups = [ config.users.groups.keys.name ];
# };
# or
# users.users.example-user.extraGroups = [ config.users.groups.keys.name ];
# TODO: directory attributes for /storage subdirectories?
# example: user daniel should be able to write to /storage/files.lyte.dev and
# caddy should be able to serve it
# TODO: declarative directory quotas? for storage/$USER and /home/$USER
# TODO: would be nice to get ALL the storage stuff declared in here
# should I be using btrfs subvolumes? can I capture file ownership, permissions, and ACLs?
# All oci-containers above run under podman (rootless-capable, daemonless).
virtualisation.oci-containers.backend = "podman";

virtualisation.podman = {
  # autoPrune.enable = true;
  # defaultNetwork.settings = {
  #   driver = "host";
  # };
};
# Host-level admin tooling.
environment.systemPackages = with pkgs; [
  linuxquota # manage the usrquota option on /
  htop
  bottom

  curl
  xh
];
# Advertise subnet routes / act as an exit node on the tailnet.
services.tailscale.useRoutingFeatures = "server";
# sshd listens on the standard port plus a high alternate port.
services.openssh = {
  listenAddresses = [
    {
      addr = "0.0.0.0";
      port = 64022;
    }
    {
      addr = "0.0.0.0";
      port = 22;
    }
  ];
};
# https://github.com/NixOS/nixpkgs/blob/04af42f3b31dba0ef742d254456dc4c14eedac86/nixos/modules/services/misc/lidarr.nix#L72
# services.lidarr = {
#   enable = true;
#   dataDir = "/storage/lidarr";
# };

# services.radarr = {
#   enable = true;
#   dataDir = "/storage/radarr";
# };

# services.sonarr = {
#   enable = true;
#   dataDir = "/storage/sonarr";
# };

# services.bazarr = {
#   enable = true;
#   listenPort = 6767;
# };
# Game/service ports (TCP+UDP) plus a UDP range for ephemeral game traffic.
networking.firewall.allowedTCPPorts = [9876 9877];
networking.firewall.allowedUDPPorts = [9876 9877];
networking.firewall.allowedUDPPortRanges = [
  {
    from = 27000;
    to = 27100;
  }
];
}