/*
if ur fans get loud:

# enable manual fan control
sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x01 0x00

# set fan speed to last byte as decimal
sudo nix run nixpkgs#ipmitool -- raw 0x30 0x30 0x02 0xff 0x00
*/
{
  # inputs,
  # outputs,
  lib,
  api-lyte-dev,
  config,
  pkgs,
  ...
}: {
  networking.hostName = "beefcake";

  imports = [
    ../modules/nixos/intel.nix
    ../modules/nixos/fonts.nix

    # TODO: break these modules out someday maybe?
    {
      # hardware
      boot = {
        initrd.availableKernelModules = ["ehci_pci" "megaraid_sas" "usbhid" "uas" "sd_mod"];
        kernelModules = ["kvm-intel"];
        loader.systemd-boot.enable = true;
        loader.efi.canTouchEfiVariables = true;
      };

      fileSystems."/" = {
        device = "/dev/disk/by-uuid/0747dcba-f590-42e6-89c8-6cb2f9114d64";
        fsType = "ext4";
        options = [
          "usrquota"
        ];
      };

      fileSystems."/boot" = {
        device = "/dev/disk/by-uuid/7E3C-9018";
        fsType = "vfat";
      };

      fileSystems."/storage" = {
        device = "/dev/disk/by-uuid/ea8258d7-54d1-430e-93b3-e15d33231063";
        fsType = "btrfs";
        options = [
          "compress=zstd:5"
          "space_cache=v2"
        ];
      };
    }
    {
      # sops secrets stuff
      sops = {
        defaultSopsFile = ../secrets/beefcake/secrets.yml;
        age = {
          sshKeyPaths = ["/etc/ssh/ssh_host_ed25519_key"];
          keyFile = "/var/lib/sops-nix/key.txt";
          generateKey = true;
        };
        secrets = {
          # example-key = {
          #   # see these and other options' documentation here:
          #   # https://github.com/Mic92/sops-nix#set-secret-permissionowner-and-allow-services-to-access-it
          #   # set permissions:
          #   # mode = "0440";
          #   # owner = config.users.users.nobody.name;
          #   # group = config.users.users.nobody.group;
          #   # restart service when a secret changes or is newly initialized
          #   # restartUnits = [ "home-assistant.service" ];
          #   # symlink to certain directories
          #   path = "/var/lib/my-example-key/secrets.yaml";
          #   # for use as a user password
          #   # neededForUsers = true;
          # };
          # subdirectory
          # "myservice/my_subdir/my_secret" = { };
          "api.lyte.dev" = {
            # path = "${config.services.api-lyte-dev.stateDir}/secrets.json";
            # TODO: would be cool to assert that it's correctly-formatted JSON? probably should be done in a pre-commit hook?
            mode = "0440";
            owner = config.services.api-lyte-dev.user;
            group = config.services.api-lyte-dev.group;
          };
          "jland.env" = {
            path = "/var/lib/jland/jland.env";
            # TODO: would be cool to assert that it's correctly-formatted JSON? probably should be done in a pre-commit hook?
            mode = "0440";
            owner = config.users.users.jland.name;
            group = config.users.groups.jland.name;
          };
          plausible-admin-password = {
            # TODO: path = "${config.systemd.services.plausible.serviceConfig.WorkingDirectory}/plausible-admin-password.txt";
            path = "/var/lib/plausible/plausible-admin-password";
            mode = "0440";
            owner = config.systemd.services.plausible.serviceConfig.User;
            group = config.systemd.services.plausible.serviceConfig.Group;
          };
          plausible-secret-key-base = {
            path = "/var/lib/plausible/plausible-secret-key-base";
            mode = "0440";
            owner = config.systemd.services.plausible.serviceConfig.User;
            group = config.systemd.services.plausible.serviceConfig.Group;
          };
          nextcloud-admin-password = {
            path = "/var/lib/nextcloud/admin-password";
            mode = "0440";
            # owner = config.services.nextcloud.serviceConfig.User;
            # group = config.services.nextcloud.serviceConfig.Group;
          };
        };
      };
    }
    {
      # nix binary cache
      services.nix-serve = {
        enable = true;
        secretKeyFile = "/var/cache-priv-key.pem";
      };
      services.caddy.virtualHosts."nix.h.lyte.dev" = {
        extraConfig = ''
          reverse_proxy :${toString config.services.nix-serve.port}
        '';
      };
      networking.firewall.allowedTCPPorts = [
        80
        443
      ];

      # regularly build this flake so we have stuff in the cache
      # TODO: schedule this for nightly builds instead of intervals based on boot time
      systemd.timers."build-lytedev-flake" = {
        wantedBy = ["timers.target"];
        timerConfig = {
          OnBootSec = "30m"; # 30 minutes after booting
          OnUnitActiveSec = "1d"; # every day afterwards
          Unit = "build-lytedev-flake.service";
        };
      };
      systemd.services."build-lytedev-flake" = {
        script = ''
          nixos-rebuild build --flake git+https://git.lyte.dev/lytedev/nix.git --accept-flake-config
        '';
        path = with pkgs; [git nixos-rebuild];
        serviceConfig = {
          # TODO: mkdir -p...?
          WorkingDirectory = "/home/daniel/.home/nightly-flake-builds";
          Type = "oneshot";
          User = "daniel"; # might have to run as me for git ssh access to the repo
        };
      };

      networking = {
        extraHosts = ''
          ::1 nix.h.lyte.dev
          127.0.0.1 nix.h.lyte.dev
        '';
      };
    }
    {
      services.headscale = {
        enable = true;
        address = "127.0.0.1";
        port = 7777;
        settings = {
          server_url = "https://tailscale.vpn.h.lyte.dev";
          db_type = "sqlite3";
          db_path = "/var/lib/headscale/db.sqlite";
          derp.server = {
            enable = true;
            region_id = 999;
            stun_listen_addr = "0.0.0.0:3478";
          };
          dns_config = {
            magic_dns = true;
            base_domain = "vpn.h.lyte.dev";
            domains = [
              "ts.vpn.h.lyte.dev"
            ];
            nameservers = [
              "1.1.1.1"
              # "192.168.0.1"
            ];
            override_local_dns = true;
          };
        };
      };
      services.caddy.virtualHosts."tailscale.vpn.h.lyte.dev" = {
        extraConfig = ''
          reverse_proxy http://localhost:${toString config.services.headscale.port}
        '';
      };
      networking.firewall.allowedUDPPorts = [3478];
    }
    {
      services.soju = {
        enable = true;
        listen = ["irc+insecure://:6667"];
      };
      networking.firewall.allowedTCPPorts = [
        6667
      ];
    }
    {
      services.api-lyte-dev = rec {
        enable = true;
        port = 5757;
        stateDir = "/var/lib/api-lyte-dev";
        configFile = config.sops.secrets."api.lyte.dev".path;
        user = "api-lyte-dev";
        group = user;
      };
      systemd.services.api-lyte-dev.environment = {
        RELEASE_HOST = lib.mkForce "api.lyte.dev";
        LOG_LEVEL = "debug";
      };
      services.caddy.virtualHosts."api.lyte.dev" = {
        extraConfig = ''
          reverse_proxy :${toString config.services.api-lyte-dev.port}
        '';
      };
    }
    {
      # samba
      users.users.guest = {
        # used for anonymous samba access
        isSystemUser = true;
        group = "users";
        createHome = true;
      };
      users.users.scannerupload = {
        # used for scanner samba access
        isSystemUser = true;
        group = "users";
        createHome = true;
      };
      systemd.tmpfiles.rules = [
        "d /var/spool/samba 1777 root root -"
      ];
      services.samba-wsdd = {
        enable = true;
      };
      services.samba = {
        enable = true;
        openFirewall = true;
        securityType = "user";
        # not needed since I don't think I use printer sharing?
        # https://nixos.wiki/wiki/Samba#Printer_sharing
        # package = pkgs.sambaFull; # broken last I checked in nixpkgs?
        extraConfig = ''
          workgroup = WORKGROUP
          server string = beefcake
          netbios name = beefcake
          security = user
          #use sendfile = yes
          #max protocol = smb2
          # note: localhost is the ipv6 localhost ::1
          hosts allow = 100.64.0.0/10 192.168.0.0/16 127.0.0.1 localhost
          hosts deny = 0.0.0.0/0
          guest account = guest
          map to guest = never
          # load printers = yes
          # printing = cups
          # printcap name = cups
        '';
        shares = {
          libre = {
            path = "/storage/libre";
            browseable = "yes";
            "read only" = "no";
            "guest ok" = "yes";
            "create mask" = "0666";
            "directory mask" = "0777";
            # "force user" = "nobody";
            # "force group" = "users";
          };
          public = {
            path = "/storage/public";
            browseable = "yes";
            "read only" = "no";
            "guest ok" = "yes";
            "create mask" = "0664";
            "directory mask" = "0775";
            # "force user" = "nobody";
            # "force group" = "users";
          };
          family = {
            path = "/storage/family";
            browseable = "yes";
            "read only" = "no";
            "guest ok" = "no";
            "create mask" = "0660";
            "directory mask" = "0770";
            # "force user" = "nobody";
            # "force group" = "family";
          };
          scannerdocs = {
            path = "/storage/scannerdocs";
            browseable = "yes";
            "read only" = "no";
            "guest ok" = "no";
            "create mask" = "0600";
            "directory mask" = "0700";
            "valid users" = "scannerupload";
            "force user" = "scannerupload";
            "force group" = "users";
          };
          daniel = {
            path = "/storage/daniel";
            browseable = "yes";
            "read only" = "no";
            "guest ok" = "no";
            "create mask" = "0600";
            "directory mask" = "0700";
            # "force user" = "daniel";
            # "force group" = "users";
          };
          # printers = {
          #   comment = "All Printers";
          #   path = "/var/spool/samba";
          #   public = "yes";
          #   browseable = "yes";
          #   # to allow user 'guest account' to print.
          #   "guest ok" = "yes";
          #   writable = "no";
          #   printable = "yes";
          #   "create mode" = 0700;
          # };
        };
      };
    }
    {
      # nextcloud
      # users.users.nextcloud = {
      #   isSystemUser = true;
      #   createHome = false;
      #   group = "nextcloud";
      # };
    }
    {
      # plausible
      users.users.plausible = {
        isSystemUser = true;
        createHome = false;
        group = "plausible";
      };
      users.extraGroups = {
        "plausible" = {};
      };
      services.plausible = {
        # TODO: enable
        enable = true;
        database = {
          clickhouse.setup = true;
          postgres = {
            setup = false;
            dbname = "plausible";
          };
        };
        server = {
          baseUrl = "https://a.lyte.dev";
          disableRegistration = true;
          port = 8899;
          secretKeybaseFile = config.sops.secrets.plausible-secret-key-base.path;
        };
        adminUser = {
          activate = false;
          email = "daniel@lyte.dev";
          passwordFile = config.sops.secrets.plausible-admin-password.path;
        };
      };

      systemd.services.plausible = let
        cfg = config.services.plausible;
      in {
        serviceConfig.User = "plausible";
        serviceConfig.Group = "plausible";
        # since createdb is not gated behind postgres.setup, this breaks
        script = lib.mkForce ''
          # Elixir does not start up if `RELEASE_COOKIE` is not set,
          # even though we set `RELEASE_DISTRIBUTION=none` so the cookie should be unused.
          # Thus, make a random one, which should then be ignored.
          export RELEASE_COOKIE=$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 20)
          export ADMIN_USER_PWD="$(<$CREDENTIALS_DIRECTORY/ADMIN_USER_PWD )"
          export SECRET_KEY_BASE="$(<$CREDENTIALS_DIRECTORY/SECRET_KEY_BASE )"
          ${lib.optionalString (cfg.mail.smtp.passwordFile != null) ''export SMTP_USER_PWD="$(<$CREDENTIALS_DIRECTORY/SMTP_USER_PWD )"''}

          # setup
          ${
            if cfg.database.postgres.setup
            then "${cfg.package}/createdb.sh"
            else ""
          }
          ${cfg.package}/migrate.sh
          export IP_GEOLOCATION_DB=${pkgs.dbip-country-lite}/share/dbip/dbip-country-lite.mmdb
          ${cfg.package}/bin/plausible eval "(Plausible.Release.prepare() ; Plausible.Auth.create_user(\"$ADMIN_USER_NAME\", \"$ADMIN_USER_EMAIL\", \"$ADMIN_USER_PWD\"))"
          ${lib.optionalString cfg.adminUser.activate ''
            psql -d plausible <<< "UPDATE users SET email_verified=true where email='$ADMIN_USER_EMAIL';"
          ''}

          exec plausible start
        '';
      };

      services.caddy.virtualHosts."a.lyte.dev" = {
        extraConfig = ''
          reverse_proxy :${toString config.services.plausible.server.port}
        '';
      };
    }
    {
      # clickhouse
      environment.etc = {
        "clickhouse-server/users.d/disable-logging-query.xml" = {
          text = ''
            <clickhouse>
              <profiles>
                <default>
                  <log_queries>0</log_queries>
                  <log_query_threads>0</log_query_threads>
                </default>
              </profiles>
            </clickhouse>
          '';
        };
        "clickhouse-server/config.d/reduce-logging.xml" = {
          text = ''
            <clickhouse>
              <logger>
                <level>warning</level>
                <console>true</console>
              </logger>
              <query_thread_log remove="remove"/>
              <query_log remove="remove"/>
              <text_log remove="remove"/>
              <trace_log remove="remove"/>
              <metric_log remove="remove"/>
              <asynchronous_metric_log remove="remove"/>
              <session_log remove="remove"/>
              <part_log remove="remove"/>
            </clickhouse>
          '';
        };
      };
    }
    {
      # daniel augments
      users.groups.daniel.members = ["daniel"];
      users.groups.nixadmin.members = ["daniel"];
      users.users.daniel = {
        packages = [pkgs.weechat];
        extraGroups = [
          "nixadmin" # write access to /etc/nixos/ files
          "wheel" # sudo access
          "caddy" # write access to /storage/files.lyte.dev
          "users" # general users group
          "jellyfin" # write access to /storage/jellyfin
          "jland"
          "flanilla"
        ];
      };
    }
    {
      services.jellyfin = {
        enable = true;
        openFirewall = false;
        # uses port 8096 by default, configurable from admin UI
      };
      services.caddy.virtualHosts."video.lyte.dev" = {
        extraConfig = ''reverse_proxy :8096'';
      };
      # NOTE: this server's xeon chips DO NOT seem to support quicksync or graphics in general
      # but I can probably throw in a crappy GPU (or a big, cheap ebay GPU for ML
      # stuff, too?) and get good transcoding performance

      # jellyfin hardware encoding
      # hardware.opengl = {
      #   enable = true;
      #   extraPackages = with pkgs; [
      #     intel-media-driver
      #     vaapiIntel
      #     vaapiVdpau
      #     libvdpau-va-gl
      #     intel-compute-runtime
      #   ];
      # };
      # nixpkgs.config.packageOverrides = pkgs: {
      #   vaapiIntel = pkgs.vaapiIntel.override { enableHybridCodec = true; };
      # };
    }
    {
      services.postgresql = {
        enable = true;
        ensureDatabases = [
          "daniel"
          "plausible"
          "nextcloud"
          # "atuin"
        ];
        ensureUsers = [
          {
            name = "daniel";
            ensureDBOwnership = true;
          }
          {
            name = "plausible";
            ensureDBOwnership = true;
          }
          {
            name = "nextcloud";
            ensureDBOwnership = true;
          }
          # {
          #   name = "atuin";
          #   ensureDBOwnership = true;
          # }
        ];
        dataDir = "/storage/postgres";
        enableTCPIP = true;
        package = pkgs.postgresql_15;

        # https://www.postgresql.org/docs/current/auth-pg-hba-conf.html
        authentication = pkgs.lib.mkOverride 10 ''
          #type database user auth-method auth-options
          local all postgres peer map=superuser_map
          local all daniel peer map=superuser_map
          local sameuser all peer map=superuser_map
          # local plausible plausible peer
          # local nextcloud nextcloud peer
          # local atuin atuin peer

          # lan ipv4
          host all daniel 192.168.0.0/16 trust
          host all daniel 10.0.0.0/24 trust

          # tailnet ipv4
          host all daniel 100.64.0.0/10 trust
        '';
        identMap = ''
          # map system_user db_user
          superuser_map root postgres
          superuser_map postgres postgres
          superuser_map daniel postgres

          # Let other names login as themselves
          superuser_map /^(.*)$ \1
        '';
      };

      services.postgresqlBackup = {
        enable = true;
        backupAll = true;
        compression = "none"; # hoping for deduplication here?
        location = "/storage/postgres-backups";
        startAt = "*-*-* 03:00:00";
      };
    }
    {
      # friends
      users.users.ben = {
        isNormalUser = true;
        packages = [pkgs.vim];
        openssh.authorizedKeys.keys = [
          "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKUfLZ+IX85p9355Po2zP1H2tAxiE0rE6IYb8Sf+eF9T ben@benhany.com"
        ];
      };

      users.users.alan = {
        isNormalUser = true;
        packages = [pkgs.vim];
        openssh.authorizedKeys.keys = [
          ""
        ];
      };

      networking.firewall.allowedTCPPorts = [
        64022
      ];
      networking.firewall.allowedUDPPorts = [
        64020
      ];
    }
    {
      # flanilla family minecraft server
      users.groups.flanilla = {};
      users.users.flanilla = {
        isSystemUser = true;
        createHome = false;
        group = "flanilla";
      };
    }
    {
      # restic backups
      users.users.restic = {
        # used for other machines to backup to
        isNormalUser = true;
        openssh.authorizedKeys.keys =
          [
            "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJbPqzKB09U+i4Kqu136yOjflLZ/J7pYsNulTAd4x903 root@chromebox.h.lyte.dev"
          ]
          ++ config.users.users.daniel.openssh.authorizedKeys.keys;
      };
      # TODO: move previous backups over and put here
      # clickhouse and plausible analytics once they're up and running?
      services.restic.backups = let
        defaults = {
          passwordFile = "/root/restic-localbackup-password";
          paths = [
            "/storage/files.lyte.dev"
            "/storage/daniel"
            "/storage/gitea" # TODO: should maybe use configuration.nix's services.gitea.dump ?
            "/storage/postgres-backups"
            # https://github.com/dani-garcia/vaultwarden/wiki/Backing-up-your-vault
            # specifically, https://github.com/dani-garcia/vaultwarden/wiki/Backing-up-your-vault#sqlite-database-files
            "/var/lib/bitwarden_rs" # does this need any sqlite preprocessing?
            # TODO: backup *arr configs?
          ];
          initialize = true;
          exclude = [];
          timerConfig = {
            OnCalendar = "04:45";
          };
        };
      in {
        local =
          defaults
          // {
            repository = "/storage/backups/local";
          };
        rascal =
          defaults
          // {
            extraOptions = [
              "sftp.command='ssh beefcake@rascal -i /root/.ssh/id_ed25519 -s sftp'"
            ];
            repository = "sftp://beefcake@rascal://storage/backups/beefcake";
          };
        # TODO: add ruby?
        benland =
          defaults
          // {
            extraOptions = [
              "sftp.command='ssh daniel@n.benhaney.com -p 10022 -i /root/.ssh/id_ed25519 -s sftp'"
            ];
            repository = "sftp://daniel@n.benhaney.com://storage/backups/beefcake";
          };
      };
    }
    {
      services.caddy = {
        enable = true;
        email = "daniel@lyte.dev";
        adapter = "caddyfile";
        virtualHosts = {
          "dev.h.lyte.dev" = {
            extraConfig = ''
              reverse_proxy :8000
            '';
          };
          "files.lyte.dev" = {
            # TODO: customize the files.lyte.dev template?
            extraConfig = ''
              # @options {
              #   method OPTIONS
              # }
              # @corsOrigin {
              #   header_regexp Origin ^https?://([a-zA-Z0-9-]+\.)*lyte\.dev$
              # }
              header {
                Access-Control-Allow-Origin "{http.request.header.Origin}"
                Access-Control-Allow-Credentials true
                Access-Control-Allow-Methods *
                Access-Control-Allow-Headers *
                Vary Origin
                defer
              }
              # reverse_proxy shuwashuwa:8848 {
              #   header_down -Access-Control-Allow-Origin
              # }
              file_server browse {
                # browse template
                # hide .*
                root /storage/files.lyte.dev
              }
            '';
          };
        };
        # acmeCA = "https://acme-staging-v02.api.letsencrypt.org/directory";
      };
      networking.firewall.allowedTCPPorts = [
        8000 # random development stuff
      ];
    }
    {
      services.gitea = {
        enable = true;
        appName = "git.lyte.dev";
        stateDir = "/storage/gitea";
        settings = {
          server = {
            ROOT_URL = "https://git.lyte.dev";
            HTTP_ADDR = "127.0.0.1";
            HTTP_PORT = 3088;
            DOMAIN = "git.lyte.dev";
          };
          actions = {
            ENABLED = true;
          };
          service = {
            DISABLE_REGISTRATION = true;
          };
          session = {
            COOKIE_SECURE = true;
          };
          log = {
            # TODO: raise the log level
            LEVEL = "Debug";
          };
          ui = {
            THEMES = "catppuccin-mocha-sapphire,gitea,arc-green,auto,pitchblack";
            DEFAULT_THEME = "catppuccin-mocha-sapphire";
          };
          indexer = {
            REPO_INDEXER_ENABLED = "true";
            REPO_INDEXER_PATH = "indexers/repos.bleve";
            MAX_FILE_SIZE = "1048576";
            # REPO_INDEXER_INCLUDE =
            REPO_INDEXER_EXCLUDE = "resources/bin/**";
          };
        };
        lfs = {
          enable = true;
        };
        dump = {
          enable = true;
        };
        database = {
          # TODO: move to postgres?
          type = "sqlite3";
        };
      };
      # services.gitea-actions-runner.instances.main = {
      #   # TODO: simple git-based automation would be dope? maybe especially for
      #   # mirroring to github super easy?
      #   enable = false;
      # };
      services.caddy.virtualHosts."git.lyte.dev" = {
        extraConfig = ''
          reverse_proxy :${toString config.services.gitea.settings.server.HTTP_PORT}
        '';
      };
    }
    {
      services.vaultwarden = {
        enable = true;
        config = {
          DOMAIN = "https://bw.lyte.dev";
          SIGNUPS_ALLOWED = "false";
          ROCKET_ADDRESS = "127.0.0.1";
          ROCKET_PORT = 8222;
        };
      };
      services.caddy.virtualHosts."bw.lyte.dev" = {
        extraConfig = ''reverse_proxy :${toString config.services.vaultwarden.config.ROCKET_PORT}'';
      };
    }
    {
      # TODO: make the client declarative? right now I think it's manually git
      # clone'd to /root
      systemd.services.deno-netlify-ddns-client = {
        serviceConfig.Type = "oneshot";
        path = with pkgs; [curl bash];
        environment = {
          NETLIFY_DDNS_RC_FILE = "/root/deno-netlify-ddns-client/.env";
        };
        script = ''
          bash /root/deno-netlify-ddns-client/netlify-ddns-client.sh
        '';
      };
      systemd.timers.deno-netlify-ddns-client = {
        wantedBy = ["timers.target"];
        partOf = ["deno-netlify-ddns-client.service"];
        timerConfig = {
          OnBootSec = "10sec";
          OnUnitActiveSec = "5min";
          Unit = "deno-netlify-ddns-client.service";
        };
      };
    }
    {
      services.atuin = {
        enable = true;
        database = {
          createLocally = true;
          # uri = "postgresql://atuin@localhost:5432/atuin";
        };
        openRegistration = false;
      };
      services.caddy.virtualHosts."atuin.h.lyte.dev" = {
        extraConfig = ''reverse_proxy :${toString config.services.atuin.port}'';
      };
    }
    {
      # jland minecraft server
      users.groups.jland = {
        gid = 982;
      };
      users.users.jland = {
        uid = 986;
        isSystemUser = true;
        createHome = false;
        group = "jland";
      };
      virtualisation.oci-containers.containers.minecraft-jland = {
        autoStart = false;

        # sending commands: https://docker-minecraft-server.readthedocs.io/en/latest/commands/
        image = "docker.io/itzg/minecraft-server";
        # user = "${toString config.users.users.jland.uid}:${toString config.users.groups.jland.gid}";
        extraOptions = [
          "--tty"
          "--interactive"
        ];
        environment = {
          EULA = "true";
          # UID = toString config.users.users.jland.uid;
          # GID = toString config.users.groups.jland.gid;
          STOP_SERVER_ANNOUNCE_DELAY = "20";
          TZ = "America/Chicago";
          VERSION = "1.20.1";
          MEMORY = "8G";
          MAX_MEMORY = "16G";
          TYPE = "FORGE";
          FORGE_VERSION = "47.1.3";
          ALLOW_FLIGHT = "true";
          ENABLE_QUERY = "true";
          MODPACK = "/data/origination-files/Server-Files-0.2.14.zip";
          # TYPE = "AUTO_CURSEFORGE";
          # CF_SLUG = "monumental-experience";
          # CF_FILE_ID = "4826863"; # 2.2.53
          # due to
          # Nov 02 13:45:22 beefcake minecraft-jland[2738672]: me.itzg.helpers.errors.GenericException: The modpack authors have indicated this file is not allowed for project distribution. Please download the client zip file from https://www.curseforge.com/minecraft/modpacks/monumental-experience and pass via CF_MODPACK_ZIP environment variable or place indownloads repo directory.
          # we must upload manually
          # CF_MODPACK_ZIP = "/data/origination-files/Monumental+Experience-2.2.53.zip";
          # ENABLE_AUTOPAUSE = "true"; # TODO: must increate or disable max-tick-time
          # May also have mod/loader incompatibilities?
          # https://docker-minecraft-server.readthedocs.io/en/latest/misc/autopause-autostop/autopause/
        };
        environmentFiles = [
          # config.sops.secrets."jland.env".path
        ];
        ports = ["26965:25565"];
        volumes = [
          "/storage/jland/data:/data"
          "/storage/jland/worlds:/worlds"
        ];
      };
      networking.firewall.allowedTCPPorts = [
        26965
      ];
    }
    {
      virtualisation.oci-containers.containers.minecraft-flanilla = {
        autoStart = true;
        image = "docker.io/itzg/minecraft-server";
        user = "${toString config.users.users.flanilla.uid}:${toString config.users.groups.flanilla.gid}";
        extraOptions = ["--tty" "--interactive"];
        environment = {
          EULA = "true";
          UID = toString config.users.users.flanilla.uid;
          GID = toString config.users.groups.flanilla.gid;
          STOP_SERVER_ANNOUNCE_DELAY = "20";
          TZ = "America/Chicago";
          VERSION = "1.20.4";
          OPS = "lytedev";
          MODE = "creative";
          DIFFICULTY = "peaceful";
          ONLINE_MODE = "false";
          MEMORY = "8G";
          MAX_MEMORY = "16G";
          ALLOW_FLIGHT = "true";
          ENABLE_QUERY = "true";
          ENABLE_COMMAND_BLOCK = "true";
        };
        environmentFiles = [
          # config.sops.secrets."flanilla.env".path
        ];

        ports = ["26966:25565"];

        volumes = [
          "/storage/flanilla/data:/data"
          "/storage/flanilla/worlds:/worlds"
        ];
      };
      networking.firewall.allowedTCPPorts = [
        26966
      ];
    }
    api-lyte-dev.nixosModules.api-lyte-dev
  ];

  # TODO: non-root processes and services that access secrets need to be part of
  # the 'keys' group
  # maybe this will fix plausible?
  # systemd.services.some-service = {
  #   serviceConfig.SupplementaryGroups = [ config.users.groups.keys.name ];
  # };
  # or
  # users.users.example-user.extraGroups = [ config.users.groups.keys.name ];

  # TODO: directory attributes for /storage subdirectories?
  # example: user daniel should be able to write to /storage/files.lyte.dev and
  # caddy should be able to serve it

  # TODO: declarative directory quotas? for storage/$USER and /home/$USER

  # TODO: would be nice to get ALL the storage stuff declared in here
  # should I be using btrfs subvolumes? can I capture file ownership, permissions, and ACLs?

  virtualisation.oci-containers.backend = "podman";

  environment.systemPackages = with pkgs; [
    linuxquota
    htop
    bottom
    curl
    xh
  ];

  services.tailscale.useRoutingFeatures = "server";

  services.openssh = {
    listenAddresses = [
      {
        addr = "0.0.0.0";
        port = 64022;
      }
      {
        addr = "0.0.0.0";
        port = 22;
      }
    ];
  };

  # https://github.com/NixOS/nixpkgs/blob/04af42f3b31dba0ef742d254456dc4c14eedac86/nixos/modules/services/misc/lidarr.nix#L72
  # services.lidarr = {
  #   enable = true;
  #   dataDir = "/storage/lidarr";
  # };

  # services.radarr = {
  #   enable = true;
  #   dataDir = "/storage/radarr";
  # };

  # services.sonarr = {
  #   enable = true;
  #   dataDir = "/storage/sonarr";
  # };

  # services.bazarr = {
  #   enable = true;
  #   listenPort = 6767;
  # };

  home-manager.users.daniel.home.stateVersion = "24.05";

  system.stateVersion = "22.05";
}