This commit is contained in:
BOTAlex 2026-04-01 22:53:54 +02:00
parent 38d26110e1
commit f2bb1de7d8
15 changed files with 505 additions and 87 deletions

View file

@ -1,12 +1,12 @@
{ pkgs, ... }: { { pkgs, ... }:
{
programs.fish = { programs.fish = {
enable = true; enable = true;
shellAliases = { shellAliases = {
nrb = "sudo nixos-rebuild switch --flake /etc/nixos --impure"; nrb = "sudo nixos-rebuild switch --flake /etc/nixos --impure";
ni = "nvim /etc/nixos/configuration.nix"; ni = "nvim /etc/nixos/configuration.nix";
bat = bat = "upower -i /org/freedesktop/UPower/devices/battery_BAT0| grep -E 'state|percentage'";
"upower -i /org/freedesktop/UPower/devices/battery_BAT0| grep -E 'state|percentage'";
gpu = "nvidia-smi -q | grep -i 'draw.*W'"; gpu = "nvidia-smi -q | grep -i 'draw.*W'";
wifi = "sudo nmtui"; wifi = "sudo nmtui";
all = "sudo chmod -R a+rwx ./*"; all = "sudo chmod -R a+rwx ./*";
@ -20,15 +20,13 @@
fed = "nvim flake.nix"; fed = "nvim flake.nix";
cdn = "cd /etc/nixos"; cdn = "cd /etc/nixos";
snorre = "ssh bot@spoodythe.one"; snorre = "ssh bot@spoodythe.one";
kube-vm = kube-vm = "ssh -o 'UserKnownHostsFile=/dev/null' -o 'StrictHostKeyChecking=no' root@10.0.0.3";
"ssh -o 'UserKnownHostsFile=/dev/null' -o 'StrictHostKeyChecking=no' root@10.0.0.3"; kube-vm2 = "ssh -o 'UserKnownHostsFile=/dev/null' -o 'StrictHostKeyChecking=no' -p 2223 root@localhost";
kube-vm2 = kube-daddy = "ssh -o 'UserKnownHostsFile=/dev/null' -o 'StrictHostKeyChecking=no' root@10.0.0.2";
"ssh -o 'UserKnownHostsFile=/dev/null' -o 'StrictHostKeyChecking=no' -p 2223 root@localhost"; cpu = "sudo turbostat --quiet --show PkgWatt --interval 1 --num_iterations 1 | awk 'NR==2{print $1}'";
kube-daddy =
"ssh -o 'UserKnownHostsFile=/dev/null' -o 'StrictHostKeyChecking=no' root@10.0.0.2";
cpu =
"sudo turbostat --quiet --show PkgWatt --interval 1 --num_iterations 1 | awk 'NR==2{print $1}'";
r = "nix run"; r = "nix run";
wipe = "sudo rm -fr /var/lib/microvms/kube-* || sudo rm -fr /var/lib/microvms/shared/kube";
wg-keys = "wg genkey > privatekey && wg pubkey < privatekey > publickey";
}; };

View file

@ -2,7 +2,13 @@
# your system. Help is available in the configuration.nix(5) man page # your system. Help is available in the configuration.nix(5) man page
# and in the NixOS manual (accessible by running nixos-help). # and in the NixOS manual (accessible by running nixos-help).
{ config, pkgs, lib, inputs, ... }: {
config,
pkgs,
lib,
inputs,
...
}:
{ {
imports = [ imports = [
@ -25,9 +31,12 @@
./networking/caddy.nix ./networking/caddy.nix
./modules/buildCache.nix ./modules/buildCache.nix
./vms/kube-vm ./modules/nfs.nix
# ./vms/kube-vm
# ./vms/kube-vm2 # ./vms/kube-vm2
./vms/kube-daddy ./vms/kube-daddy
# ./networking/wireguard-kube.nix
# ./modules/de.nix # ./modules/de.nix
./modules/displayOff.nix ./modules/displayOff.nix
@ -76,11 +85,17 @@
settings = { settings = {
nix-path = lib.mapAttrsToList (n: _: "${n}=flake:${n}") inputs; nix-path = lib.mapAttrsToList (n: _: "${n}=flake:${n}") inputs;
flake-registry = ""; # optional, ensures flakes are truly self-contained flake-registry = ""; # optional, ensures flakes are truly self-contained
experimental-features = [ "nix-command" "flakes" "pipe-operators" ]; experimental-features = [
"nix-command"
"flakes"
"pipe-operators"
];
}; };
}; };
services.openssh = { enable = true; }; services.openssh = {
enable = true;
};
programs.neovim = { programs.neovim = {
enable = true; enable = true;
@ -90,14 +105,18 @@
home-manager = { home-manager = {
extraSpecialArgs = { inherit inputs; }; extraSpecialArgs = { inherit inputs; };
users = { "botserver" = import ./home.nix; }; users = {
"botserver" = import ./home.nix;
};
}; };
# Root uses the exact same module # Root uses the exact same module
home-manager.users.root = { pkgs, ... }: { home-manager.users.root =
home.stateVersion = "24.05"; { pkgs, ... }:
imports = [ ./modules/nvim.nix ]; {
}; home.stateVersion = "24.05";
imports = [ ./modules/nvim.nix ];
};
# Configure keymap in X11 # Configure keymap in X11
services.xserver.xkb = { services.xserver.xkb = {

View file

@ -0,0 +1,33 @@
# Package wrapper for `wgmesh`: builds the upstream Go binary and wraps it
# so the wireguard-tools binaries (`wg`, `wg-quick`) are always on its PATH.
{ pkgs, lib, fetchFromGitHub, wireguard-tools, makeWrapper, stdenv, ... }:
let
  version = "1.1";

  # Unwrapped upstream build — not installed directly, only consumed by the
  # wrapper derivation below.
  # NOTE(review): uses `name` rather than `pname`, so the store path does not
  # carry the version suffix — consider `pname` on the next upgrade.
  wgmesh-unwrapped = pkgs.buildGoModule rec {
    name = "wgmesh-unwrapped";
    inherit version;
    src = fetchFromGitHub {
      owner = "Dan-J-D";
      repo = "wgmesh";
      tag = "v${version}";
      hash = "sha256-7CXTyvCD4ywRZE0xTc3BbU6Ze72KQ2Q25qHl3LjBO28=";
    };
    # Hash of the vendored Go modules; must be refreshed together with
    # `version`/`hash` whenever the source is bumped.
    vendorHash = "sha256-JGaaQ+y+hbO5eBm51Wxj8u8AMdfXN9pKWIdYxPr2Ix8=";
    meta.mainProgram = "wgmesh";
  };

  # PATH fragment (wireguard-tools bin dir) appended at runtime by the wrapper.
  binPath = lib.makeBinPath [ wireguard-tools ];
in
stdenv.mkDerivation rec {
  name = "wgmesh";
  inherit version;
  nativeBuildInputs = [ makeWrapper ];
  # Nothing is compiled here: the "build" just emits $out/bin/wgmesh as a
  # makeWrapper shim around the unwrapped binary, suffixing PATH so wgmesh
  # can shell out to the WireGuard userland tools.
  buildCommand = ''
    makeWrapper ${lib.getExe wgmesh-unwrapped} $out/bin/${name} \
      --suffix-each PATH ':' "${binPath}"
  '';
  meta.mainProgram = "wgmesh";
}

View file

@ -1,6 +1,8 @@
{pkgs, ...} : { { pkgs, ... }:
{
programs.fish.enable = true; programs.fish.enable = true;
documentation.man.generateCaches = false; documentation.man.generateCaches = false;
users.users."botserver".shell = pkgs.fish; users.users."botserver".shell = pkgs.fish;
users.users.root.shell = pkgs.fish;
} }

12
modules/nfs.nix Normal file
View file

@ -0,0 +1,12 @@
# NFS server module: exposes /kube-store to the microvm network via an
# NFSv4 pseudo-root export.
{ pkgs, ... }:
{
  # Bind-mount the kube store under the export root so /export can be the
  # single NFSv4 pseudo-filesystem (fsid=0) instead of exporting
  # /kube-store directly.
  fileSystems."/export/mafuyu" = {
    device = "/kube-store";
    options = [ "bind" ];
  };

  services.nfs.server.enable = true;
  # FIX: "10.0.0.0/42" is not a valid IPv4 prefix length (maximum is /32),
  # so the export line would be rejected/ignored by exportfs. The microvm
  # bridge network used throughout this configuration is 10.0.0.0/24, so
  # export to that subnet.
  services.nfs.server.exports = ''
    /export 10.0.0.0/24(rw,fsid=0,no_subtree_check)
  '';
}

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }: {
config,
lib,
pkgs,
...
}:
with lib; with lib;
let let
@ -19,6 +24,7 @@ let
zstd zstd
glib glib
libcxx libcxx
lua
]; ];
makePkgConfigPath = x: makeSearchPathOutput "dev" "lib/pkgconfig" x; makePkgConfigPath = x: makeSearchPathOutput "dev" "lib/pkgconfig" x;
@ -51,17 +57,21 @@ let
"NIX_LD_LIBRARY_PATH=${config.home.profileDirectory}/lib/nvim-depends/lib" "NIX_LD_LIBRARY_PATH=${config.home.profileDirectory}/lib/nvim-depends/lib"
"PKG_CONFIG_PATH=${config.home.profileDirectory}/lib/nvim-depends/pkgconfig" "PKG_CONFIG_PATH=${config.home.profileDirectory}/lib/nvim-depends/pkgconfig"
]; ];
in { in
{
home.packages = with pkgs; [ home.packages = with pkgs; [
patchelf patchelf
nvim-depends-include nvim-depends-include
nvim-depends-library nvim-depends-library
nvim-depends-pkgconfig nvim-depends-pkgconfig
ripgrep ripgrep
lua
]; ];
home.extraOutputsToInstall = [ "nvim-depends" ]; home.extraOutputsToInstall = [ "nvim-depends" ];
home.shellAliases.nvim = (concatStringsSep " " buildEnv) home.shellAliases.nvim =
+ " SQLITE_CLIB_PATH=${pkgs.sqlite.out}/lib/libsqlite3.so " + "nvim"; (concatStringsSep " " buildEnv)
+ " SQLITE_CLIB_PATH=${pkgs.sqlite.out}/lib/libsqlite3.so "
+ "nvim";
programs.neovim = { programs.neovim = {
enable = true; enable = true;
@ -84,6 +94,8 @@ in {
yarn yarn
texlivePackages.latex texlivePackages.latex
tree-sitter tree-sitter
luarocks
lua
]; ];
extraLuaPackages = ls: with ls; [ luarocks ]; extraLuaPackages = ls: with ls; [ luarocks ];

View file

@ -1,4 +1,5 @@
{ pkgs, ... }: { { pkgs, ... }:
{
imports = [ ./networkSetup.nix ]; imports = [ ./networkSetup.nix ];
services.caddy.virtualHosts."immich.deprived.dev" = { services.caddy.virtualHosts."immich.deprived.dev" = {
@ -24,7 +25,9 @@
# ''; # '';
# }; # };
services.caddy.virtualHosts."argocd.deprived.dev" = { services.caddy.virtualHosts."argocd.deprived.dev" = {
extraConfig = "reverse_proxy https://127.0.0.1:4325"; extraConfig = ''
reverse_proxy * 10.0.0.2:4325
'';
}; };
services.caddy.virtualHosts."webui.deprived.dev" = { services.caddy.virtualHosts."webui.deprived.dev" = {
@ -56,6 +59,31 @@
reverse_proxy * 127.0.0.1:5544 reverse_proxy * 127.0.0.1:5544
''; '';
}; };
services.caddy.virtualHosts."akupunktur-herlev.dk" = {
extraConfig = ''
redir https://www.akupunktur-herlev.dk{uri} 301
'';
};
services.caddy.virtualHosts."www.akupunktur-herlev.dk" = {
extraConfig = ''
reverse_proxy 127.0.0.1:6642 {
header_up Host {host}
header_up X-Real-IP {remote_host}
header_up X-Forwarded-Proto {scheme}
}
'';
};
services.caddy.virtualHosts."devcam.deprived.dev" = {
extraConfig = ''
@protected not method OPTIONS
basicauth @protected {
alex $2a$14$GbqQnETcOz5fNEbS06Y0E.HxRIIgPKAK7OMijT1Bv63h3V6S/gwRG
}
reverse_proxy * 192.168.50.85:80
'';
};
services.caddy.virtualHosts."api.deprived.dev" = { services.caddy.virtualHosts."api.deprived.dev" = {
extraConfig = '' extraConfig = ''

View file

@ -0,0 +1,30 @@
# One-shot systemd unit that brings up the kube WireGuard tunnel from a
# pre-provisioned config file at /etc/wireguard/wireguard-kube.conf.
{ config, pkgs, ... }:
{
  # Ensure the necessary tools are installed
  environment.systemPackages = [ pkgs.wireguard-tools ];

  # Forward IPv4 between interfaces so traffic from other hosts can be
  # routed through this machine's tunnel.
  boot.kernel.sysctl."net.ipv4.ip_forward" = 1;

  systemd.services.wireguard-kube = {
    description = "WireGuard VPN Service for kube-wg";
    # Ensure the service starts after the network is up
    after = [
      "network.target"
      "network-online.target"
    ];
    wants = [ "network-online.target" ];
    wantedBy = [ "multi-user.target" ];
    serviceConfig = {
      # oneshot + RemainAfterExit: the unit stays "active" after wg-quick
      # returns, and ExecStop tears the interface down on stop/shutdown.
      Type = "oneshot";
      RemainAfterExit = true;
      # Use wg-quick to setup and teardown the interface
      ExecStart = "${pkgs.wireguard-tools}/bin/wg-quick up /etc/wireguard/wireguard-kube.conf";
      ExecStop = "${pkgs.wireguard-tools}/bin/wg-quick down /etc/wireguard/wireguard-kube.conf";
      # Restrict the (root) service to the network capabilities wg-quick
      # actually needs (interface/route manipulation).
      CapabilityBoundingSet = "CAP_NET_ADMIN CAP_NET_RAW";
    };
  };
}

View file

@ -1,4 +1,5 @@
{ pkgs, ... }: { { pkgs, ... }:
{
environment.systemPackages = with pkgs; [ environment.systemPackages = with pkgs; [
neovim neovim
wget wget
@ -17,6 +18,9 @@
wireguard-tools wireguard-tools
apacheHttpd apacheHttpd
p7zip p7zip
kubectl
lua5_1
luarocks
vtk vtk
immich-cli immich-cli
parted parted

View file

@ -0,0 +1,27 @@
# Keeps the ArgoCD UI reachable from outside the cluster: patches the
# argocd server config to run insecure behind the proxy, then holds open a
# kubectl port-forward (0.0.0.0:4325 -> argocd-server:443), restarting
# forever so the forward survives VM/pod restarts.
{ pkgs, ... }:
let
  # Single source of truth for the kubectl binary used by the script.
  kubectl = "${pkgs.kubernetes}/bin/kubectl";
in
{
  systemd.services."argo-forward" = {
    description = "forwards argo running on kubernetes";
    after = [ "network-online.target" "microvm@kubernetes.service" ];
    wants = [ "network-online.target" ];
    wantedBy = [ "multi-user.target" ];
    serviceConfig.User = "root";
    # Restart=always re-establishes the forward whenever it drops.
    serviceConfig.Restart = "always";
    script = ''
      sleep 10
      ${kubectl} patch cm argocd-cmd-params-cm -n argocd --type merge --patch '{"data":{"server.insecure": "true", "url":"https://argocd.deprived.dev"}}'
      ${kubectl} port-forward svc/argocd-server -n argocd 4325:443 --address 0.0.0.0 || true
    '';
  };
}

View file

@ -1,25 +1,42 @@
{ pkgs, ... }: { { pkgs, ... }:
{
environment.systemPackages = with pkgs; [ virtiofsd ]; environment.systemPackages = with pkgs; [ virtiofsd ];
microvm.autostart = [ "kube-daddy" ]; microvm.autostart = [ "kube-daddy" ];
microvm.vms."kube-daddy" = { config = ./kube-daddy.nix; }; microvm.vms."kube-daddy" = {
config = ./kube-daddy.nix;
};
systemd.services.kube-iptable = {
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "oneshot";
ExecStart = "${pkgs.iptables}/bin/iptables -t nat -I POSTROUTING 1 -s 10.0.0.0/24 -o enp8s0 -j MASQUERADE ";
RemainAfterExit = true;
User = "root";
};
stopIfChanged = true;
};
networking = { networking = {
# 1. Create a Bridge (The Switch) bridges = {
bridges = { "br0" = { interfaces = [ "microvm-tap1" "microvm-tap2" ]; }; }; "br0" = {
interfaces = [
"microvm-tap1"
"microvm-tap2"
];
};
};
# 2. Assign the Gateway IP to the Bridge (NOT the taps) interfaces.br0.ipv4.addresses = [
interfaces.br0.ipv4.addresses = [{ {
address = "10.0.0.1"; address = "10.0.0.1";
prefixLength = 24; prefixLength = 24;
}]; }
];
# 3. Create persistent TAP interfaces so they exist at boot
# (This requires you to create a systemd service or use ip tuntap commands.
# Below is a "hack" using a dummy script, or use systemd-networkd netdevs if enabled)
# The cleanest NixOS way without networkd is often just letting the bridge create them
# or defining them as virtual devices (requires manual script usually).
#
# Use this script to ensure they exist before the bridge tries to enslave them:
localCommands = '' localCommands = ''
ip tuntap add dev microvm-tap1 mode tap user root || true ip tuntap add dev microvm-tap1 mode tap user root || true
ip tuntap add dev microvm-tap2 mode tap user root || true ip tuntap add dev microvm-tap2 mode tap user root || true
@ -27,18 +44,21 @@
ip link set microvm-tap2 up ip link set microvm-tap2 up
''; '';
# 4. Update NAT to use the Bridge
nat = { nat = {
enable = true; enable = true;
externalInterface = "enp8s0"; # Your physical interface externalInterface = "enp8s0";
internalInterfaces = [ "br0" ]; # NAT traffic coming from the bridge internalIPs = [ "10.0.0.0/24" ];
forwardPorts = [ forwardPorts = [
{ {
sourcePort = 8877; sourcePort = 8877;
destination = "10.0.0.2:8888"; destination = "10.0.0.2:8888";
proto = "tcp"; proto = "tcp";
} }
# { # Access this directly from host by 10.0.0.2:4325
# sourcePort = 4325; # argocd
# destination = "10.0.0.2:8080";
# proto = "tcp";
# }
{ {
sourcePort = 6443; sourcePort = 6443;
destination = "10.0.0.2:6443"; destination = "10.0.0.2:6443";
@ -49,6 +69,41 @@
destination = "10.0.0.2:4123"; destination = "10.0.0.2:4123";
proto = "tcp"; proto = "tcp";
} }
{
sourcePort = 8472;
destination = "10.0.0.2:8472";
proto = "udp";
}
{
sourcePort = 2379;
destination = "10.0.0.2:2379";
proto = "udp";
}
{
sourcePort = 2380;
destination = "10.0.0.2:2380";
proto = "udp";
}
{
sourcePort = 2379;
proto = "tcp";
destination = "10.0.0.2:2379";
}
{
sourcePort = 2380;
destination = "10.0.0.2:2380";
proto = "tcp";
}
{
sourcePort = 4001;
destination = "10.0.0.2:4001";
proto = "udp";
}
{
sourcePort = 4001;
destination = "10.0.0.2:4001";
proto = "tcp";
}
# If your app uses UDP (like HTTP/3 or QUIC), add this too: # If your app uses UDP (like HTTP/3 or QUIC), add this too:
# { sourcePort = 8888; destination = "10.0.0.2:8888"; proto = "udp"; } # { sourcePort = 8888; destination = "10.0.0.2:8888"; proto = "udp"; }
]; ];
@ -57,4 +112,12 @@
# 5. Update Firewall to trust the Bridge # 5. Update Firewall to trust the Bridge
firewall.trustedInterfaces = [ "br0" ]; firewall.trustedInterfaces = [ "br0" ];
}; };
systemd.tmpfiles.rules = [
"d /var/lib/microvms/shared 0755 microvm kvm -"
"d /var/lib/microvms/shared/kube 0755 microvm kvm -"
"d /var/lib/microvms/shared/docking 0755 microvm kvm -"
"d /var/lib/microvms/shared/.config 0755 microvm kvm -"
"d /var/lib/microvms/shared/.local 0755 microvm kvm -"
];
} }

View file

@ -1,4 +1,6 @@
{ pkgs, ... }: { { lib, pkgs, ... }:
{
users.users.root = { users.users.root = {
openssh.authorizedKeys.keys = [ openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAhiPhFbCi64NduuV794omgS8mctBLXtqxbaEJyUo6lg botalex@DESKTOPSKTOP-ENDVV0V" "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAhiPhFbCi64NduuV794omgS8mctBLXtqxbaEJyUo6lg botalex@DESKTOPSKTOP-ENDVV0V"
@ -13,8 +15,7 @@
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBLSUXsao6rjC3FDtRHhh7z6wqMtA/mqL50e1Dj9a2wE botserver@botserver" "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBLSUXsao6rjC3FDtRHhh7z6wqMtA/mqL50e1Dj9a2wE botserver@botserver"
]; ];
hashedPassword = hashedPassword = "$6$HpwhjoEuhRZuFhJF$jEV3SxbcGKVlRRgbDx6YpySyTHKUIOnmUD0Rd4PLXsXhbnrgeBVCPfkK.cBCUmxUeQjNTzj4CDpP4XBxLz0EV0";
"$6$HpwhjoEuhRZuFhJF$jEV3SxbcGKVlRRgbDx6YpySyTHKUIOnmUD0Rd4PLXsXhbnrgeBVCPfkK.cBCUmxUeQjNTzj4CDpP4XBxLz0EV0";
shell = pkgs.fish; shell = pkgs.fish;
@ -22,8 +23,14 @@
environment.variables.EDITOR = "nvim"; environment.variables.EDITOR = "nvim";
services.openssh = { enable = true; }; services.openssh = {
imports = [ ./../../modules/getNvim.nix ./kubernetes.nix ]; enable = true;
};
imports = [
./../../modules/getNvim.nix
./kubernetes.nix
# ./wg-snorre.nix
];
environment.systemPackages = with pkgs; [ environment.systemPackages = with pkgs; [
neovim neovim
git git
@ -42,6 +49,9 @@
openssl openssl
dig dig
argocd argocd
gnutar
wireguard-tools
python312
]; ];
programs.fish = { programs.fish = {
@ -59,11 +69,13 @@
vcpu = 8; vcpu = 8;
# Create a tap interface or user networking # Create a tap interface or user networking
interfaces = [{ interfaces = [
type = "tap"; {
id = "microvm-tap1"; # Matches the host's first tap type = "tap";
mac = "02:00:00:00:00:01"; id = "microvm-tap1"; # Matches the host's first tap
}]; mac = "02:00:00:00:00:01";
}
];
# forwardPorts = [ # forwardPorts = [
# { # {
@ -91,42 +103,120 @@
# Mount the host's /nix/store explicitly (read-only) # Mount the host's /nix/store explicitly (read-only)
# This makes the VM start instantly as it shares the host store. # This makes the VM start instantly as it shares the host store.
shares = [{ shares = [
tag = "ro-store"; {
source = "/nix/store"; tag = "ro-store";
mountPoint = "/nix/.ro-store"; source = "/nix/store";
}]; mountPoint = "/nix/.ro-store";
}
{
proto = "9p";
tag = "docking-mount";
# Source path can be absolute or relative
# to /var/lib/microvms/$hostName
source = "../shared/docking";
mountPoint = "/root/docking";
}
{
proto = "9p";
tag = "kube-wireguard";
# Source path can be absolute or relative
# to /var/lib/microvms/$hostName
source = "../shared/wg";
mountPoint = "/root/wg";
}
# {
# proto = "9p";
# tag = "kube-mount";
# source = "../shared/kube";
# mountPoint = "/var/lib/kubernetes";
# }
# {
# proto = "9p";
# tag = "config";
# source = "../shared/.config";
# mountPoint = "/root/.config";
# }
# {
# proto = "9p";
# tag = "local";
# source = "../shared/.local";
# mountPoint = "/root/.local";
# }
];
# Writable disk allocation # Writable disk allocation
volumes = [{ volumes = [
image = "/var/lib/microvms/kube-daddy/kube-daddy.img"; {
mountPoint = "/"; image = "/var/lib/microvms/kube-daddy/kube-daddy.img";
size = 32768; # Size in MB mountPoint = "/";
}]; size = 32768; # Size in MB
}
];
}; };
services.resolved.enable = true;
networking = { networking = {
hostName = "kube-daddy"; hostName = "kube-daddy";
useNetworkd = true; useNetworkd = true;
firewall.enable = firewall.enable = false;
false; # Keep disabled for easier testing, or allow port 22 nameservers = [
"10.0.0.1"
"8.8.8.8"
];
};
interfaces.enp0s4.ipv4.addresses = [{ systemd.network = {
address = "10.0.0.2"; # 1. Define the Bridge Device
prefixLength = 24; netdevs."20-br0" = {
}]; netdevConfig = {
Kind = "bridge";
defaultGateway = { Name = "br0";
address = "10.0.0.1"; };
interface = "enp0s4"; };
networks = {
# 2. Configure the Bridge (IP & Gateway go here now)
"30-br0" = {
matchConfig.Name = "br0";
networkConfig = {
Address = "10.0.0.2/24";
Gateway = "10.0.0.1";
DNS = [
"10.0.0.1"
"8.8.8.8"
];
};
linkConfig.RequiredForOnline = "routable";
};
# 3. Catch the changing interface and attach it to the bridge
"40-uplink" = {
# This wildcard matches enp0s7, enp1s0, etc.
matchConfig.Name = "en*";
networkConfig.Bridge = "br0";
};
}; };
nameservers = [ "1.1.1.1" ];
}; };
# Allow passwordless root login for testing (Do not use in production!) # Allow passwordless root login for testing (Do not use in production!)
services.getty.autologinUser = "root"; services.getty.autologinUser = "root";
users.users.root.password = ""; users.users.root.password = "";
systemd.services."load-br_netfilter" = {
enable = true;
description = "Modprobe br_netfilter";
before = [ "flannel.service" ];
wantedBy = [
"multi-user.target"
"flannel.service"
];
script = ''
${pkgs.kmod}/bin/modprobe br_netfilter
'';
};
systemd.network.enable = true; systemd.network.enable = true;
systemd.network.networks."11-microvm" = { systemd.network.networks."11-microvm" = {
matchConfig.Name = "vm-*"; matchConfig.Name = "vm-*";
@ -135,5 +225,11 @@
}; };
system.stateVersion = "24.11"; system.stateVersion = "24.11";
}
systemd.tmpfiles.rules = [
"d /root/.kube 0755 root root -"
"d /root/.config 0755 root root -"
"d /root/.local 0755 root root -"
"L+ /root/.kube/config - - - - /etc/kubernetes/cluster-admin.kubeconfig"
];
}

View file

@ -1,4 +1,9 @@
{ config, pkgs, ... }: {
config,
pkgs,
lib,
...
}:
let let
# When using easyCerts=true the IP Address must resolve to the master on creation. # When using easyCerts=true the IP Address must resolve to the master on creation.
# So use simply 127.0.0.1 in that case. Otherwise you will have errors like this https://github.com/NixOS/nixpkgs/issues/59364 # So use simply 127.0.0.1 in that case. Otherwise you will have errors like this https://github.com/NixOS/nixpkgs/issues/59364
@ -8,17 +13,33 @@ let
in in
{ {
# resolve master hostname # resolve master hostname
networking.extraHosts = "${kubeMasterIP} ${kubeMasterHostname}"; networking.extraHosts = ''
${kubeMasterIP} ${kubeMasterHostname}
10.0.0.2 kube-daddy
10.0.0.4 kube-desk
10.0.0.5 kube-snorre'';
networking.firewall.enable = false; networking.firewall.enable = false;
imports = [
./argo-forward.nix
./longhorn-deps.nix
];
# packages for administration tasks # packages for administration tasks
environment.systemPackages = with pkgs; [ kompose kubectl kubernetes ]; environment.systemPackages = with pkgs; [
kompose
kubectl
kubernetes
(pkgs.callPackage /etc/nixos/modules/customPackages/wgmesh { })
];
services.kubernetes = { services.kubernetes = {
roles = [ "master" "node" ]; roles = [
"master"
"node"
];
masterAddress = kubeMasterHostname; masterAddress = kubeMasterHostname;
apiserverAddress = apiserverAddress = "https://${kubeMasterHostname}:${toString kubeMasterAPIServerPort}";
"https://${kubeMasterHostname}:${toString kubeMasterAPIServerPort}";
easyCerts = true; easyCerts = true;
apiserver = { apiserver = {
securePort = kubeMasterAPIServerPort; securePort = kubeMasterAPIServerPort;
@ -31,6 +52,11 @@ in
addons.dns.enable = true; addons.dns.enable = true;
# needed if you use swap # needed if you use swap
kubelet.extraOpts = "--fail-swap-on=false"; kubelet.extraOpts = "--fail-swap-on=false --resolv-conf=/run/systemd/resolve/resolv.conf";
};
services.flannel = {
iface = "br0";
publicIp = "10.0.0.2";
}; };
} }

View file

@ -0,0 +1,33 @@
# Host-side dependencies required by Longhorn (Kubernetes block storage):
# iSCSI initiator, NFS client support, kernel modules, and the iscsiadm
# symlinks Longhorn's pods expect at fixed paths.
{ config, pkgs, ... }:
{
  # 1. Enable iSCSI daemon (Crucial for Longhorn)
  services.openiscsi = {
    enable = true;
    # Initiator IQN, made unique per machine via the hostname.
    name = "iqn.2026-03.com.proxy-m:${config.networking.hostName}";
  };

  # 2. Enable NFS support (For RWX volumes)
  boot.supportedFilesystems = [ "nfs" ];
  services.rpcbind.enable = true;

  # 3. Load required kernel modules
  boot.kernelModules = [
    "iscsi_tcp"
    "dm_crypt"
    "dm_multipath"
  ];

  # 4. Ensure necessary tools are available in the system path
  environment.systemPackages = with pkgs; [
    openiscsi
    nfs-utils
    util-linux # for findmnt, etc.
    bash
  ];

  # Longhorn invokes iscsiadm at FHS paths that do not exist on NixOS;
  # link them to the current system profile. "L+" replaces any existing
  # file at the link path.
  systemd.tmpfiles.rules = [
    "L+ /usr/local/bin/iscsiadm - - - - /run/current-system/sw/bin/iscsiadm"
    "L+ /usr/bin/iscsiadm - - - - /run/current-system/sw/bin/iscsiadm"
  ];
}

View file

@ -0,0 +1,35 @@
# wg-quick client configuration for the wg0 tunnel.
# NOTE(review): key files live under /root/wg — presumably the 9p share
# mounted into the VM elsewhere in this commit; confirm they exist before
# the interface comes up.
{ config, ... }:
{
  networking.wg-quick.interfaces = {
    wg0 = {
      # The local IP address for this interface
      address = [ "10.13.13.3/32" ];

      # The DNS server to use when the tunnel is active
      dns = [ "10.0.101.1" ];

      # The port to listen on
      listenPort = 51820;

      # Path to the private key file (using age for security)
      privateKeyFile = "/root/wg/priv.key";

      peers = [
        {
          # The public key of the remote peer
          publicKey = "QD36zS9c4IWYzqPAjP88hX9nx4wWJ9thB9YlO6vCtzo=";

          # Path to the preshared key file (security best practice)
          presharedKeyFile = "/root/wg/pre.key";

          # The remote endpoint and port
          endpoint = "37.49.130.171:51820";

          # Traffic to route through the tunnel (0.0.0.0/0 sends everything)
          allowedIPs = [ "0.0.0.0/0" ];
        }
      ];
    };
  };
}
}