checkpoint

BOTAlex 2026-01-26 21:39:43 +01:00
parent 376912c631
commit 38d26110e1
13 changed files with 547 additions and 82 deletions

@@ -0,0 +1,60 @@
{ pkgs, ... }: {
environment.systemPackages = with pkgs; [ virtiofsd ];
microvm.autostart = [ "kube-daddy" ];
microvm.vms."kube-daddy" = { config = ./kube-daddy.nix; };
networking = {
# 1. Create a Bridge (The Switch)
bridges = { "br0" = { interfaces = [ "microvm-tap1" "microvm-tap2" ]; }; };
# 2. Assign the Gateway IP to the Bridge (NOT the taps)
interfaces.br0.ipv4.addresses = [{
address = "10.0.0.1";
prefixLength = 24;
}];
# 3. Create persistent TAP interfaces so they exist at boot.
# Without systemd-networkd this needs either a small script (the localCommands
# "hack" below) or declaratively defined netdevs once networkd is enabled
# (see the commented sketch after the script).
# The script simply makes sure both taps exist and are up before the bridge
# tries to enslave them:
localCommands = ''
ip tuntap add dev microvm-tap1 mode tap user root || true
ip tuntap add dev microvm-tap2 mode tap user root || true
ip link set microvm-tap1 up
ip link set microvm-tap2 up
'';
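# Hedged alternative (untested sketch): if the host enables systemd-networkd,
# the same taps can be declared instead of scripted, roughly:
#   systemd.network.netdevs."10-microvm-tap1".netdevConfig = {
#     Kind = "tap";
#     Name = "microvm-tap1";
#   };
#   systemd.network.netdevs."10-microvm-tap2".netdevConfig = {
#     Kind = "tap";
#     Name = "microvm-tap2";
#   };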
# 4. Update NAT to use the Bridge
nat = {
enable = true;
externalInterface = "enp8s0"; # Your physical interface
internalInterfaces = [ "br0" ]; # NAT traffic coming from the bridge
forwardPorts = [
{
sourcePort = 8877;
destination = "10.0.0.2:8888";
proto = "tcp";
}
{
sourcePort = 6443;
destination = "10.0.0.2:6443";
proto = "tcp";
}
{
sourcePort = 4123;
destination = "10.0.0.2:4123";
proto = "tcp";
}
# If your app uses UDP (like HTTP/3 or QUIC), add this too:
# { sourcePort = 8888; destination = "10.0.0.2:8888"; proto = "udp"; }
];
};
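# The 10.0.0.2 destinations above must match the static address that
# kube-daddy assigns itself on enp0s4 (see kube-daddy.nix below).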
# 5. Update Firewall to trust the Bridge
firewall.trustedInterfaces = [ "br0" ];
};
}

@@ -0,0 +1,139 @@
{ pkgs, ... }: {
users.users.root = {
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAhiPhFbCi64NduuV794omgS8mctBLXtqxbaEJyUo6lg botalex@DESKTOPSKTOP-ENDVV0V"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIFhTExbc9m4dCK6676wGiA8zPjE0l/9Fz2yf0IKvUvg snorre@archlinux"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGxUPAsPkri0B+xkO3sCHJZfKgAbgPcepP8J4WW4yyLj u0_a167@localhost"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfQLOKUnOARUAs8X1EL1GRHoCQ0oMun0vzL7Z78yOsM nixos@nixos"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJw1ckvXz78ITeqANrWSkJl6PJo2AMA4myNrRMBAB7xW zhentao2004@gmail.com"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKhcUZbIMX0W27l/FMF5WijpdsJAK329/P008OEAfcyz botmain@nixos"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILB0esg3ABIcYWxvQKlPuwEE6cbhNcWjisfky0wnGirJ root@nixos"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGxUPAsPkri0B+xkO3sCHJZfKgAbgPcepP8J4WW4yyLj u0_a167@localhost"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKyZOZlcQBmqSPxjaGgE2tP+K7LYziqjFUo3EX12rGtf botlap@nixos"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBLSUXsao6rjC3FDtRHhh7z6wqMtA/mqL50e1Dj9a2wE botserver@botserver"
];
hashedPassword =
"$6$HpwhjoEuhRZuFhJF$jEV3SxbcGKVlRRgbDx6YpySyTHKUIOnmUD0Rd4PLXsXhbnrgeBVCPfkK.cBCUmxUeQjNTzj4CDpP4XBxLz0EV0";
shell = pkgs.fish;
};
environment.variables.EDITOR = "nvim";
services.openssh = { enable = true; };
imports = [ ./../../modules/getNvim.nix ./kubernetes.nix ];
environment.systemPackages = with pkgs; [
neovim
git
wget
curl
busybox
gcc
tree-sitter
nodejs_22
screen
fastfetch
btop
openssh
ripgrep
openssl
dig
argocd
];
programs.fish = {
enable = true;
};
documentation.man.generateCaches = false;
# --- MicroVM Specific Settings ---
microvm = {
# Choose your hypervisor: "qemu", "firecracker", "cloud-hypervisor", etc.
hypervisor = "qemu";
mem = 8192;
vcpu = 8;
# Create a tap interface or user networking
interfaces = [{
type = "tap";
id = "microvm-tap1"; # Matches the host's first tap
mac = "02:00:00:00:00:01";
}];
# forwardPorts = [
# {
# from = "host";
# host.port = 22222;
# guest.port = 22;
# }
# {
# from = "host";
# host.port = 6443; # Port you will access on your machine
# guest.port = 6443; # Port the service is listening on inside the VM
# }
# {
# from = "host";
# host.port = 8877; # certmgr
# guest.port = 8888;
# }
# {
# from = "host";
# host.port = 4325; # argocd
# guest.port = 4325;
# }
#
# ];
# Mount the host's /nix/store explicitly (read-only)
# This makes the VM start instantly as it shares the host store.
shares = [{
tag = "ro-store";
source = "/nix/store";
mountPoint = "/nix/.ro-store";
}];
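# Note: with only the read-only store share above, the guest cannot write to
# /nix/store. microvm.nix also offers a writable overlay if builds inside the
# VM are needed (hedged example, option name per the microvm.nix docs):
#   writableStoreOverlay = "/nix/.rw-store";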
# Writable disk allocation
volumes = [{
image = "/var/lib/microvms/kube-daddy/kube-daddy.img";
mountPoint = "/";
size = 32768; # Size in MB
}];
};
networking = {
hostName = "kube-daddy";
useNetworkd = true;
firewall.enable =
false; # Keep disabled for easier testing, or allow port 22
interfaces.enp0s4.ipv4.addresses = [{
address = "10.0.0.2";
prefixLength = 24;
}];
defaultGateway = {
address = "10.0.0.1";
interface = "enp0s4";
};
nameservers = [ "1.1.1.1" ];
};
# Allow passwordless root login for testing (Do not use in production!)
services.getty.autologinUser = "root";
users.users.root.password = "";
systemd.network.enable = true;
systemd.network.networks."11-microvm" = {
matchConfig.Name = "vm-*";
# Attach to the bridge that was configured above
networkConfig.Bridge = "microvm";
};
system.stateVersion = "24.11";
}

@@ -0,0 +1,36 @@
{ config, pkgs, ... }:
let
# With easyCerts = true the master IP address must resolve to the master when
# the certificates are created; on a single machine simply use 127.0.0.1.
# Otherwise you get errors like https://github.com/NixOS/nixpkgs/issues/59364
kubeMasterIP = "176.23.63.215";
kubeMasterHostname = "clussy.deprived.dev";
kubeMasterAPIServerPort = 6443;
in
{
# resolve master hostname
networking.extraHosts = "${kubeMasterIP} ${kubeMasterHostname}";
networking.firewall.enable = false;
# packages for administration tasks
environment.systemPackages = with pkgs; [ kompose kubectl kubernetes ];
services.kubernetes = {
roles = [ "master" "node" ];
masterAddress = kubeMasterHostname;
apiserverAddress =
"https://${kubeMasterHostname}:${toString kubeMasterAPIServerPort}";
easyCerts = true;
apiserver = {
securePort = kubeMasterAPIServerPort;
advertiseAddress = kubeMasterIP;
};
flannel.enable = true;
# use coredns
addons.dns.enable = true;
# needed if you use swap
kubelet.extraOpts = "--fail-swap-on=false";
};
}

@@ -0,0 +1,108 @@
{ config, pkgs, ... }: {
environment.systemPackages = with pkgs; [
kompose
kubectl
kubernetes
containerd
];
virtualisation = {
docker.enable = true;
containerd.enable = true;
};
services = {
etcd = {
enable = true;
peerCertFile = "/etc/kubernetes/pki/etcd/peer.crt";
peerKeyFile = "/etc/kubernetes/pki/etcd/peer.key";
peerTrustedCaFile = "/etc/kubernetes/pki/etcd/ca.crt";
peerClientCertAuth = true;
certFile = "/etc/kubernetes/pki/etcd/server.crt";
keyFile = "/etc/kubernetes/pki/etcd/server.key";
trustedCaFile = "/etc/kubernetes/pki/etcd/ca.crt";
};
};
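# NOTE: every /etc/kubernetes/pki/* path referenced below is assumed to exist
# already; nothing in this file generates the CA, server, client, or
# service-account keys.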
services.kubernetes = {
masterAddress = "10.0.2.15"; # Address of enp0s4, taken from "ip addr"
kubelet.enable = true;
apiserver = {
enable = true;
advertiseAddress = "10.0.2.15"; # Same address as enp0s4 above
bindAddress = "0.0.0.0";
securePort = 6443;
# 1. Etcd Connectivity (Fixes "unknown authority" & "remote error: tls: certificate required")
etcd = {
servers = [ "https://10.0.2.15:2379" ];
caFile = "/etc/kubernetes/pki/etcd/ca.crt"; # MUST be Etcd CA [cite: 60]
certFile = "/etc/kubernetes/pki/apiserver-etcd-client.crt"; # [cite: 59]
keyFile = "/etc/kubernetes/pki/apiserver-etcd-client.key"; # [cite: 59]
};
# 2. Service Account Signing (Fixes "invalid RSA key")
serviceAccountIssuer = "https://kubernetes.default.svc"; # [cite: 108]
serviceAccountSigningKeyFile =
"/etc/kubernetes/pki/sa.key"; # Private Key [cite: 110]
serviceAccountKeyFile =
"/etc/kubernetes/pki/sa.pub"; # Public Key [cite: 112]
# 3. Serving TLS (Fixes Scheduler "certificate signed by unknown authority")
tlsCertFile =
"/etc/kubernetes/pki/apiserver.crt"; # Server Identity [cite: 116]
tlsKeyFile = "/etc/kubernetes/pki/apiserver.key"; # [cite: 117]
clientCaFile =
"/etc/kubernetes/pki/ca.crt"; # Trust Client Certs (Scheduler) [cite: 76]
# 4. Kubelet Communication (Best Practice)
kubeletClientCaFile = "/etc/kubernetes/pki/ca.crt"; # [cite: 94]
kubeletClientCertFile =
"/etc/kubernetes/pki/apiserver-kubelet-client.crt"; # [cite: 96]
kubeletClientKeyFile =
"/etc/kubernetes/pki/apiserver-kubelet-client.key"; # [cite: 98]
};
scheduler = {
enable = true;
address = "0.0.0.0"; # Listen on all interfaces
leaderElect = true;
# Maps to --kubeconfig
kubeconfig = {
server = "https://10.0.2.15:6443";
caFile = "/etc/kubernetes/pki/ca.crt";
certFile = "/etc/kubernetes/pki/scheduler.crt"; # Client Cert
keyFile = "/etc/kubernetes/pki/scheduler.key";
};
};
controllerManager = {
enable = true;
bindAddress = "0.0.0.0"; # Listen on all interfaces
leaderElect = true;
# 1. Signing Service Accounts (MUST match API Server sa.key)
serviceAccountKeyFile = "/etc/kubernetes/pki/sa.key";
# 2. CA included in Service Account secrets
rootCaFile = "/etc/kubernetes/pki/ca.crt";
# 3. Kubeconfig for talking to API Server
kubeconfig = {
server = "https://10.0.2.15:6443";
caFile = "/etc/kubernetes/pki/ca.crt";
certFile = "/etc/kubernetes/pki/controller-manager.crt"; # Client Cert
keyFile = "/etc/kubernetes/pki/controller-manager.key";
};
# 4. HTTPS Serving Certs (for metrics/health)
tlsCertFile =
"/etc/kubernetes/pki/controller-manager.crt"; # Reusing client cert is fine here
tlsKeyFile = "/etc/kubernetes/pki/controller-manager.key";
};
};
}

@@ -12,9 +12,16 @@
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKyZOZlcQBmqSPxjaGgE2tP+K7LYziqjFUo3EX12rGtf botlap@nixos"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBLSUXsao6rjC3FDtRHhh7z6wqMtA/mqL50e1Dj9a2wE botserver@botserver"
];
shell = pkgs.fish;
};
programs.fish = { enable = true; };
documentation.man.generateCaches = false;
services.openssh = { enable = true; };
imports = [ ./../../modules/getNvim.nix ];
imports = [ ./../../modules/getNvim.nix ./kubernetes.nix ];
environment.systemPackages = with pkgs; [
neovim
git
@@ -30,6 +37,8 @@
btop
openssh
ripgrep
dig
argocd
];
# --- MicroVM Specific Settings ---
@@ -39,15 +48,9 @@
# Create a tap interface or user networking
interfaces = [{
type = "user"; # 'user' networking is easiest for testing (slirp)
id = "eth0";
mac = "02:00:00:00:00:01";
}];
forwardPorts = [{
from = "host";
host.port = 2222;
guest.port = 22;
type = "tap";
id = "microvm-tap2"; # Matches the host's second tap
mac = "02:00:00:00:00:02";
}];
# Mount the host's /nix/store explicitly (read-only)
@@ -66,5 +69,38 @@
}];
};
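# br_netfilter makes bridged pod traffic traverse iptables, which kube-proxy
# and flannel rely on.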
boot.kernelModules = [ "br_netfilter" ];
networking = {
hostName = "kube-vm";
useNetworkd = true;
firewall.enable = false;
# 1. Define the interface explicitly
interfaces.enp0s4.ipv4.addresses = [{
address = "10.0.0.3";
prefixLength = 24;
}];
# 2. Fix: Specify both address AND interface for the gateway
defaultGateway = {
address = "10.0.0.1";
interface = "enp0s4";
};
nameservers = [ "1.1.1.1" "8.8.8.8" ];
};
# Allow passwordless root login for testing (Do not use in production!)
services.getty.autologinUser = "root";
users.users.root.password = "";
systemd.network.enable = true;
systemd.network.networks."11-microvm" = {
matchConfig.Name = "vm-*";
# Attach to the bridge that was configured above
networkConfig.Bridge = "microvm";
};
system.stateVersion = "24.11";
}

@@ -1,72 +1,36 @@
{ pkgs, ... }:
{ config, pkgs, ... }:
let
kubeMasterIP = "37.49.130.171";
kubeMasterHostname = "polycule.deprived";
kubeMasterIP = "176.23.63.215";
kubeMasterHostname = "clussy.deprived.dev";
kubeMasterAPIServerPort = 6443;
in {
nixpkgs.overlays = [
(final: prev: {
containerd = prev.containerd.overrideAttrs rec {
version = "1.7.29";
in
{
# resolve master hostname
networking.extraHosts = "${kubeMasterIP} ${kubeMasterHostname}";
networking.firewall.enable = false;
src = final.fetchFromGitHub {
owner = "containerd";
repo = "containerd";
rev = "v${version}";
sha256 = "sha256-aR0i+0v2t6vyI+QN30P1+t+pHU2Bw7/XPUYLjJm1rhw=";
};
# packages for administration tasks
environment.systemPackages = with pkgs; [ kompose kubectl kubernetes ];
installTargets = [ "install" ];
outputs = [ "out" ];
};
})
];
services.kubernetes =
let
api = "https://${kubeMasterHostname}:${toString kubeMasterAPIServerPort}";
in
{
roles = [ "node" ];
masterAddress = kubeMasterHostname;
easyCerts = true;
virtualisation.containerd.enable = true;
environment.systemPackages = with pkgs; [
kompose
kubectl
kubernetes
argocd
openiscsi
nfs-utils
];
# point kubelet and other services to kube-apiserver
kubelet.kubeconfig.server = api;
apiserverAddress = api;
networking.useNetworkd = true;
networking.extraHosts = ''
${kubeMasterIP} ${kubeMasterHostname}
192.168.50.82 botkube'';
services.kubernetes = let
api = "https://${kubeMasterHostname}:${toString kubeMasterAPIServerPort}";
in {
roles = [ "node" ];
masterAddress = kubeMasterHostname;
easyCerts = true;
apiserver.allowPrivileged = true;
# use coredns
addons.dns.enable = true;
flannel.enable = true;
# point kubelet and other services to kube-apiserver
kubelet.kubeconfig.server = api;
apiserverAddress = api;
# use coredns
addons.dns.enable = true;
# needed if you use swap
kubelet.extraOpts =
"--fail-swap-on=false --resolv-conf=/run/systemd/resolve/resolv.conf";
};
systemd.services."forward-argocd" = {
enable = true;
description =
"forwards argocd running on kubernetes to argocd.spoodythe.one";
after = [ "network-online.target" "kubelet.service" ];
wants = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ];
script = ''
${pkgs.kubernetes}/bin/kubectl port-forward svc/argocd-server -n argocd --address 0.0.0.0 4325:80
'';
serviceConfig = { User = "botserver"; };
};
# needed if you use swap
kubelet.extraOpts = "--fail-swap-on=false";
};
}

vms/kube-vm2/default.nix (new file)

@@ -0,0 +1,6 @@
{ pkgs, ... }: {
environment.systemPackages = with pkgs; [ virtiofsd ];
microvm.autostart = [ "kube-vm2" ];
microvm.vms."kube-vm2" = { config = ./kube-vm.nix; };
}

vms/kube-vm2/kube-vm.nix (new file)

@@ -0,0 +1,78 @@
{ pkgs, ... }: {
users.users.root = {
openssh.authorizedKeys.keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAhiPhFbCi64NduuV794omgS8mctBLXtqxbaEJyUo6lg botalex@DESKTOPSKTOP-ENDVV0V"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIFhTExbc9m4dCK6676wGiA8zPjE0l/9Fz2yf0IKvUvg snorre@archlinux"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGxUPAsPkri0B+xkO3sCHJZfKgAbgPcepP8J4WW4yyLj u0_a167@localhost"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfQLOKUnOARUAs8X1EL1GRHoCQ0oMun0vzL7Z78yOsM nixos@nixos"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJw1ckvXz78ITeqANrWSkJl6PJo2AMA4myNrRMBAB7xW zhentao2004@gmail.com"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKhcUZbIMX0W27l/FMF5WijpdsJAK329/P008OEAfcyz botmain@nixos"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILB0esg3ABIcYWxvQKlPuwEE6cbhNcWjisfky0wnGirJ root@nixos"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGxUPAsPkri0B+xkO3sCHJZfKgAbgPcepP8J4WW4yyLj u0_a167@localhost"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKyZOZlcQBmqSPxjaGgE2tP+K7LYziqjFUo3EX12rGtf botlap@nixos"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBLSUXsao6rjC3FDtRHhh7z6wqMtA/mqL50e1Dj9a2wE botserver@botserver"
];
shell = pkgs.fish;
};
programs.fish = { enable = true; };
documentation.man.generateCaches = false;
services.openssh = { enable = true; };
imports = [ ./../../modules/getNvim.nix ./kubernetes.nix ];
environment.systemPackages = with pkgs; [
neovim
git
wget
curl
busybox
gcc
tree-sitter
nodejs_22
screen
fastfetch
btop
openssh
ripgrep
dig
];
# --- MicroVM Specific Settings ---
microvm = {
# Choose your hypervisor: "qemu", "firecracker", "cloud-hypervisor", etc.
hypervisor = "qemu";
# Create a tap interface or user networking
interfaces = [{
type = "user"; # 'user' networking is easiest for testing (slirp)
id = "eth0";
mac = "02:00:00:00:00:01";
}];
forwardPorts = [{
from = "host";
host.port = 2223;
guest.port = 22;
}];
# Mount the host's /nix/store explicitly (read-only)
# This makes the VM start instantly as it shares the host store.
shares = [{
tag = "ro-store";
source = "/nix/store";
mountPoint = "/nix/.ro-store";
}];
# Writable disk allocation
volumes = [{
image = "/var/lib/microvms/kube-vm2/kube-vm2.img";
mountPoint = "/";
size = 512 * 4; # Size in MB
}];
};
system.stateVersion = "24.11";
}

@@ -0,0 +1,35 @@
{ config, pkgs, ... }:
let
kubeMasterIP = "176.23.63.215";
kubeMasterHostname = "clussy.deprived.dev";
kubeMasterAPIServerPort = 6443;
in
{
# resolve master hostname
networking.extraHosts = "${kubeMasterIP} ${kubeMasterHostname}";
networking.firewall.enable = false;
# packages for administration tasks
environment.systemPackages = with pkgs; [ kompose kubectl kubernetes ];
services.kubernetes =
let
api = "https://${kubeMasterHostname}:${toString kubeMasterAPIServerPort}";
in
{
roles = [ "node" ];
masterAddress = kubeMasterHostname;
easyCerts = true;
# point kubelet and other services to kube-apiserver
kubelet.kubeconfig.server = api;
apiserverAddress = api;
# use coredns
addons.dns.enable = true;
# needed if you use swap
kubelet.extraOpts = "--fail-swap-on=false";
};
}