# SPDX-FileCopyrightText: 2023 Luke Granger-Brown <depot@lukegb.com>
#
# SPDX-License-Identifier: Apache-2.0

{ depot, lib, pkgs, config, ... }:
let
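  # Caddy vhosts for internal services. `_apply` merges each entry over
  # hostBase with lib.recursiveUpdate, and every site starts with ${bind}
  # (defined below), so they only listen on this host's Tailscale addresses:
  # `proxy` reverse-proxies to a local port, `serve` serves a path from the
  # depot, and `other` is passed through unchanged.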
  vhostsConfig = {
    int = rec {
      proxy = _apply (value: { extraConfig = ''
        ${bind}
        reverse_proxy ${value}
      ''; }) {
        "http://deluge.int.lukegb.com" = "http://localhost:8112";
        "http://radarr.int.lukegb.com" = "http://localhost:7878";
        "http://sonarr.int.lukegb.com" = "http://localhost:8989";
      };
      serve = _apply (value: { extraConfig = ''
        ${bind}
        root * ${value}
        file_server
      ''; }) {
        "http://int.lukegb.com" = depot.web.int;
        "http://logged-out.int.lukegb.com" = depot.web.logged-out-int;
      };
      other = _apply lib.id {
        "http://content.int.lukegb.com" = {
          serverAliases = ["http://content.int.lukegb.com:18081"];
          extraConfig = ''
            ${bind}
            root * /store/content
            file_server browse
          '';
        };
      };
      _apply = f: builtins.mapAttrs (name: value: lib.recursiveUpdate hostBase (f value));
    };
  };
  vhosts = vhostsConfig.int.proxy // vhostsConfig.int.serve // vhostsConfig.int.other // {
    "https://plex.lukegb.xyz" = {
      extraConfig = ''
        tls /var/lib/acme/plex.lukegb.xyz/fullchain.pem /var/lib/acme/plex.lukegb.xyz/privkey.pem
        redir https://plex.lukegb.xyz:32400{uri}
      '';
    };
    "http://plex.lukegb.xyz" = {
      extraConfig = ''
        redir https://plex.lukegb.xyz:32400{uri}
      '';
    };
  };
  hostBase = {
    extraConfig = ''
      ${bind}
    '';
  };
  bind = "bind [${config.my.ip.tailscale6}] ${config.my.ip.tailscale}";
in
{
  imports = [
    ../lib/zfs.nix
    ../lib/bgp.nix
    ../lib/whitby-distributed.nix
    ../lib/nixbuild-distributed.nix
    #../lib/gitlab-runner-cacher.nix
    ../lib/coredns/default.nix
    ../lib/deluge.nix
    ../lib/plex.nix
    ./vm-bridge.nix
    #./vxlan-bridge.nix
  ];

  my.plex.customTLS = {
    enable = true;
    domain = "plex.lukegb.xyz";
  };
  users.users.caddy.extraGroups = lib.mkAfter [ "plexcert" ];

  # Otherwise _this_ machine won't enumerate things properly.
  boot.zfs.devNodes = "/dev/disk/by-id";

  boot.initrd = {
    availableKernelModules = [
      "nvme"
      "xhci_pci"
      "ahci"
      "usb_storage"
      "usbhid"
      "sd_mod"
      "sr_mod"
    ];
  };
  boot.kernelModules = [ "kvm-amd" ];
  hardware.cpu.amd.updateMicrocode = true;

  # Use the systemd-boot EFI boot loader.
  boot.loader.systemd-boot.enable = true;
  boot.loader.efi.canTouchEfiVariables = true;

  boot.blacklistedKernelModules = [ "ib_core" "irdma" ];

  powerManagement.cpuFreqGovernor = lib.mkDefault "performance";
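  # The root dataset is rolled back to the blank @blank snapshot on boot;
  # anything that should survive a reboot lives on /persist or /store
  # (see fileSystems below and the /persist bind mounts further down).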
  services.zfs.rollbackOnBoot = {
    enable = true;
    snapshot = "zfast/local/root@blank";
  };

  fileSystems = let
    zfs = device: {
      device = device;
      fsType = "zfs";
    };
  in {
    "/" = zfs "zfast/local/root";
    "/nix" = zfs "zfast/local/nix";
    "/tmp" = zfs "zfast/local/tmp";

    "/persist" = zfs "zfast/safe/persist";
    "/store" = zfs "zslow/safe/store";
    "/home" = (zfs "zslow/safe/home") // { neededForBoot = true; };

    "/boot" = {
      device = "/dev/disk/by-label/ESP";
      fsType = "vfat";
    };
    "/boot2" = {
      device = "/dev/disk/by-label/ESP2";
      fsType = "vfat";
    };
  };
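  # Mirror the primary ESP onto the second disk's ESP whenever the
  # bootloader is (re)installed, so either disk can boot the machine.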
  boot.loader.systemd-boot.extraInstallCommands = ''
    rsync -a /boot/ /boot2/
  '';

  nix.settings.max-jobs = lib.mkDefault 8;

  # Networking!
  networking = {
    hostName = "cofractal-ams01";
    domain = "as205479.net";
    hostId = "a1cf1a9f";
    useNetworkd = true;

    nameservers = [
      "2001:4860:4860::8888"
      "2001:4860:4860::8844"
      "8.8.8.8"
      "8.8.4.4"
    ];
    bonds.bond0 = {
      interfaces = [ "enp45s0f0np0" "enp45s0f1np1" ];
      driverOptions = {
        miimon = "100";
        mode = "802.3ad";
      };
    };
    defaultGateway6 = {
      address = "2a09:a446:1337:ffff::1";
      interface = "bond0";
    };
    interfaces.bond0 = {
      ipv6.addresses = [
        { address = "2a09:a446:1337::10"; prefixLength = 64; }
        { address = "2a09:a446:1337:ffff::10"; prefixLength = 120; }
      ];
      ipv4.addresses = [
        { address = "199.19.152.160"; prefixLength = 30; }
      ];
    };
    firewall.interfaces.bond0.allowedTCPPorts = [
      32400 # Plex
      4001 # IPFS
      80 # HTTP
      443 # HTTPS
    ];
    firewall.interfaces.bond0.allowedUDPPorts = [
      34197 # factorio
      4001 # IPFS
      443 # HTTP/3
      51821 51822 51823 # wireguard
    ];
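    # Custom FORWARD policy (ip46tables applies each rule to both iptables
    # and ip6tables): allow forwarding to/from br-public and the quadv2
    # tunnel plus established/related traffic, reject everything else.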
    firewall.extraCommands = ''
      # Flush old rules.
      ip46tables -D FORWARD -j lukegb-forward 2>/dev/null || true
      for chain in lukegb-forward lukegb-fwd-accept lukegb-fwd-reject; do
        ip46tables -F "$chain" 2>/dev/null || true
        ip46tables -X "$chain" 2>/dev/null || true
      done

      ip46tables -N lukegb-fwd-accept
      ip46tables -A lukegb-fwd-accept -j ACCEPT

      ip46tables -N lukegb-fwd-reject
      ip46tables -A lukegb-fwd-reject -p tcp ! --syn -j REJECT --reject-with tcp-reset
      ip46tables -A lukegb-fwd-reject -j REJECT

      ip46tables -N lukegb-forward

      ip46tables -A lukegb-forward -i br-public -j lukegb-fwd-accept
      ip46tables -A lukegb-forward -o br-public -j lukegb-fwd-accept

      # Accept from "trusted" quadv2 interface
      ip46tables -A lukegb-forward -i quadv2 -j lukegb-fwd-accept

      # Accept to quadv2 interface if we're multipathing.
      ip46tables -A lukegb-forward -o quadv2 -j lukegb-fwd-accept

      # Accept from established/related connections.
      ip46tables -A lukegb-forward -m conntrack --ctstate ESTABLISHED,RELATED -j lukegb-fwd-accept

      # Set up the firewall.
      ip46tables -A lukegb-forward -j lukegb-fwd-reject
      ip46tables -A FORWARD -j lukegb-forward
    '';
  };
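  # systemd-networkd netdevs/networks for the WireGuard tunnels. The
  # `wireguard` helper builds a netdev with a single catch-all peer
  # (AllowedIPs 0.0.0.0/0 and ::/0); swannWireguard/rexxarWireguard fill in
  # the peer's public key and a private-key file rendered by Vault (see
  # my.vault.secrets at the bottom of this file).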
  systemd.network = let
    wireguard = { name, listenPort, privateKey, publicKey, endpoint ? null }: {
      netdevConfig = {
        Name = name;
        Kind = "wireguard";
        Description = "WireGuard tunnel ${name}";
      };
      wireguardConfig = {
        ListenPort = listenPort;
        PrivateKeyFile = privateKey;
      };
      wireguardPeers = [{
        PublicKey = publicKey;
        AllowedIPs = [
          "0.0.0.0/0"
          "::/0"
        ];
        Endpoint = endpoint;
      }];
    };
    swannWireguard = args: wireguard (args // {
      privateKey = config.my.vault.secrets.wg-swann-private.path;
      publicKey = "N7nMSpFl+t+FVluRJY8dGJuB4Yn11mJlBW5+LwFqOhg=";
    });
    rexxarWireguard = args: wireguard (args // {
      privateKey = config.my.vault.secrets.wg-rexxar-private.path;
      publicKey = "Rhzn9S8WLpoohsk0Y2oanQSa9waThlK7dbA7ufzzMSU=";
    });
  in {
    netdevs."40-wg-swann-ee" = swannWireguard {
      name = "wg-swann-ee";
      listenPort = 51821;
    };
    netdevs."40-wg-swann-gnet" = swannWireguard {
      name = "wg-swann-gnet";
      listenPort = 51822;
      endpoint = "185.250.189.20:51822";
    };
    netdevs."40-wg-rexxar" = rexxarWireguard {
      name = "wg-rexxar";
      listenPort = 51823;
    };

networks."40-wg-swann-ee" = {
|
|
matchConfig.Name = "wg-swann-ee";
|
|
address = [
|
|
"92.118.30.1/31"
|
|
"2a09:a442::1:2/112"
|
|
];
|
|
};
|
|
networks."40-wg-swann-gnet" = {
|
|
matchConfig.Name = "wg-swann-gnet";
|
|
address = [
|
|
"92.118.30.7/31"
|
|
"2a09:a442::4:2/112"
|
|
];
|
|
};
|
|
networks."40-wg-rexxar" = {
|
|
matchConfig.Name = "wg-rexxar";
|
|
address = [
|
|
"169.254.200.1/31"
|
|
];
|
|
};
|
|
|
|
networks."40-bond0".linkConfig.RequiredForOnline = "yes";
|
|
networks."40-enp45s0f0np0".linkConfig.RequiredForOnline = "no";
|
|
networks."40-enp45s0f1np1".linkConfig.RequiredForOnline = "no";
|
|
networks."50-tailscale".linkConfig.RequiredForOnline = "no";
|
|
networks."60-lo" = {
|
|
matchConfig.Name = "lo";
|
|
addresses = [{
|
|
Address = "127.0.0.1/8";
|
|
Scope = "host";
|
|
} {
|
|
Address = "::1/128";
|
|
} {
|
|
Address = "92.118.30.252/32";
|
|
} {
|
|
Address = "2a09:a442:2000::/128";
|
|
}];
|
|
};
|
|
|
|
    netdevs.quadv2 = {
      netdevConfig = {
        Name = "quadv2";
        Kind = "wireguard";
      };

      wireguardConfig = {
        PrivateKeyFile = pkgs.writeText "cofractal-ams01-quadv" depot.ops.secrets.wireguard.quadv2.lukegb.privateKey;
        ListenPort = 51820;
        RouteTable = "off";
      };

      wireguardPeers = [{
        PublicKey = depot.ops.secrets.wireguard.quadv2.quadv.publicKey;
        AllowedIPs = "0.0.0.0/0,::/0";
      }];
    };
    networks.quadv2 = {
      matchConfig.Name = "quadv2";
      networkConfig.Address = "169.254.112.0/31";

      routes = [{
        Gateway = "169.254.112.1";
        Destination = "92.118.31.0/24";
      }];
    };
  };
  my.ip.tailscale = "100.83.36.130";
  my.ip.tailscale6 = "fd7a:115c:a1e0:ab12:4843:cd96:6253:2482";
  my.coredns.bind = [ "bond0" "tailscale0" "127.0.0.1" "::1" ];

  services.openssh.hostKeys = [
    {
      path = "/persist/etc/ssh/ssh_host_ed25519_key";
      type = "ed25519";
    }
    {
      path = "/persist/etc/ssh/ssh_host_rsa_key";
      type = "rsa";
      bits = 4096;
    }
  ];

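  # Bind-mount stateful directories out of /persist into their usual
  # locations, ordered before the services that need them.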
  systemd.mounts = let
    bindMount' = dir: {
      unitConfig.RequiresMountsFor = dir;
      options = "bind";
      what = "/persist${dir}";
      where = dir;
    };
    bindMountSvc = dir: svc: (bindMount' dir) // {
      requiredBy = [svc];
      before = [svc];
      wantedBy = ["multi-user.target"];
    };
    bindMountSvcDynamic = dir: svc: (bindMount' "/var/lib/private/${dir}") // {
      requiredBy = [svc];
      before = [svc];
      wantedBy = ["multi-user.target"];
    };
    bindMount = dir: (bindMount' dir) // {
      wantedBy = ["multi-user.target"];
    };
  in [
    (bindMountSvc "/var/lib/tailscale" "tailscaled.service")
    (bindMountSvc "/var/lib/private/factorio" "factorio.service")
    (bindMountSvc "/var/lib/libvirt" "libvirt.service")
  ];

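  # BGP for AS205479 via the lukegbgp module: iBGP to swann over the
  # wg-swann-* tunnels (with BFD on those neighbors), eBGP to Cofractal
  # (AS26073) over bond0's link subnet, and eBGP to quadv (AS197753) over
  # the quadv2 tunnel.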
  services.lukegbgp = let
    local.asn = 205479;
  in {
    enable = true;
    config = let
      peer.swann = {
        local.asn = 205479;
        remote.bfd = true;
        remote.asn = 205479;
        remote.must_be_next_hop = false;
        remote.export_community = 10;
        remote.export_internal = true;
      };
    in {
      local = {
        routerID = "199.19.152.160";
      };
      export.v4 = [ "92.118.28.0/24" ];
      export.v6 = [ "2a09:a446:1337::/48" "2a09:a442::/48" "2a09:a442:2000::/48" ];

      internal.export.v4 = [ "92.118.30.252/32" ];
      internal.export.v4Extra = ''
        route 92.118.30.0/31 via "wg-swann-ee";
        route 92.118.30.6/31 via "wg-swann-gnet";
      '';
      internal.export.v6 = [ "2a09:a446:1337::/48" "2a09:a442:2000::/48" "2a09:a442::/48" ];
      internal.export.v6Extra = ''
        route 2a09:a442::1:0/112 via "wg-swann-ee";
        route 2a09:a442::4:0/112 via "wg-swann-gnet";
      '';

      peering.swann_ee = lib.mkMerge [peer.swann {
        local.v4 = "92.118.30.1";
        local.v6 = "2a09:a442::1:2";
        remote.routers = [{
          enabled = true;
          v4 = "92.118.30.0";
          v6 = "2a09:a442::1:1";
        }];
        remote.bgp_local_pref = 10;
      }];
      peering.swann_gnet = lib.mkMerge [peer.swann {
        local.v4 = "92.118.30.7";
        local.v6 = "2a09:a442::4:2";
        remote.routers = [{
          enabled = true;
          v4 = "92.118.30.6";
          v6 = "2a09:a442::4:1";
        }];
      }];

      peering.cofractal = {
        local = local // {
          v6 = "2a09:a446:1337:ffff::10";
        };
        v4onv6 = true;
        remote = {
          asn = 26073;
          export_community = 6000;
          routers = [{
            v6 = "2a09:a446:1337:ffff::2";
          } {
            v6 = "2a09:a446:1337:ffff::3";
          }];
        };
      };

      peering.quadv = {
        local = local // {
          v4 = "169.254.112.0";
        };
        remote = {
          asn = 197753;
          export_community = 4099;
          routers = [{
            v4 = "169.254.112.1";
          }];
          prefix_limit.v4 = 10;
          prefix_limit.v6 = 10;
          set_imported_next_hop_to = "2a09:a446:1337:ffff::10";
        };
      };

      bfd = ''
        interface "*" {
          min rx interval 10ms;
          min tx interval 50ms;
          idle tx interval 1s;
          multiplier 20;
        };
        neighbor 92.118.30.0;
        neighbor 2a09:a442::1:1;
        neighbor 92.118.30.6;
        neighbor 2a09:a442::4:1;
      '';
    };
  };

  boot.binfmt.emulatedSystems = [ "aarch64-linux" ];

  services.sonarr.enable = true;
  services.radarr.enable = true;
  services.kubo = {
    enable = false; # kubo is so expensive for no reason :/
    settings = {
      Discovery.MDNS.Enabled = false;
      Swarm.DisableNatPortMap = true;
      Experimental.FilestoreEnabled = true;
    };
    dataDir = "/store/ipfs";
  };
  services.caddy = {
    enable = true;
    virtualHosts = vhosts;
  };

  services.factorio = {
    inherit (depot.ops.secrets.factorio) username token;
    enable = true;
    saveName = "lukegb20230312-krastorio2";
    game-name = "Briefcase Full of Bees";
    mods = depot.nix.pkgs.factorio-mods._all;
    mods-dat = ./mod-settings.dat;
    admins = ["lukegb"];
    extraSettings = {
      auto_pause = true;
      only_admins_can_pause_the_game = false;
      game_password = depot.ops.secrets.factorioServerPassword;
      non_blocking_saving = true;
      autosave_only_on_server = true;
      autosave_interval = 5;
      autosave_slots = 60;
    };
  };

  virtualisation.libvirtd = {
    enable = true;
  };
  security.polkit.enable = true;
  users.users.lukegb.extraGroups = lib.mkAfter [ "libvirtd" ];

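  # WireGuard private keys are templated out of Vault's kv store into files
  # readable by the systemd-network group; they're referenced above via
  # config.my.vault.secrets.<name>.path.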
  my.vault.secrets = let
    wireguardSecret = key: {
      group = "systemd-network";
      template = ''
        {{- with secret "kv/apps/wireguard/cofractal-ams01" -}}
        {{- .Data.data.${key} -}}
        {{- end -}}
      '';
    };
  in {
    wg-swann-private = wireguardSecret "privateKeyToSwann";
    wg-rexxar-private = wireguardSecret "privateKeyToRexxar";
  };

  system.stateVersion = "23.05";
}