# depot/ops/nixos/etheroute-lon01/default.nix
# SPDX-FileCopyrightText: 2020 Luke Granger-Brown <depot@lukegb.com>
#
# SPDX-License-Identifier: Apache-2.0
{ depot, lib, pkgs, config, ... }:
{
  # Shared configuration pulled in from elsewhere in the depot:
  # BGP, ZFS and pomerium helpers, plus one module borrowed from
  # the totoro host.
  imports = [
    ../lib/bgp.nix
    ../lib/zfs.nix
    ../lib/pomerium.nix
    ../totoro/barf.nix # eww
  ];
  boot.initrd = {
    # Drivers needed in stage 1 so the disks and the NIC work before
    # the root filesystem is mounted.
    availableKernelModules = [
      "ehci_pci"
      "ahci"
      "usbhid"
      "usb_storage"
      "sd_mod"
      "sr_mod"
      "bnx2" # ethernet
    ];
    # Run networking and an SSH server inside the initrd so the
    # encrypted ZFS datasets can be unlocked remotely after a reboot.
    network = {
      enable = true;
      ssh = {
        enable = true;
        # Host key lives on /persist so it survives the root rollback.
        hostKeys = ["/persist/etc/ssh/ssh_host_ed25519_key"];
        # Reuse lukegb's regular authorized key files for initrd SSH.
        authorizedKeys = map builtins.readFile config.users.users.lukegb.openssh.authorizedKeys.keyFiles;
      };
      # On login, suggest loading the ZFS keys and killing the blocking
      # `zfs` prompt so stage 1 can continue booting.
      postCommands = ''
        echo "zfs load-key -a; killall zfs" >> /root/.profile
      '';
    };
  };
  # Static IP for the initrd network, in the kernel's
  # ip=<client>:<server>:<gateway>:<netmask>:<hostname>:<device>:<autoconf> form.
  boot.kernelParams = [
    "ip=103.141.25.50::103.141.25.49:255.255.255.252:etheroute-lon01:eno1:none"
  ];
  boot.kernelModules = [ "kvm-intel" ];
  # This machine routes traffic, so turn on IPv4 forwarding everywhere.
  boot.kernel.sysctl = {
    "net.ipv4.conf.all.forwarding" = true;
    "net.ipv4.conf.default.forwarding" = true;
  };
  # Use the systemd-boot EFI boot loader.
  boot.loader.systemd-boot.enable = true;
  boot.loader.efi.canTouchEfiVariables = true;
  powerManagement.cpuFreqGovernor = lib.mkDefault "performance";
  # Erase-your-darlings: roll the root dataset back to a blank snapshot
  # on every boot; durable state lives under /persist and /home.
  services.zfs.rollbackOnBoot = {
    enable = true;
    snapshot = "tank/local/root@blank";
  };
  # Don't block boot waiting for the network to come online.
  systemd.services."systemd-networkd-wait-online".wantedBy = lib.mkForce [];
fileSystems = let
zfs = device: {
device = device;
fsType = "zfs";
};
in {
"/" = zfs "tank/local/root";
"/nix" = zfs "tank/local/nix";
"/tmp" = zfs "tank/local/tmp";
"/persist" = zfs "tank/safe/persist";
"/home" = zfs "tank/safe/home";
"/boot" = {
device = "/dev/disk/by-partlabel/ESP";
fsType = "vfat";
};
};
  # Cap concurrent Nix build jobs; mkDefault so it can be overridden.
  nix.settings.max-jobs = lib.mkDefault 8;
  # Networking!
  networking = {
    hostName = "etheroute-lon01";
    domain = "as205479.net";
    # Stable host ID (required for ZFS pool import).
    hostId = "420bee1b";
    useNetworkd = true;
    # Google Public DNS.
    nameservers = [
      "2001:4860:4860::8888"
      "2001:4860:4860::8844"
      "8.8.8.8"
      "8.8.4.4"
    ];
    # Upstream /30 point-to-point link on eno1.
    defaultGateway = {
      address = "103.141.25.49";
      interface = "eno1";
    };
    defaultGateway6 = {
      address = "2a07:242:800:64::1";
      interface = "eno1";
    };
    interfaces.eno1 = {
      ipv4.addresses = [{ address = "103.141.25.50"; prefixLength = 30; }];
      ipv6.addresses = [{ address = "2a07:242:800:64::68"; prefixLength = 64; }];
    };
    #interfaces.quadv1-4 = {
    #  ipv4.addresses = [{ address = "92.118.31.254"; prefixLength = 24; }];
    #  virtual = true;
    #};
    firewall.allowedTCPPorts = [ 80 443 ]; # HTTP/HTTPS (pomerium, below)
    firewall.allowedUDPPorts = [ 51820 ]; # WireGuard (quadv1, below)
    # Custom FORWARD policy: only traffic entering or leaving the quadv1
    # tunnel, or belonging to established flows, may be routed through
    # this box; everything else is rejected. The initial -D/-F/-X dance
    # makes the script idempotent across firewall reloads.
    firewall.extraCommands = ''
      # Flush old rules.
      ip46tables -D FORWARD -j lukegb-forward 2>/dev/null || true
      for chain in lukegb-forward lukegb-fwd-accept lukegb-fwd-reject; do
        ip46tables -F "$chain" 2>/dev/null || true
        ip46tables -X "$chain" 2>/dev/null || true
      done
      ip46tables -N lukegb-fwd-accept
      ip46tables -A lukegb-fwd-accept -j ACCEPT
      ip46tables -N lukegb-fwd-reject
      ip46tables -A lukegb-fwd-reject -p tcp ! --syn -j REJECT --reject-with tcp-reset
      ip46tables -A lukegb-fwd-reject -j REJECT
      ip46tables -N lukegb-forward
      # Accept from "trusted" quadv1 interface
      ip46tables -A lukegb-forward -i quadv1 -j lukegb-fwd-accept
      # Accept to quadv1 interface if we're multipathing.
      ip46tables -A lukegb-forward -o quadv1 -j lukegb-fwd-accept
      # Accept from established/related connections.
      ip46tables -A lukegb-forward -m conntrack --ctstate ESTABLISHED,RELATED -j lukegb-fwd-accept
      # Set up the firewall.
      ip46tables -A lukegb-forward -j lukegb-fwd-reject
      ip46tables -A FORWARD -j lukegb-forward
    '';
  };
my.ip.tailscale = "100.99.227.112";
my.ip.tailscale6 = "fd7a:115c:a1e0:ab12:4843:cd96:6263:e370";
  # WireGuard tunnel to quadv (peer AS197753; see services.lukegbgp).
  systemd.network.netdevs.quadv1 = {
    netdevConfig = {
      Name = "quadv1";
      Kind = "wireguard";
    };
    wireguardConfig = {
      # SECURITY NOTE(review): pkgs.writeText materialises the WireGuard
      # private key in the world-readable Nix store. Consider delivering
      # it out-of-band (e.g. a root-only file under /persist) instead.
      PrivateKeyFile = pkgs.writeText "etheroute-lon01-quadv" depot.ops.secrets.wireguard.quadv1.lukegb.privateKey;
      ListenPort = 51820;
      # Don't auto-install routes for AllowedIPs; routing is handled
      # explicitly (static route below, plus BGP).
      RouteTable = "off";
    };
    wireguardPeers = [{
      PublicKey = depot.ops.secrets.wireguard.quadv1.quadv.publicKey;
      # Allow any address through the tunnel; filtering happens in the
      # firewall's FORWARD chain instead.
      AllowedIPs = "0.0.0.0/0,::/0";
    }];
  };
  # Addressing and routing for the quadv1 tunnel: our side of a
  # link-local /31, plus a static route to quadv's prefix via theirs.
  systemd.network.networks.quadv1 = {
    matchConfig.Name = "quadv1";
    networkConfig.Address = "169.254.111.0/31";
    routes = [{
      Gateway = "169.254.111.1";
      Destination = "92.118.31.0/24";
    }];
  };
services.openssh.hostKeys = [
{
path = "/persist/etc/ssh/ssh_host_ed25519_key";
type = "ed25519";
}
{
path = "/persist/etc/ssh/ssh_host_rsa_key";
type = "rsa";
bits = 4096;
}
];
users.users = {
lukegb.extraGroups = [ "bird2" ];
};
  # BGP sessions for AS205479 (option semantics declared in ../lib/bgp.nix).
  services.lukegbgp = let local = {
    asn = 205479;
  }; in {
    enable = true;
    config = {
      local = {
        routerID = "103.141.25.50";
      };
      peering = {
        # Upstream transit: Etheroute (AS3170), over the eno1 /30.
        etheroute = {
          local = local // {
            v4 = "103.141.25.50";
            v6 = "2a07:242:800:64::68";
          };
          remote = {
            asn = 3170;
            export_community = 4000;
            routers = [{
              v4 = "103.141.25.49";
              v6 = "2a07:242:800:64::1";
            }];
          };
        };
        # bgp.tools route collector (AS212232): monitoring-only session,
        # reached via multihop.
        bgptoolscollector = {
          local = local // {
            v4 = "103.141.25.50";
            v6 = "2a07:242:800:64::68";
          };
          remote = {
            asn = 212232;
            export_community = 5000;
            is_route_collector = true;
            routers = [{
              v4 = "185.230.223.48";
              v6 = "2a0c:2f07:9459::b6";
              multihop = 64;
            }];
            # NOTE(review): presumably 0 means "accept no prefixes" from
            # the collector — confirm against ../lib/bgp.nix.
            prefix_limit.v4 = 0;
            prefix_limit.v6 = 0;
          };
        };
        # quadv (AS197753), over the quadv1 WireGuard tunnel's /31.
        quadv = {
          local = local // {
            v4 = "169.254.111.0";
          };
          remote = {
            asn = 197753;
            export_community = 4099;
            routers = [{
              v4 = "169.254.111.1";
            }];
            prefix_limit.v4 = 10;
            prefix_limit.v6 = 10;
          };
        };
      };
    };
  };
systemd.mounts = let
bindMount' = dir: {
unitConfig.RequiresMountsFor = dir;
options = "bind";
what = "/persist${dir}";
where = dir;
};
bindMountSvc = dir: svc: (bindMount' dir) // {
bindsTo = [svc];
partOf = [svc];
};
bindMountSvcDynamic = dir: svc: (bindMount' "/var/lib/private/${dir}") // {
requiredBy = [svc];
before = [svc];
wantedBy = ["multi-user.target"];
};
bindMount = dir: (bindMount' dir) // {
wantedBy = ["multi-user.target"];
};
in [
(bindMountSvc "/var/lib/tailscale" "tailscaled.service")
(bindMountSvcDynamic "barf-fe" "barf-fe.service")
];
  # Local Redis (the default "" instance); pomerium is ordered after it
  # below.
  services.redis.servers."" = {
    enable = true;
    bind = "127.0.0.1"; # localhost only
  };
  # Pomerium identity-aware reverse proxy fronting the internal services.
  services.pomerium = {
    settings = {
      # ACME-issued wildcard certificate (see my.vault.acmeCertificates).
      certificates = [
        { cert = "/var/lib/acme/lukegb.com/fullchain.pem"; key = "/var/lib/acme/lukegb.com/privkey.pem"; }
      ];
      routes = let
        # Default route settings: only lukegb.com identities allowed;
        # identity headers passed to the upstream.
        baseConfig = {
          policy = [{
            allow.and = [{
              domain.is = "lukegb.com";
            }];
          }];
          pass_identity_headers = true;
          timeout = "30s";
        };
        # Route https://<hostName> to a plain-HTTP upstream.
        service = server: hostName: extraConfig: baseConfig // {
          from = "https://${hostName}";
          to = "http://${server}";
          preserve_host_header = true;
        } // extraConfig;
        # Like `service`, but the upstream speaks HTTPS; SNI is set to
        # the public host name.
        secureService = server: hostName: extraConfig: service server hostName ({
          to = "https://${server}";
          tls_server_name = hostName;
        } // extraConfig);
        # Modifier: drop the auth policy entirely (public access).
        public = extraConfig: {
          allow_public_unauthenticated_access = true;
          policy = null;
        } // extraConfig;
      # NOTE(review): ordering matters below — regex-scoped routes for a
      # host precede that host's catch-all route. Preserve the order.
      in [
        (service "localhost:12001" "barf.lukegb.com" (public {}))
        (service "cofractal-ams01.int.as205479.net" "int.lukegb.com" {})
        (service "cofractal-ams01.int.as205479.net" "logged-out.int.lukegb.com" (public {}))
        (service "cofractal-ams01.int.as205479.net" "sonarr.int.lukegb.com" {})
        (service "cofractal-ams01.int.as205479.net" "radarr.int.lukegb.com" {})
        (service "cofractal-ams01.int.as205479.net" "deluge.int.lukegb.com" {})
        (service "cofractal-ams01.int.as205479.net" "content.int.lukegb.com" (public {
          timeout = "0"; # Downloads can take a while; bump the timeout.
        }))
        (service "totoro.int.as205479.net:9090" "prometheus.int.lukegb.com" {})
        (service "totoro.int.as205479.net:9093" "alertmanager.int.lukegb.com" {})
        (service "totoro.int.as205479.net:3000" "grafana.int.lukegb.com" {})
        (service "totoro.int.as205479.net:10908" "tumblrandom.int.lukegb.com" {})
        (secureService "swann.int.as205479.net:8443" "unifi.int.lukegb.com" {
          tls_skip_verify = true;
          allow_websockets = true;
          timeout = "0";
        })
        # Public paths for invoice clients; authenticated catch-all follows.
        (secureService "totoro.int.as205479.net" "invoices.lukegb.com" (public {
          regex = "^/((third_party|ajax|client_area|pdf)/.*|[a-zA-Z0-9]{8})$";
          tls_skip_verify = true;
        }))
        (secureService "totoro.int.as205479.net" "invoices.lukegb.com" {
          tls_skip_verify = true;
        })
        (baseConfig // {
          from = "https://httpbin.int.lukegb.com";
          to = "https://verify.pomerium.com";
        })
        (service "bvm-twitterchiver.int.as205479.net:8080" "twitterchiver.int.lukegb.com" {})
        (service "bvm-twitterchiver.int.as205479.net:8080" "twitterchiver.lukegb.com" {})
        (service "bvm-nixosmgmt.int.as205479.net:4440" "rundeck.int.lukegb.com" {
          set_request_headers = {
            "X-Forwarded-Roles" = "pomerium";
          };
        })
        (service "bvm-ipfs.int.as205479.net:5001" "ipfs.int.lukegb.com" {})
        (service "bvm-ipfs.int.as205479.net:8080" "ipfs-gw.int.lukegb.com" {})
        (service "bvm-netbox.int.as205479.net:80" "netbox.int.lukegb.com" {})
        (service "localhost:9901" "envoy-debug.int.lukegb.com" {})
        # Paperless: websocket and API paths get no timeout; catch-all last.
        (service "bvm-paperless.int.as205479.net:28981" "paperless.int.lukegb.com" {
          regex = "^/ws/.*";
          allow_websockets = true;
          timeout = "0";
        })
        (service "bvm-paperless.int.as205479.net:28981" "paperless.int.lukegb.com" {
          regex = "^/api/.*";
          timeout = "0";
        })
        (service "bvm-paperless.int.as205479.net:28981" "paperless.int.lukegb.com" {})
        (service "totoro.int.as205479.net:1880" "nodered.int.lukegb.com" {
          allow_websockets = true;
          timeout = "0";
        })
        (service "totoro.int.as205479.net:3002" "code.int.lukegb.com" {
          allow_websockets = true;
          timeout = "0";
        })
        (service "totoro.int.as205479.net:8099" "zigbee2mqtt.int.lukegb.com" {
          allow_websockets = true;
          timeout = "0";
        })
        # These services are included for policy reasons only.
        # They have their own reverse proxy instances.
        (service "localhost:3000" "git.lukegb.com" {
          policy = [{
            allow.not = [{
              http_path.starts_with = "/user/login";
            }];
          } {
            allow.and = [{
              domain.is = "lukegb.com";
            }];
          }];
        })
      ];
    };
  };
  systemd.services.pomerium = {
    # Pomerium uses the local Redis instance; start it first.
    wants = lib.mkAfter [ "redis.service" ];
    after = lib.mkAfter [ "redis.service" ];
    serviceConfig = {
      # Read-only access to the ACME certificates configured above.
      SupplementaryGroups = [ "acme" ];
      ReadOnlyPaths = [ "/var/lib/acme" ];
    };
  };
  # Wildcard certificate for lukegb.com — presumably issued via a
  # Vault-backed ACME flow (option declared elsewhere in the depot);
  # pomerium is reloaded/restarted on renewal.
  my.vault.acmeCertificates."lukegb.com" = {
    hostnames = [
      "lukegb.com"
      "*.lukegb.com"
      "*.int.lukegb.com"
    ];
    reloadOrRestartUnits = [ "pomerium.service" ];
  };
  # Group gating read access to /var/lib/acme (pomerium joins it via
  # SupplementaryGroups).
  users.groups.acme = {};
  system.stateVersion = "22.11"; # Pin state defaults; don't bump casually.
}