# SPDX-FileCopyrightText: 2020 Luke Granger-Brown <depot@lukegb.com>
#
# SPDX-License-Identifier: Apache-2.0

# NixOS configuration for the etheroute-lon01 machine.
# NOTE(review): `rebuilder` and `pkgs` are taken from the module arguments but
# not referenced in the visible portion of this file.
{ depot, lib, pkgs, rebuilder, config, ... }:
let
  inherit (depot.ops) secrets;
  # Secrets scoped to this machine only (see depot.ops.secrets).
  machineSecrets = secrets.machineSpecific.etheroute-lon01;
in {
  # Shared machine profiles (BGP, ZFS) plus the in-repo pomerium module.
  imports = [
    ../lib/bgp.nix
    ../lib/zfs.nix

    ../../../nix/pkgs/pomerium/module.nix
  ];

  # Initrd with networking and an SSH server so the ZFS encryption keys can be
  # supplied remotely after a reboot.
  boot.initrd = {
    availableKernelModules = [
      "ehci_pci"
      "ahci"
      "usbhid"
      "usb_storage"
      "sd_mod"
      "sr_mod"
      "bnx2"  # ethernet
    ];
    network = {
      enable = true;
      ssh = {
        enable = true;
        # Host key is kept under /persist so it survives the root rollback.
        hostKeys = ["/persist/etc/ssh/ssh_host_ed25519_key"];
        # Reuse lukegb's authorized key files; they are read at evaluation
        # time and embedded into the initrd.
        authorizedKeys = map builtins.readFile config.users.users.lukegb.openssh.authorizedKeys.keyFiles;
      };
      # Drop a reminder into root's initrd shell profile: an operator who
      # SSHes in loads the ZFS keys and kills the stage-1 `zfs` prompt so
      # boot can continue.
      postCommands = ''
        echo "zfs load-key -a; killall zfs" >> /root/.profile
      '';
    };
  };
  # Static IP for stage 1 (format: ip=<client>::<gateway>:<netmask>:<hostname>:<iface>:none)
  # so the initrd SSH server above is reachable before the pool is unlocked.
  boot.kernelParams = [
    "ip=83.97.19.68::83.97.19.65:255.255.255.224:etheroute-lon01:eno1:none"
  ];
  boot.kernelModules = [ "kvm-intel" ];

  # Use the systemd-boot EFI boot loader.
  boot.loader.systemd-boot.enable = true;
  # Allow NixOS to manage EFI boot entries in NVRAM.
  boot.loader.efi.canTouchEfiVariables = true;

  powerManagement.cpuFreqGovernor = lib.mkDefault "performance";
  # Ephemeral root: roll the root dataset back to its blank snapshot on every
  # boot. Anything worth keeping lives on the tank/safe datasets (/persist,
  # /home) or is bind-mounted from /persist below.
  services.zfs.rollbackOnBoot = {
    enable = true;
    snapshot = "tank/local/root@blank";
  };

  fileSystems = let
    # Shorthand for mounting a ZFS dataset.
    zfsDataset = device: {
      inherit device;
      fsType = "zfs";
    };
  in {
    # Ephemeral datasets (root is rolled back to @blank on each boot).
    "/" = zfsDataset "tank/local/root";
    "/nix" = zfsDataset "tank/local/nix";
    "/tmp" = zfsDataset "tank/local/tmp";

    # Persistent datasets.
    "/persist" = zfsDataset "tank/safe/persist";
    "/home" = zfsDataset "tank/safe/home";

    # EFI system partition.
    "/boot" = {
      device = "/dev/disk/by-partlabel/ESP";
      fsType = "vfat";
    };
  };

  nix.maxJobs = lib.mkDefault 8;

  # Networking!
  networking = {
    hostName = "etheroute-lon01";
    domain = "as205479.net";
    # Stable 8-hex-digit host id (required for ZFS pool import).
    hostId = "420bee1b";

    # Google Public DNS, IPv6 first.
    nameservers = [
      "2001:4860:4860::8888"
      "2001:4860:4860::8844"
      "8.8.8.8"
      "8.8.4.4"
    ];
    # Fully static addressing; mirrors the stage-1 `ip=` kernel parameter.
    useDHCP = false;
    defaultGateway = {
      address = "83.97.19.65";
      interface = "eno1";
    };
    defaultGateway6 = {
      address = "2a07:242:800:64::1";
      interface = "eno1";
    };
    interfaces.eno1 = {
      ipv4.addresses = [{ address = "83.97.19.68"; prefixLength = 27; }];
      ipv6.addresses = [{ address = "2a07:242:800:64::68"; prefixLength = 64; }];
    };
    # Only HTTP/HTTPS exposed (pomerium listens on both, below).
    firewall.allowedTCPPorts = [ 80 443 ];
  };
  # Depot-local option; presumably consumed by shared modules for
  # Tailscale-based addressing — defined outside this file.
  my.ip.tailscale = "100.111.191.21";

  # SSH host keys live on /persist so they survive the root rollback and the
  # machine keeps a stable host identity across reboots.
  services.openssh.hostKeys = [
    {
      path = "/persist/etc/ssh/ssh_host_ed25519_key";
      type = "ed25519";
    }
    {
      path = "/persist/etc/ssh/ssh_host_rsa_key";
      type = "rsa";
      bits = 4096;
    }
  ];

  users.users = {
    # Membership of the bird2 group; presumably for access to the BGP
    # daemon's control socket (bird is configured via services.lukegbgp).
    lukegb.extraGroups = [ "bird2" ];
  };

  # BGP configuration (module from ../lib/bgp.nix, imported above).
  services.lukegbgp = let local = {
    # Local autonomous system number.
    asn = 205479;
  }; in {
    enable = true;
    config = {
      local = {
        routerID = "83.97.19.68";
      };
      peering = {
        # Upstream peering with Etheroute (AS16089), dual-stack.
        etheroute = {
          local = local // {
            v4 = "83.97.19.68";
            v6 = "2a07:242:800:64::68";
          };
          remote = {
            asn = 16089;
            # Community value used by the module when exporting routes.
            export_community = 4000;
            routers = [{
              v4 = "83.97.19.65";
              v6 = "2a07:242:800:64::1";
            }];
          };
        };
      };
    };
  };

  # Bind-mount persistent state from /persist into place (the root dataset is
  # rolled back on every boot). Helper builders:
  #   bindMount'          - base: bind /persist<dir> onto <dir>
  #   bindMountSvc        - mount tied to a service's lifecycle
  #   bindMountSvcDynamic - variant for DynamicUser state (/var/lib/private)
  #   bindMount           - standalone mount pulled in by multi-user.target
  # NOTE(review): only bindMountSvc is used on this machine; the other two
  # wrappers look like shared boilerplate kept for parity with other hosts.
  systemd.mounts = let
    bindMount' = dir: {
      unitConfig.RequiresMountsFor = dir;
      options = "bind";
      what = "/persist${dir}";
      where = dir;
    };
    bindMountSvc = dir: svc: (bindMount' dir) // {
      bindsTo = [svc];
      partOf = [svc];
    };
    bindMountSvcDynamic = dir: svc: (bindMount' "/var/lib/private/${dir}") // {
      requiredBy = [svc];
      before = [svc];
      wantedBy = ["multi-user.target"];
    };
    bindMount = dir: (bindMount' dir) // {
      wantedBy = ["multi-user.target"];
    };
  in [
    # Keep tailscale's node state across the root rollback.
    (bindMountSvc "/var/lib/tailscale" "tailscaled.service")
  ];

  # Local redis, loopback only; used as pomerium's databroker storage
  # (see databroker_storage_connection_string below).
  services.redis = {
    enable = true;
    bind = "127.0.0.1";
  };
  # Pomerium identity-aware proxy fronting internal services on :80/:443.
  services.pomerium = {
    enable = true;
    # Shared secret / cookie secret etc. come from machine-specific secrets.
    secretsFile = machineSecrets.pomeriumSecrets;

    config = {
      address = ":443";
      http_redirect_addr = ":80";

      # Authenticate against Google, hinting the lukegb.com hosted domain.
      idp_provider = "google";
      idp_client_id = "136257844546-qsa6hi1oqqoq2bnt93deo4e70ggbn1p8.apps.googleusercontent.com";
      idp_request_params = {
        hd = "lukegb.com";
        login_hint = "lukegb@lukegb.com";
      };

      # Forward the authenticated identity to upstreams as signed JWT claims.
      jwt_claims_headers = [
        "email"
        "user"
      ];

      timeout_read = "0";  # We have some long-lived connections...
      timeout_write = "0";
      timeout_idle = "0";

      # Databroker state lives in the local redis (DB 15).
      databroker_storage_type = "redis";
      databroker_storage_connection_string = "redis://127.0.0.1:6379/15";

      forward_auth_url = "https://fwdauth.int.lukegb.com";
      authenticate_service_url = "https://auth.int.lukegb.com";
      signout_redirect_url = "https://logged-out.int.lukegb.com";

      policy = let
        # Defaults for every route: lukegb.com identities only, identity
        # headers passed through, 30s upstream timeout.
        baseConfig = {
          allowed_domains = [ "lukegb.com" ];
          pass_identity_headers = true;
          timeout = "30s";
        };
        # Plain-HTTP upstream `server`, published at https://<hostName>.
        service = server: hostName: extraConfig: baseConfig // {
          from = "https://${hostName}";
          to = "http://${server}";
          preserve_host_header = true;
        } // extraConfig;
        # Like `service`, but the upstream itself speaks TLS; SNI is set to
        # the public host name.
        secureService = server: hostName: extraConfig: service server hostName ({
          to = "https://${server}";
          tls_server_name = hostName;
        } // extraConfig);
        # Modifier: make a route public (no auth, no domain restriction).
        public = extraConfig: {
          allow_public_unauthenticated_access = true;
          allowed_domains = null;
        } // extraConfig;
      in [
        (service "clouvider-fra01" "int.lukegb.com" {})
        (service "clouvider-fra01" "logged-out.int.lukegb.com" (public {}))
        (service "clouvider-fra01" "sonarr.int.lukegb.com" {})
        (service "clouvider-fra01" "radarr.int.lukegb.com" {})
        (service "clouvider-fra01" "deluge.int.lukegb.com" {})
        (service "totoro:9090" "prometheus.int.lukegb.com" {})
        (service "totoro:9093" "alertmanager.int.lukegb.com" {})
        (service "totoro:3000" "grafana.int.lukegb.com" {})
        (secureService "swann:8443" "unifi.int.lukegb.com" {
          tls_skip_verify = true;
          allow_websockets = true;
          timeout = "0";
        })
        (service "blade-tuvok:7480" "objdump.zxcvbnm.ninja" (public {
          timeout = "30m";  # Uploads can take a while; bump the timeout.
        }))
        # NOTE(review): these two routes share a host; this relies on route
        # evaluation order (public regex paths first, authenticated catch-all
        # second) — confirm against the Pomerium routes docs before reordering.
        (secureService "totoro" "invoices.lukegb.com" (public {
          regex = "^/((third_party|ajax|client_area|pdf)/.*|[a-zA-Z0-9]{8})$";
        }))
        (secureService "totoro" "invoices.lukegb.com" {})
        (baseConfig // {
          from = "https://httpbin.int.lukegb.com";
          to = "https://verify.pomerium.com";
        })
        (service "bvm-twitterchiver:8080" "twitterchiver.int.lukegb.com" {})
        (service "bvm-twitterchiver:8080" "twitterchiver.lukegb.com" {})
      ];
    };
  };
  systemd.services.pomerium = {
    # BUG FIX: `After`/`Wants` are [Unit]-section directives, but
    # `serviceConfig` renders its keys into the [Service] section, where
    # systemd ignores them with a warning — so pomerium previously had no
    # effective ordering/dependency on the ACME cert or redis. Express them
    # through the NixOS unit-level `after`/`wants` options instead.
    after = [ "acme-finished-int.lukegb.com.target" "redis.service" ];
    wants = [ "acme-finished-int.lukegb.com.target" "redis.service" ];
    serviceConfig = {
      # Hand the ACME certificate to pomerium via systemd's credential
      # mechanism; pomerium resolves the names through $CREDENTIALS_DIRECTORY.
      LoadCredential = [
        "certfullchain.pem:/var/lib/acme/int.lukegb.com/fullchain.pem"
        "certkey.pem:/var/lib/acme/int.lukegb.com/key.pem"
      ];
      Environment = [
        "CERTIFICATE_FILE=certfullchain.pem"
        "CERTIFICATE_KEY_FILE=certkey.pem"
      ];
    };
  };
  # Let's Encrypt certificates via the DNS-01 challenge against Cloudflare,
  # so wildcard names can be issued.
  security.acme = {
    acceptTerms = true;
    email = "letsencrypt@lukegb.com";
    certs."int.lukegb.com" = {
      domain = "*.int.lukegb.com";
      dnsProvider = "cloudflare";
      credentialsFile = secrets.cloudflareCredentials;
      extraDomainNames = [
        # "int.lukegb.com"  # redundant with *.lukegb.com
        "lukegb.com"
        "*.lukegb.com"
        "objdump.zxcvbnm.ninja"
      ];
      # Restart pomerium after renewal so it re-reads the certificate it
      # loads at startup via LoadCredential.
      postRun = ''
        systemctl restart pomerium
      '';
    };
  };

  # NixOS release this machine was first installed with; never bump casually.
  system.stateVersion = "20.09";
}