This commit is contained in:
Luke Granger-Brown 2024-11-10 21:58:06 +00:00
commit 2f786ed2da
1634 changed files with 66668 additions and 33328 deletions

View file

@ -9,6 +9,7 @@ let
in {
imports = [
../lib/bvm.nix
../lib/pomerium.nix
];
# Networking!
@ -40,8 +41,8 @@ in {
allowedUDPPorts = [ 443 ];
};
};
#my.ip.tailscale = "100.94.23.105";
#my.ip.tailscale6 = "fd7a:115c:a1e0:ab12:4843:cd96:625e:1769";
my.ip.tailscale = "100.103.26.78";
my.ip.tailscale6 = "fd7a:115c:a1e0::8d01:1a4e";
services.openssh.ports = [ 20022 ];
my.deploy.args = "-p 20022";
@ -51,8 +52,8 @@ in {
services.postfix = {
enable = true;
domain = "hg.lukegb.com";
hostname = "hg.lukegb.com";
domain = "git.lukegb.com";
hostname = "git.lukegb.com";
extraConfig = ''
milter_protocol = 2
milter_default_action = accept
@ -63,9 +64,76 @@ in {
};
services.opendkim = {
enable = true;
domains = "csl:hg.lukegb.com";
domains = "csl:git.lukegb.com";
selector = "bvm-forgejo";
};
services.pomerium = {
settings = {
services = "proxy";
autocert = true;
routes = [{
from = "https://git.lukegb.com";
to = "http://localhost:3000";
pass_identity_headers = true;
remove_request_headers = [
"X-WebAuth-User"
"X-WebAuth-Email"
"X-WebAuth-FullName"
];
}];
};
};
environment.systemPackages = with pkgs; [ forgejo-cli forgejo ];
services.forgejo = {
enable = true;
lfs.enable = true;
package = pkgs.forgejo;
secrets = let
customDir = config.services.forgejo.customDir;
in {
storage.MINIO_SECRET_ACCESS_KEY = "${customDir}/conf/s3_secret_key";
};
settings = {
server = {
DOMAIN = "git.lukegb.com";
ROOT_URL = "https://git.lukegb.com/";
};
session = {
COOKIE_SECURE = true;
};
storage = {
STORAGE_TYPE = "minio";
MINIO_ENDPOINT = "objdump.zxcvbnm.ninja";
MINIO_BUCKET = "lukegb-forgejo";
MINIO_LOCATION = "london";
MINIO_USE_SSL = true;
MINIO_BUCKET_LOOKUP = "dns";
MINIO_ACCESS_KEY_ID = "AKIALUKEGBFORGEJO000";
};
security = {
COOKIE_REMEMBER_NAME = "forgejo_remember_me";
REVERSE_PROXY_AUTHENTICATION_EMAIL = "X-Pomerium-Claim-Email";
};
service = {
DISABLE_REGISTRATION = true;
ENABLE_REVERSE_PROXY_AUTHENTICATION = true;
ENABLE_REVERSE_PROXY_EMAIL = true;
};
mailer = {
ENABLED = true;
PROTOCOL = "smtp";
SMTP_ADDR = "localhost";
SMTP_PORT = 25;
FROM = "lukegb.com Forgejo <forgejo@git.lukegb.com>";
};
cron = {
ENABLED = true;
};
log.LEVEL = "Trace";
};
};
system.stateVersion = "24.11";
}

View file

@ -7,6 +7,7 @@
imports = [
../lib/bgp.nix
../lib/zfs.nix
../lib/pomerium.nix
../totoro/barf.nix # eww
];
@ -270,41 +271,18 @@
bind = "127.0.0.1";
};
services.pomerium = {
enable = true;
secretsFile = config.my.vault.secrets.pomerium.path;
settings = {
address = ":443";
http_redirect_addr = ":80";
dns_lookup_family = "AUTO";
idp_provider = "google";
idp_client_id = "136257844546-qsa6hi1oqqoq2bnt93deo4e70ggbn1p8.apps.googleusercontent.com";
idp_request_params = {
hd = "lukegb.com";
login_hint = "lukegb@lukegb.com";
};
jwt_claims_headers = [
"email"
"user"
];
timeout_read = "0"; # We have some long-lived connections...
timeout_write = "0";
timeout_idle = "0";
forward_auth_url = "https://fwdauth.int.lukegb.com";
authenticate_service_url = "https://auth.int.lukegb.com";
signout_redirect_url = "https://logged-out.int.lukegb.com";
certificates = [
{ cert = "/var/lib/acme/lukegb.com/fullchain.pem"; key = "/var/lib/acme/lukegb.com/privkey.pem"; }
];
policy = let
routes = let
baseConfig = {
allowed_domains = [ "lukegb.com" ];
policy = [{
allow.and = [{
domain.is = "lukegb.com";
}];
}];
pass_identity_headers = true;
timeout = "30s";
};
@ -319,7 +297,7 @@
} // extraConfig);
public = extraConfig: {
allow_public_unauthenticated_access = true;
allowed_domains = null;
policy = null;
} // extraConfig;
in [
(service "localhost:12001" "barf.lukegb.com" (public {}))
@ -384,6 +362,20 @@
allow_websockets = true;
timeout = "0";
})
# These services are included for policy reasons only.
# They have their own reverse proxy instances.
(service "localhost:3000" "git.lukegb.com" {
policy = [{
allow.not = [{
http_path.starts_with = "/user/login";
}];
} {
allow.and = [{
domain.is = "lukegb.com";
}];
}];
})
];
};
};
@ -403,19 +395,6 @@
];
reloadOrRestartUnits = [ "pomerium.service" ];
};
my.vault.secrets.pomerium = {
template = ''
{{ with secret "kv/apps/pomerium" }}
COOKIE_SECRET={{ .Data.data.cookieSecret }}
SHARED_SECRET={{ .Data.data.sharedSecret }}
IDP_CLIENT_SECRET={{ .Data.data.idpClientSecret }}
SIGNING_KEY={{ .Data.data.signingKey }}
IDP_SERVICE_ACCOUNT={{ .Data.data.googleServiceAccount }}
{{ end }}
'';
group = "root";
reloadOrRestartUnits = [ "pomerium.service" ];
};
users.groups.acme = {};
system.stateVersion = "22.11";

View file

@ -3,7 +3,7 @@
; SPDX-License-Identifier: Apache-2.0
; MNAME RNAME SERIAL REFRESH RETRY EXPIRE TTL
@ 600 IN SOA frantech-lux01.as205479.net. hostmaster.lukegb.com. 59 600 450 3600 300
@ 600 IN SOA frantech-lux01.as205479.net. hostmaster.lukegb.com. 60 600 450 3600 300
; NB: these are also glue records in Google Domains.
$INCLUDE tmpl.ns
@ -101,6 +101,8 @@ bvm-logger.int 3600 IN A 100.68.134.82
bvm-logger.int 3600 IN AAAA fd7a:115c:a1e0:ab12:4843:cd96:6244:8652
bvm-paperless.int 3600 IN A 100.85.236.121
bvm-paperless.int 3600 IN AAAA fd7a:115c:a1e0:ab12:4843:cd96:6255:ec79
bvm-forgejo.int 3600 IN A 100.103.26.78
bvm-forgejo.int 3600 IN AAAA fd7a:115c:a1e0::8d01:1a4e
mac-mini.int 3600 IN A 100.91.188.84
mac-mini.int 3600 IN AAAA fd7a:115c:a1e0:ab12:4843:cd96:625b:bc54

View file

@ -0,0 +1,61 @@
{ config, ... }:
{
services.pomerium = {
enable = true;
secretsFile = config.my.vault.secrets.pomerium.path;
settings = {
address = ":443";
grpc_address = ":5443";
http_redirect_addr = ":80";
dns_lookup_family = "AUTO";
idp_provider = "google";
idp_client_id = "136257844546-qsa6hi1oqqoq2bnt93deo4e70ggbn1p8.apps.googleusercontent.com";
idp_request_params = {
hd = "lukegb.com";
login_hint = "lukegb@lukegb.com";
};
jwt_claims_headers = [
"email"
"user"
];
# Note autocert = true; not set here.
autocert_ca = "https://dv.acme-v02.api.pki.goog/directory";
autocert_email = "acme@lukegb.com";
autocert_must_staple = true;
autocert_dir = "/var/lib/pomerium/autocert";
grpc_insecure = true;
timeout_read = "0"; # We have some long-lived connections...
timeout_write = "0";
timeout_idle = "0";
forward_auth_url = "https://fwdauth.int.lukegb.com";
authenticate_service_url = "https://auth.int.lukegb.com";
signout_redirect_url = "https://logged-out.int.lukegb.com";
authorize_service_url = "http://etheroute-lon01.int.as205479.net:5443";
databroker_service_url = "http://etheroute-lon01.int.as205479.net:5443";
};
};
my.vault.secrets.pomerium = {
template = ''
{{ with secret "kv/apps/pomerium" }}
COOKIE_SECRET={{ .Data.data.cookieSecret }}
SHARED_SECRET={{ .Data.data.sharedSecret }}
IDP_CLIENT_SECRET={{ .Data.data.idpClientSecret }}
SIGNING_KEY={{ .Data.data.signingKey }}
IDP_SERVICE_ACCOUNT={{ .Data.data.googleServiceAccount }}
AUTOCERT_EAB_KEY_ID={{ .Data.data.eabKeyID }}
AUTOCERT_EAB_MAC_KEY={{ .Data.data.eabMacKey }}
{{ end }}
'';
group = "root";
reloadOrRestartUnits = [ "pomerium.service" ];
};
}

View file

@ -78,6 +78,7 @@
my.apps.bsky-pds = {};
my.servers.etheroute-lon01.apps = [ "pomerium" ];
my.servers.bvm-forgejo.apps = [ "pomerium" ];
my.servers.howl.apps = [ "nixbuild" ];
my.servers.porcorosso.apps = [ "quotesdb" "nixbuild" ];
my.servers.nausicaa.apps = [ "quotesdb" "nixbuild" "hacky-vouchproxy" "hackyplayer" "emfminiserv" ];

View file

@ -40,6 +40,9 @@ d1c1a0c656ccd8bd3b25d3c4287f2d075faf3cf3
# fix indentation in meteor default.nix
a37a6de881ec4c6708e6b88fd16256bbc7f26bbd
# pkgs/stdenv/make-derivation: Reindent
b4cc2a2479a7ab0f6440b2e1319221920ef72699
# treewide: automatically md-convert option descriptions
2e751c0772b9d48ff6923569adfa661b030ab6a2
@ -189,3 +192,14 @@ ce21e97a1f20dee15da85c084f9d1148d84f853b
# percona: apply nixfmt
8d14fa2886fec877690c6d28cfcdba4503dbbcea
# nixos/virtualisation: format image-related files
# Original formatting commit that was reverted
04fadac429ca7d6b92025188652376c230205730
# Revert commit
4cec81a9959ce612b653860dcca53101a36f328a
# Final commit that does the formatting
88b285c01d84de82c0b2b052fd28eaf6709c2d26
# sqlc: format with nixfmt
2bdec131b2bb2c8563f4556d741d34ccb77409e2

View file

@ -8,4 +8,4 @@
## Technical details
Please run `nix-shell -p nix-info --run "nix-info -m"` and paste the result.
<!-- Please insert the output of running `nix-shell -p nix-info --run "nix-info -m"` below this line -->

View file

@ -33,12 +33,8 @@ If in doubt, check `git blame` for whoever last touched something.
-->
### Metadata
Please run `nix-shell -p nix-info --run "nix-info -m"` and paste the result.
```console
[user@system:~]$ nix-shell -p nix-info --run "nix-info -m"
output here
```
<!-- Please insert the output of running `nix-shell -p nix-info --run "nix-info -m"` below this line -->
---

View file

@ -31,12 +31,7 @@ If in doubt, check `git blame` for whoever last touched something.
### Metadata
Please run `nix-shell -p nix-info --run "nix-info -m"` and paste the result.
```console
[user@system:~]$ nix-shell -p nix-info --run "nix-info -m"
output here
```
<!-- Please insert the output of running `nix-shell -p nix-info --run "nix-info -m"` below this line -->
---

View file

@ -217,9 +217,9 @@
- changed-files:
- any-glob-to-any-file:
- doc/languages-frameworks/nim.section.md
- pkgs/development/compilers/nim/*
- pkgs/development/nim-packages/**/*
- pkgs/top-level/nim-packages.nix
- pkgs/build-support/build-nim-package.nix
- pkgs/by-name/ni/nim*
- pkgs/top-level/nim-overrides.nix
"6.topic: nodejs":
- any:
@ -340,6 +340,15 @@
- pkgs/os-specific/linux/systemd/**/*
- nixos/modules/system/boot/systemd*/**/*
"6.topic: tcl":
- any:
- changed-files:
- any-glob-to-any-file:
- doc/languages-frameworks/tcl.section.md
- pkgs/development/interpreters/tcl/*
- pkgs/development/tcl-modules/**/*
- pkgs/top-level/tcl-packages.nix
"6.topic: TeX":
- any:
- changed-files:

View file

@ -20,7 +20,7 @@ jobs:
if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith('backport', github.event.label.name))
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Create backport PRs

View file

@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
# we don't limit this action to only NixOS repo since the checks are cheap and useful developer feedback
steps:
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
- uses: cachix/cachix-action@ad2ddac53f961de1989924296a1f236fcfbaa4fc # v15
with:

View file

@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'NixOS'
steps:
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
filter: blob:none

View file

@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'NixOS'
steps:
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge

View file

@ -18,7 +18,7 @@ jobs:
runs-on: ubuntu-latest
if: "!contains(github.event.pull_request.title, '[skip treewide]')"
steps:
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge

View file

@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
if: "!contains(github.event.pull_request.title, '[skip treewide]')"
steps:
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge

View file

@ -10,7 +10,7 @@ jobs:
name: shell-check-x86_64-linux
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
@ -22,7 +22,7 @@ jobs:
name: shell-check-aarch64-darwin
runs-on: macos-latest
steps:
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge

View file

@ -1,17 +1,32 @@
name: Codeowners
name: Codeowners v2
# This workflow depends on a GitHub App with the following permissions:
# - Repository > Administration: read-only
# - Organization > Members: read-only
# - Repository > Pull Requests: read-write
# The App needs to be installed on this repository
# the OWNER_APP_ID repository variable needs to be set
# the OWNER_APP_PRIVATE_KEY repository secret needs to be set
# This workflow depends on two GitHub Apps with the following permissions:
# - For checking code owners:
# - Permissions:
# - Repository > Administration: read-only
# - Organization > Members: read-only
# - Install App on this repository, setting these variables:
# - OWNER_RO_APP_ID (variable)
# - OWNER_RO_APP_PRIVATE_KEY (secret)
# - For requesting code owners:
# - Permissions:
# - Repository > Administration: read-only
# - Organization > Members: read-only
# - Repository > Pull Requests: read-write
# - Install App on this repository, setting these variables:
# - OWNER_APP_ID (variable)
# - OWNER_APP_PRIVATE_KEY (secret)
#
# This split is done because checking code owners requires handling untrusted PR input,
# while requesting code owners requires PR write access, and those shouldn't be mixed.
on:
pull_request_target:
types: [opened, ready_for_review, synchronize, reopened, edited]
# We don't need any default GitHub token
permissions: {}
env:
OWNERS_FILE: ci/OWNERS
# Don't do anything on draft PRs
@ -35,7 +50,7 @@ jobs:
# Important: Because we use pull_request_target, this checks out the base branch of the PR, not the PR itself.
# We later build and run code from the base branch with access to secrets,
# so it's important this is not the PRs code.
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: base
@ -45,10 +60,10 @@ jobs:
- uses: actions/create-github-app-token@5d869da34e18e7287c1daad50e0b8ea0f506ce69 # v1.11.0
id: app-token
with:
app-id: ${{ vars.OWNER_APP_ID }}
private-key: ${{ secrets.OWNER_APP_PRIVATE_KEY }}
app-id: ${{ vars.OWNER_RO_APP_ID }}
private-key: ${{ secrets.OWNER_RO_APP_PRIVATE_KEY }}
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: refs/pull/${{ github.event.number }}/merge
path: pr
@ -72,7 +87,7 @@ jobs:
# Important: Because we use pull_request_target, this checks out the base branch of the PR, not the PR head.
# This is intentional, because we need to request the review of owners as declared in the base branch.
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/create-github-app-token@5d869da34e18e7287c1daad50e0b8ea0f506ce69 # v1.11.0
id: app-token

View file

@ -1,6 +1,8 @@
name: "Checking EditorConfig"
name: "Checking EditorConfig v2"
permissions: read-all
permissions:
pull-requests: read
contents: read
on:
# avoids approving first time contributors
@ -25,7 +27,7 @@ jobs:
- name: print list of changed files
run: |
cat "$HOME/changed_files"
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge

View file

@ -1,6 +1,7 @@
name: "Build NixOS manual"
name: "Build NixOS manual v2"
permissions: read-all
permissions:
contents: read
on:
pull_request_target:
@ -15,7 +16,7 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'NixOS'
steps:
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge

View file

@ -1,6 +1,7 @@
name: "Build Nixpkgs manual"
name: "Build Nixpkgs manual v2"
permissions: read-all
permissions:
contents: read
on:
pull_request_target:
@ -17,7 +18,7 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'NixOS'
steps:
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge

View file

@ -1,6 +1,8 @@
name: "Check whether nix files are parseable"
name: "Check whether nix files are parseable v2"
permissions: read-all
permissions:
pull-requests: read
contents: read
on:
# avoids approving first time contributors
@ -25,7 +27,7 @@ jobs:
if [[ -s "$HOME/changed_files" ]]; then
echo "CHANGED_FILES=$HOME/changed_files" > "$GITHUB_ENV"
fi
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge

View file

@ -27,7 +27,7 @@ jobs:
timeout-minutes: 10
steps:
# This checks out the base branch because of pull_request_target
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
path: base
sparse-checkout: ci
@ -42,7 +42,7 @@ jobs:
echo "Skipping the rest..."
fi
rm -rf base
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
if: env.mergedSha
with:
# pull_request_target checks out the base branch by default

View file

@ -41,7 +41,7 @@ jobs:
into: staging-24.05
name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
steps:
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
uses: devmasx/merge-branch@854d3ac71ed1e9deb668e0074781b81fdd6e771f # 1.4.0

View file

@ -39,7 +39,7 @@ jobs:
into: staging
name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
steps:
- uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
uses: devmasx/merge-branch@854d3ac71ed1e9deb668e0074781b81fdd6e771f # 1.4.0

View file

@ -392,14 +392,10 @@ Here is a Git history diagram showing the flow of commits between the three bran
} }%%
gitGraph
commit id:" "
branch staging-next
branch staging
commit id:" "
branch staging-next
checkout master
checkout staging
checkout master
commit id:" "
checkout staging-next
merge master id:"automatic"
checkout staging
merge staging-next id:"automatic "
@ -659,9 +655,11 @@ This goes hand in hand with [Writing good commit messages](#writing-good-commit-
For the code quality assessment, you cannot do anything yourself as only the committer can do this and they already have your code to look at.
In order to minimise the need for back and forth though, do take a look over your code changes yourself and try to put yourself into the shoes of someone who didn't just write that code.
Would you immediately know what the code does by glancing at it?
Would you immediately know what the code does or why it is needed by glancing at it?
If not, reviewers will notice this and will ask you to clarify the code by refactoring it and/or adding a few explanations in code comments.
Doing this preemptively can save you and the committer a lot of time.
To better convey the "story" of your change, consider dividing your change into multiple atomic commits.
There is a balance to strike however: over-fragmentation causes friction.
The code artefacts are the hardest for committers to assess because PRs touch all sorts of components: applications, libraries, NixOS modules, editor plugins and many many other things.
However, any individual committer can only really assess components that they themselves know how to use, and yet they must still be convinced somehow.
@ -689,7 +687,9 @@ Ask them nicely whether they still intend to review your PR and find yourself an
### How can I get a committer to look at my PR?
- Simply wait. Reviewers frequently browse open PRs and may happen to run across yours and take a look.
- Improve skimmability: use a simple descriptive PR title (details go in commit titles) outlining _what_ is done and _why_.
- Improve discoverability: apply all relevant labels, tick all relevant PR body checkboxes.
- Wait. Reviewers frequently browse open PRs and may happen to run across yours and take a look.
- Get non-committers to review/approve. Many committers filter open PRs for low-hanging fruit that have already been reviewed.
- [@-mention](https://github.blog/news-insights/mention-somebody-they-re-notified/) someone and ask them nicely
- Post in one of the channels made for this purpose if there has been no activity for at least one week
@ -710,7 +710,7 @@ Don't worry about it.
If there is a build failure however and it happened due to a package related to your change, you need to investigate it of course.
If ofBorg reveals the build to be broken on some platform and you don't have access to that platform, you should set your package's `meta.broken` accordingly.
When in any doubt, please simply ask via a comment in your PR or through one of the help channels.
When in any doubt, please ask via a comment in your PR or through one of the help channels.
## I received a review on my PR, how do I get it over the finish line?
@ -730,6 +730,13 @@ There may be constraints you had to work with which they're not aware of or qual
There are some further pitfalls and realities which this section intends to make you aware of.
### Aim to reduce cycles
Please be prepared for it to take a while before the reviewer gets back to you after you respond.
This is simply the reality of community projects at the scale of Nixpkgs.
As such, make sure to respond to _all_ feedback, either by applying the suggested changes or by arguing in favor of something else or no change at all.
It wastes everyone's time to wait a couple of days just for the reviewer to remind you to address something they already asked for.
### A reviewer requested a bunch of insubstantial changes on my PR
The people involved in Nixpkgs care about code quality because, once in Nixpkgs, it needs to be maintained for many years to come.
@ -742,11 +749,11 @@ It is convention to mark review comments that are not critical to the PR as nitp
As the PR author, you should still take a look at these as they will often reveal best practices and unwritten rules that usually have good reasons behind them and you may want to incorporate them into your modus operandi.
Please keep in mind that reviewers almost always mean well here.
Their intent is not to denounce your code, they simply want your code to be as good as it can be.
Their intent is not to denounce your code, they want your code to be as good as it can be.
Through their experience, they may also take notice of seemingly insignificant issues that have caused significant burden before.
Sometimes however, they can also get a bit carried away and become too perfectionistic.
If you feel some of the requests are unreasonable or merely a matter of personal preference, try to nicely remind the reviewers that you may not intend this code to be 100% perfect or that you have different taste in some regards and press them on whether they think that these requests are *critical* to the PR's success.
If you feel some of the requests are unreasonable, out of scope, or merely a matter of personal preference, try to nicely remind the reviewers that you may not intend this code to be 100% perfect or that you have different taste in some regards and press them on whether they think that these requests are *critical* to the PR's success.
While we do have a set of [official standards for the Nix community](https://github.com/NixOS/rfcs/), we don't have standards for everything and there are often multiple valid ways to achieve the same goal.
Unless there are standards forbidding the patterns used in your code or there are serious technical, maintainability or readability issues with your code, you can insist on keeping the code the way you made it and disregard the requests.
@ -768,9 +775,11 @@ If someone left an approving review on your PR and didn't merge a few days later
Please see it as your responsibility to actively remind reviewers of your open PRs.
The easiest way to do so is to simply cause them a Github notification.
Github notifies people involved in the PR when you add a comment to your PR, push your PR or re-request their review.
The easiest way to do so is to trigger a GitHub notification for them.
GitHub notifies people involved in the PR whenever you add a comment to your PR, push your PR or re-request their review.
Doing any of that will get you people's attention again.
Everyone deserves proper attention, and yes that includes you!
However, please be mindful that committers sadly cannot always give everyone the attention they deserve.
It may very well be the case that you have to do this every time you need the committer to follow up on your PR.
Again, this is a community project so please be mindful of people's circumstances here; be nice when requesting reviews again.

View file

@ -11,13 +11,13 @@
# - There is no need for user/team listed here to have write access.
# - No reviews will be requested for PRs that target the wrong base branch.
#
# Processing of this file is implemented in workflows/codeowners.yml
# Processing of this file is implemented in workflows/codeowners-v2.yml
# CI
/.github/workflows @NixOS/Security @Mic92 @zowoq
/.github/workflows/check-nix-format.yml @infinisil
/.github/workflows/nixpkgs-vet.yml @infinisil @philiptaron
/.github/workflows/codeowners.yml @infinisil
/.github/workflows/codeowners-v2.yml @infinisil
/ci/OWNERS @infinisil
/ci @infinisil @philiptaron @NixOS/Security
@ -173,10 +173,17 @@ nixos/modules/installer/tools/nix-fallback-paths.nix @NixOS/nix-team @raitobeza
/pkgs/development/r-modules @jbedo
# Rust
/pkgs/development/compilers/rust @Mic92 @zowoq @winterqt @figsoda
/pkgs/development/compilers/rust @alyssais @Mic92 @zowoq @winterqt @figsoda
/pkgs/build-support/rust @zowoq @winterqt @figsoda
/doc/languages-frameworks/rust.section.md @zowoq @winterqt @figsoda
# Tcl
/pkgs/development/interpreters/tcl @fgaz
/pkgs/development/libraries/tk @fgaz
/pkgs/top-level/tcl-packages.nix @fgaz
/pkgs/development/tcl-modules @fgaz
/doc/languages-frameworks/tcl.section.md @fgaz
# C compilers
/pkgs/development/compilers/gcc
/pkgs/development/compilers/llvm @alyssais @RossComputerGuy @NixOS/llvm
@ -332,7 +339,9 @@ pkgs/development/python-modules/buildcatrust/ @ajs124 @lukegb @mweinelt
/pkgs/by-name/ne/nemo-* @mkg20001
# nim
/pkgs/development/compilers/nim @ehmry
/doc/languages-frameworks/nim.section.md @ehmry
/pkgs/build-support/build-nim-package.nix @ehmry
/pkgs/top-level/nim-overrides.nix @ehmry
# terraform providers
/pkgs/applications/networking/cluster/terraform-providers @zowoq

View file

@ -125,6 +125,8 @@ A set of functions that build a predefined set of minimal Linux distributions im
* `debian10x86_64`
* `debian11i386`
* `debian11x86_64`
* `debian12i386`
* `debian12x86_64`
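For illustration, a minimal sketch of building one of these images directly might look like the following (the `extraPackages` argument and the chosen package are assumptions for this example; check the attributes documented below):

```nix
# Build a Debian 12 x86_64 disk image with an extra distribution package installed.
{
  pkgs ? import <nixpkgs> { },
}:

pkgs.vmTools.diskImageFuns.debian12x86_64 {
  # Extra Debian packages to install into the image (assumed argument name).
  extraPackages = [ "curl" ];
}
```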
### Attributes {#vm-tools-diskImageFuns-attributes}

View file

@ -732,7 +732,7 @@ writeShellApplication {
## `symlinkJoin` {#trivial-builder-symlinkJoin}
This can be used to put many derivations into the same directory structure. It works by creating a new derivation and adding symlinks to each of the paths listed. It expects two arguments, `name`, and `paths`. `name` is the name used in the Nix store path for the created derivation. `paths` is a list of paths that will be symlinked. These paths can be to Nix store derivations or any other subdirectory contained within.
This can be used to put many derivations into the same directory structure. It works by creating a new derivation and adding symlinks to each of the paths listed. It expects two arguments, `name`, and `paths`. `name` (or alternatively `pname` and `version`) is the name used in the Nix store path for the created derivation. `paths` is a list of paths that will be symlinked. These paths can be to Nix store derivations or any other subdirectory contained within.
Here is an example:
```nix
# adds symlinks of hello and stack to current build and prints "links added"

View file

@ -74,7 +74,7 @@
}
{
name = "customisation";
description = "Functions to customise (derivation-related) functions, derivatons, or attribute sets";
description = "Functions to customise (derivation-related) functions, derivations, or attribute sets";
}
{
name = "meta";

View file

@ -93,6 +93,7 @@ ruby.section.md
rust.section.md
scheme.section.md
swift.section.md
tcl.section.md
texlive.section.md
titanium.section.md
vim.section.md

View file

@ -428,7 +428,26 @@ NOTE: It is highly recommended to use a pinned version of pnpm (i.e. `pnpm_8` or
In case you are patching `package.json` or `pnpm-lock.yaml`, make sure to pass `finalAttrs.patches` to the function as well (i.e. `inherit (finalAttrs) patches`).
`pnpm.configHook` supports adding additional `pnpm install` flags via `pnpmInstallFlags` which can be set to a Nix string array.
`pnpm.configHook` supports adding additional `pnpm install` flags via `pnpmInstallFlags` which can be set to a Nix string array:
```nix
{
  stdenv,
  pnpm,
}:
stdenv.mkDerivation (finalAttrs: {
pname = "foo";
version = "0-unstable-1980-01-01";
src = ...;
pnpmInstallFlags = [ "--shamefully-hoist" ];
pnpmDeps = pnpm.fetchDeps {
inherit (finalAttrs) pnpmInstallFlags;
};
})
```
#### Dealing with `sourceRoot` {#javascript-pnpm-sourceRoot}
@ -459,16 +478,16 @@ Assuming the following directory structure, we can define `sourceRoot` and `pnpm
#### PNPM Workspaces {#javascript-pnpm-workspaces}
If you need to use a PNPM workspace for your project, then set `pnpmWorkspace = "<workspace project name>"` in your `pnpm.fetchDeps` call,
which will make PNPM only install dependencies for that workspace package.
If you need to use a PNPM workspace for your project, then set `pnpmWorkspaces = [ "<workspace project name 1>" "<workspace project name 2>" ]`, etc., in your `pnpm.fetchDeps` call,
which will make PNPM only install dependencies for those workspace packages.
For example:
```nix
...
pnpmWorkspace = "@astrojs/language-server";
pnpmWorkspaces = [ "@astrojs/language-server" ];
pnpmDeps = pnpm.fetchDeps {
inherit (finalAttrs) pnpmWorkspace;
inherit (finalAttrs) pnpmWorkspaces;
...
}
```
@ -476,7 +495,7 @@ pnpmDeps = pnpm.fetchDeps {
The above would make `pnpm.fetchDeps` call only install dependencies for the `@astrojs/language-server` workspace package.
Note that you do not need to set `sourceRoot` to make this work.
Usually in such cases, you'd want to use `pnpm --filter=$pnpmWorkspace build` to build your project, as `npmHooks.npmBuildHook` probably won't work. A `buildPhase` based on the following example will probably fit most workspace projects:
Usually in such cases, you'd want to use `pnpm --filter=<pnpm workspace name> build` to build your project, as `npmHooks.npmBuildHook` probably won't work. A `buildPhase` based on the following example will probably fit most workspace projects:
```nix
buildPhase = ''

View file

@ -0,0 +1,54 @@
# Tcl {#sec-language-tcl}
## User guide {#sec-language-tcl-user-guide}
Tcl interpreters are available under the `tcl` and `tcl-X_Y` attributes, where `X_Y` is the Tcl version.
Tcl libraries are available in the `tclPackages` attribute set.
They are only guaranteed to work with the default Tcl version, but will probably also work with others thanks to the [stubs mechanism](https://wiki.tcl-lang.org/page/Stubs).
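As a quick illustration, a minimal sketch of a development shell that combines the interpreter with a library from `tclPackages` might look like this (`tcltls` is simply the package used as the example later in this section):

```nix
# shell.nix — a Tcl interpreter together with a library from tclPackages.
{
  pkgs ? import <nixpkgs> { },
}:

pkgs.mkShell {
  packages = [
    pkgs.tcl
    pkgs.tclPackages.tcltls
  ];
}
```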
## Packaging guide {#sec-language-tcl-packaging}
Tcl packages are typically built with `tclPackages.mkTclDerivation`.
Tcl dependencies go in `buildInputs`/`nativeBuildInputs`/... like other packages.
For more complex package definitions, such as packages with mixed languages, use `tcl.tclPackageHook`.
Where possible, make sure to enable stubs for maximum compatibility, usually with the `--enable-stubs` configure flag.
Here is a simple package example to be called with `tclPackages.callPackage`.
```nix
{ lib, fetchzip, mkTclDerivation, openssl }:
mkTclDerivation rec {
pname = "tcltls";
version = "1.7.22";
src = fetchzip {
url = "https://core.tcl-lang.org/tcltls/uv/tcltls-${version}.tar.gz";
hash = "sha256-TOouWcQc3MNyJtaAGUGbaQoaCWVe6g3BPERct/V65vk=";
};
buildInputs = [ openssl ];
configureFlags = [
"--with-ssl-dir=${openssl.dev}"
"--enable-stubs"
];
meta = {
homepage = "https://core.tcl-lang.org/tcltls/index";
description = "OpenSSL / RSA-bsafe Tcl extension";
maintainers = [ lib.maintainers.agbrooks ];
license = lib.licenses.tcltk;
platforms = lib.platforms.unix;
};
}
```
All Tcl libraries are declared in `pkgs/top-level/tcl-packages.nix` and are defined in `pkgs/development/tcl-modules/`.
If possible, prefer the by-name hierarchy in `pkgs/development/tcl-modules/by-name/`.
Its use is documented in `pkgs/development/tcl-modules/by-name/README.md`.
All Tcl applications reside elsewhere.
In case a package is used as both a library and an application (for example `expect`), it should be defined in `tcl-packages.nix`, with an alias elsewhere.
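As a rough sketch, such an alias might look like the following (the exact file and attribute layout here are illustrative assumptions, not a prescription):

```nix
# Illustrative only: keep the program reachable at the top level
# while the real definition lives in tcl-packages.nix.
{
  expect = tclPackages.expect;
}
```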

View file

@ -942,6 +942,11 @@ lib.mapAttrs mkLicense ({
url = "https://license.coscl.org.cn/MulanPSL2";
};
naist-2003 = {
spdxId = "NAIST-2003";
fullName = "Nara Institute of Science and Technology License (2003)";
};
nasa13 = {
spdxId = "NASA-1.3";
fullName = "NASA Open Source Agreement 1.3";

View file

@ -414,6 +414,12 @@
githubId = 1174810;
name = "Nikolay Amiantov";
};
abcsds = {
email = "abcsds@gmail.com";
github = "abcsds";
githubId = 2694381;
name = "Alberto Barradas";
};
abdiramen = {
email = "abdirahman.osmanthus@gmail.com";
github = "Abdiramen";
@ -731,6 +737,12 @@
githubId = 79667753;
keys = [ { fingerprint = "B0D7 2955 235F 6AB5 ACFA 1619 8C7F F5BB 1ADE F191"; } ];
};
ailsa-sun = {
name = "Ailsa Sun";
email = "jjshenw@gmail.com";
github = "ailsa-sun";
githubId = 135079815;
};
aimpizza = {
email = "rickomo.us@gmail.com";
name = "Rick Omonsky";
@ -743,6 +755,11 @@
githubId = 37664775;
name = "Yuto Oguchi";
};
airrnot = {
name = "airRnot";
github = "airRnot1106";
githubId = 62370527;
};
airwoodix = {
email = "airwoodix@posteo.me";
github = "airwoodix";
@ -961,6 +978,12 @@
githubId = 173595;
name = "Caleb Maclennan";
};
alex = {
email = "alexander.cinnamon927@passmail.net";
github = "alexanderjkslfj";
githubId = 117545308;
name = "Alex";
};
ALEX11BR = {
email = "alexioanpopa11@gmail.com";
github = "ALEX11BR";
@ -1873,6 +1896,12 @@
githubId = 10285250;
name = "Artur E. Ruuge";
};
arunoruto = {
email = "mirza.arnaut45@gmail.com";
github = "arunoruto";
githubId = 21687187;
name = "Mirza Arnaut";
};
asbachb = {
email = "asbachb-nixpkgs-5c2a@impl.it";
matrix = "@asbachb:matrix.org";
@ -2480,6 +2509,7 @@
};
bbenno = {
email = "nix@bbenno.com";
matrix = "@bbenno:matrix.org";
github = "bbenno";
githubId = 32938211;
name = "Benno Bielmeier";
@ -2527,6 +2557,12 @@
githubId = 34620799;
name = "Jacob Bachmann";
};
bcyran = {
email = "bazyli@cyran.dev";
github = "bcyran";
githubId = 8322846;
name = "Bazyli Cyran";
};
bdd = {
email = "bdd@mindcast.org";
github = "bdd";
@ -5870,7 +5906,10 @@
github = "jollheef";
githubId = 1749762;
name = "Mikhail Klementev";
keys = [ { fingerprint = "5DD7 C6F6 0630 F08E DAE7 4711 1525 585D 1B43 C62A"; } ];
keys = [
{ fingerprint = "5AC8 C9A1 68C7 9451 1A91 2295 C990 5BA7 2B5E 02BB"; }
{ fingerprint = "5DD7 C6F6 0630 F08E DAE7 4711 1525 585D 1B43 C62A"; }
];
};
dunxen = {
email = "git@dunxen.dev";
@ -6264,6 +6303,12 @@
github = "elliottslaughter";
githubId = 3129;
};
ElliottSullingeFarrall = {
name = "Elliott Sullinge-Farrall";
email = "elliott.chalford@gmail.com";
github = "ElliottSullingeFarrall";
githubId = 108588212;
};
elliottvillars = {
email = "elliottvillars@gmail.com";
github = "elliottvillars";
@ -7166,6 +7211,11 @@
githubId = 183879;
name = "Florian Klink";
};
florensie = {
github = "florensie";
githubId = 13403842;
name = "Florens Pauwels";
};
florentc = {
github = "florentc";
githubId = 1149048;
@ -7408,7 +7458,7 @@
matrix = "@frontear:matrix.org";
github = "Frontear";
githubId = 31909298;
keys = [ { fingerprint = "C170 11B7 C0AA BB3F 7415 022C BCB5 CEFD E222 82F5"; } ];
keys = [ { fingerprint = "6A25 DEBE 41DB 0C15 3AB5 BB34 5290 E18B 8705 1A83"; } ];
};
frontsideair = {
email = "photonia@gmail.com";
@ -8625,6 +8675,12 @@
githubId = 6430643;
name = "Henry Till";
};
hensoko = {
email = "hensoko@pub.solar";
github = "hensoko";
githubId = 13552930;
name = "hensoko";
};
heph2 = {
email = "srht@mrkeebs.eu";
github = "heph2";
@ -9431,6 +9487,13 @@
github = "istoph";
githubId = 114227790;
};
itepastra = {
name = "Noa Aarts";
github = "itepastra";
githubId = 27058689;
email = "itepastra@gmail.com";
keys = [ { fingerprint = "E681 4CAF 06AE B076 D55D 3E32 A16C DCBF 1472 541F"; } ];
};
ius = {
email = "j.de.gram@gmail.com";
name = "Joerie de Gram";
@ -9493,6 +9556,12 @@
githubId = 1318743;
name = "Ivar";
};
iv-nn = {
name = "iv-nn";
github = "iv-nn";
githubId = 49885246;
keys = [ { fingerprint = "6358 EF87 86E0 EF2F 1628 103F BAB5 F165 1C71 C9C3"; } ];
};
ivyfanchiang = {
email = "dev@ivyfanchiang.ca";
github = "hexadecimalDinosaur";
@ -9726,6 +9795,12 @@
githubId = 3874017;
name = "Jappie Klooster";
};
jappie3 = {
name = "Jappie3";
matrix = "@jappie:jappie.dev";
github = "Jappie3";
githubId = 42720120;
};
jaredmontoya = {
name = "Jared Montoya";
github = "jaredmontoya";
@ -10481,6 +10556,13 @@
githubId = 168684553;
name = "João Marques";
};
joinemm = {
email = "joonas@rautiola.co";
github = "joinemm";
githubId = 26210439;
name = "Joonas Rautiola";
keys = [ { fingerprint = "87EC DD30 6614 E510 5299 F0D4 090E B48A 4669 AA54"; } ];
};
jojosch = {
name = "Johannes Schleifenbaum";
email = "johannes@js-webcoding.de";
@ -12370,6 +12452,13 @@
githubId = 169170;
name = "Mathias Schreck";
};
loc = {
matrix = "@loc:locrealloc.de";
github = "LoCrealloc";
githubId = 64095253;
name = "LoC";
keys = [ { fingerprint = "DCCE F73B 209A 6024 CAE7 F926 5563 EB4A 8634 4F15"; } ];
};
locallycompact = {
email = "dan.firth@homotopic.tech";
github = "locallycompact";
@ -14484,7 +14573,7 @@
};
moraxyc = {
name = "Moraxyc Xu";
email = "nix@qaq.li";
email = "i@qaq.li";
github = "Moraxyc";
githubId = 69713071;
};
@ -14796,6 +14885,11 @@
githubId = 96225281;
name = "Mustafa Çalışkan";
};
musjj = {
name = "musjj";
github = "musjj";
githubId = 72612857;
};
mvisonneau = {
name = "Maxime VISONNEAU";
email = "maxime@visonneau.fr";
@ -16341,6 +16435,11 @@
githubId = 33182938;
name = "Pankaj";
};
PapayaJackal = {
github = "PapayaJackal";
githubId = 145766029;
name = "PapayaJackal";
};
paperdigits = {
email = "mica@silentumbrella.com";
github = "paperdigits";
@ -17996,6 +18095,12 @@
name = "Roland Conybeare";
keys = [ { fingerprint = "bw5Cr/4ul1C2UvxopphbZbFI1i5PCSnOmPID7mJ/Ogo"; } ];
};
rc-zb = {
name = "Xiao Haifan";
email = "rc-zb@outlook.com";
github = "rc-zb";
githubId = 161540043;
};
rdnetto = {
email = "rdnetto@gmail.com";
github = "rdnetto";
@ -18857,6 +18962,12 @@
githubId = 10908495;
name = "Ran Xiao";
};
ryan4yin = {
email = "xiaoyin_c@qq.com";
github = "ryan4yin";
githubId = 22363274;
name = "Ryan Yin";
};
ryanartecona = {
email = "ryanartecona@gmail.com";
github = "ryanartecona";
@ -19146,6 +19257,12 @@
githubId = 695473;
name = "Sascha Grunert";
};
satoqz = {
email = "mail@satoqz.net";
github = "satoqz";
githubId = 40795431;
name = "satoqz";
};
saturn745 = {
email = "git-commits.rk7uq@aleeas.com";
github = "saturn745";
@ -19604,10 +19721,11 @@
};
shadowrz = {
email = "shadowrz+nixpkgs@disroot.org";
matrix = "@ShadowRZ:matrixim.cc";
matrix = "@shadowrz:nixos.dev";
github = "ShadowRZ";
githubId = 23130178;
name = "";
keys = [ { fingerprint = "3237 D49E 8F81 5A45 2133 64EA 4FF3 5790 F405 53A9"; } ];
};
shahrukh330 = {
email = "shahrukh330@gmail.com";
@ -19701,12 +19819,24 @@
github = "shhht";
githubId = 118352823;
};
shift = {
name = "Vincent Palmer";
email = "shift@someone.section.me";
github = "shift";
githubId = 1653;
};
shikanime = {
name = "William Phetsinorath";
email = "deva.shikanime@protonmail.com";
github = "shikanime";
githubId = 22115108;
};
shiphan = {
email = "timlin940511@gmail.com";
name = "Shiphan";
github = "Shiphan";
githubId = 140245703;
};
shiryel = {
email = "contact@shiryel.com";
name = "Shiryel";
@ -20021,6 +20151,12 @@
githubId = 49844593;
name = "skovati";
};
skyesoss = {
name = "Skye Soss";
matrix = "@skyesoss:matrix.org";
github = "Skyb0rg007";
githubId = 30806179;
};
skykanin = {
github = "skykanin";
githubId = 3789764;
@ -20428,6 +20564,12 @@
github = "srounce";
githubId = 60792;
};
Srylax = {
name = "Srylax";
email = "srylax+nixpkgs@srylax.dev";
github = "Srylax";
githubId = 71783705;
};
sshine = {
email = "simon@simonshine.dk";
github = "sshine";
@ -20501,6 +20643,12 @@
githubId = 94006354;
name = "steamwalker";
};
steeleduncan = {
email = "steeleduncan@hotmail.com";
github = "steeleduncan";
githubId = 866573;
name = "Duncan Steele";
};
steell = {
email = "steve@steellworks.com";
github = "Steell";
@ -21795,6 +21943,12 @@
githubId = 2164118;
name = "Tobias Bora";
};
tobifroe = {
email = "hi@froelich.dev";
github = "tobifroe";
githubId = 40638719;
name = "Tobias Frölich";
};
tobim = {
email = "nix@tobim.fastmail.fm";
github = "tobim";
@ -23085,6 +23239,13 @@
githubId = 1215623;
keys = [ { fingerprint = "DA03 D6C6 3F58 E796 AD26 E99B 366A 2940 479A 06FC"; } ];
};
wilhelmines = {
email = "mail@aesz.org";
matrix = "@wilhelmines:matrix.org";
name = "Ronja Schwarz";
github = "wilhelmines";
githubId = 71409721;
};
willbush = {
email = "git@willbush.dev";
matrix = "@willbush:matrix.org";
@ -23159,6 +23320,17 @@
githubId = 36118348;
keys = [ { fingerprint = "69C9 876B 5797 1B2E 11C5 7C39 80A1 F76F C9F9 54AE"; } ];
};
wizardlink = {
name = "wizardlink";
email = "contact@thewizard.link";
github = "wizardlink";
githubId = 26727907;
keys = [
{
fingerprint = "A1D3 A2B4 E14B D7C0 445B B749 A576 7B54 367C FBDF";
}
];
};
wizeman = {
email = "rcorreia@wizy.org";
github = "wizeman";
@ -23238,6 +23410,12 @@
githubId = 28888242;
name = "WORLDofPEACE";
};
WoutSwinkels = {
name = "Wout Swinkels";
email = "nixpkgs@woutswinkels.com";
github = "WoutSwinkels";
githubId = 113464111;
};
wozeparrot = {
email = "wozeparrot@gmail.com";
github = "wozeparrot";
@ -24181,12 +24359,6 @@
githubId = 71881325;
name = "Stefan Bordei";
};
zzamboni = {
email = "diego@zzamboni.org";
github = "zzamboni";
githubId = 32876;
name = "Diego Zamboni";
};
zzzsy = {
email = "me@zzzsy.top";
github = "zzzsyyy";

View file

@ -1,16 +1,16 @@
name,rockspec,ref,server,version,luaversion,maintainers
alt-getopt,,,,,,arobyn
ansicolors,,,,,,Freed-Wu
bit32,,,,5.3.0-1,5.1,lblasc
argparse,,,,,,
basexx,,,,,,
binaryheap,,,,,,vcunat
bit32,,,,5.3.0-1,5.1,lblasc
busted,,,,,,
busted-htest,,,,,,mrcjkb
cassowary,,,,,,alerque
cldr,,,,,,alerque
compat53,,,,,,vcunat
commons.nvim,,,,,,mrcjkb
compat53,,,,,,vcunat
cosmo,,,,,,
coxpcall,,,,1.17.0-1,,
cqueues,,,,,,vcunat
@ -50,6 +50,7 @@ lua-cjson,,,,,,
lua-cmsgpack,,,,,,
lua-curl,,,,,,
lua-ffi-zlib,,,,,,
lua-iconv,,,,7.0.0,,
lua-lsp,,,,,,
lua-messagepack,,,,,,
lua-protobuf,,,,,,lockejan
@ -62,6 +63,8 @@ lua-rtoml,https://raw.githubusercontent.com/lblasc/lua-rtoml/main/lua-rtoml-0.2-
lua-subprocess,https://raw.githubusercontent.com/0x0ade/lua-subprocess/master/subprocess-scm-1.rockspec,,,,5.1,scoder12
lua-term,,,,,,
lua-toml,,,,,,
lua-utils.nvim,,,,,,mrcjkb
lua-yajl,,,,,,pstn
lua-zlib,,,,,,koral
lua_cliargs,,,,,,
luabitop,https://raw.githubusercontent.com/teto/luabitop/master/luabitop-1.0.2-3.rockspec,,,,,
@ -97,12 +100,9 @@ luaunbound,,,,,,
luaunit,,,,,,lockejan
luautf8,,,,,,pstn
luazip,,,,,,
lua-utils.nvim,,,,,,mrcjkb
lua-yajl,,,,,,pstn
lua-iconv,,,,7.0.0,,
lush.nvim,,,https://luarocks.org/dev,,,teto
luuid,,,,20120509-2,,
luv,,,,1.48.0-2,,
lush.nvim,,,https://luarocks.org/dev,,,teto
lyaml,,,,,,lblasc
lz.n,,,,,,mrcjkb
lze,,,,,,birdee
@ -112,8 +112,8 @@ markdown,,,,,,
mediator_lua,,,,,,
middleclass,,,,,,
mimetypes,,,,,,
mpack,,,,,,
moonscript,https://raw.githubusercontent.com/leafo/moonscript/master/moonscript-dev-1.rockspec,,,,,arobyn
mpack,,,,,,
neorg,,,,,,GaetanLepage
neotest,,,,,,mrcjkb
nlua,,,,,,teto
@ -126,10 +126,10 @@ plenary.nvim,https://raw.githubusercontent.com/nvim-lua/plenary.nvim/master/plen
psl,,,,0.3,,
rapidjson,,,,,,
rest.nvim,,,,,5.1,teto
rocks.nvim,,,,,,mrcjkb
rocks-git.nvim,,,,,,mrcjkb
rocks-config.nvim,,,,,,mrcjkb
rocks-dev.nvim,,,,,,mrcjkb
rocks-git.nvim,,,,,,mrcjkb
rocks.nvim,,,,,,mrcjkb
rtp.nvim,,,,,,mrcjkb
rustaceanvim,,,,,,mrcjkb
say,,,,,,
@ -139,10 +139,10 @@ std._debug,,,,,,
std.normalize,,,,,,
stdlib,,,,41.2.2,,vyp
teal-language-server,,,http://luarocks.org/dev,,,
telescope.nvim,,,,,5.1,
telescope-manix,,,,,,
telescope.nvim,,,,,5.1,
tiktoken_core,,,,,,natsukium
tl,,,,,,mephistophiles
tl,,,,0.15.3-1,,mephistophiles
toml-edit,,,,,5.1,mrcjkb
tree-sitter-norg,,,,,5.1,mrcjkb
vstruct,,,,,,

View file

@ -81,11 +81,13 @@ newPkgs() {
# could eat too much memory for a standard 4GiB machine.
local -a list
for i in 1 2; do
local l="$($MKTEMP)"
local l
l="$($MKTEMP)"
list[$i]="$l"
toRemove+=("$l")
local expr="$($MKTEMP)"
local expr
expr="$($MKTEMP)"
toRemove+=("$expr")
nixexpr "${!i}" > "$expr"

View file

@ -90,16 +90,30 @@ Hello, world!
Most pre-built executables will not work on NixOS. There are two notable
exceptions: flatpaks and AppImages. For flatpaks see the [dedicated
section](#module-services-flatpak). AppImages will not run "as-is" on NixOS.
First you need to install `appimage-run`: add to `/etc/nixos/configuration.nix`
section](#module-services-flatpak). AppImages can run "as-is" on NixOS.
First you need to enable AppImage support: add to `/etc/nixos/configuration.nix`
```nix
{
environment.systemPackages = [ pkgs.appimage-run ];
programs.appimage.enable = true;
programs.appimage.binfmt = true;
}
```
Then instead of running the AppImage "as-is", run `appimage-run foo.appimage`.
Then you can run the AppImage "as-is" or with `appimage-run foo.appimage`.
If there are shared libraries missing, add them with
```nix
{
programs.appimage.package = pkgs.appimage-run.override {
extraPkgs = pkgs: [
# missing libraries here, e.g.: `pkgs.libepoxy`
];
};
}
```
To make other pre-built executables work on NixOS, you need to package them
with Nix and special helpers like `autoPatchelfHook` or `buildFHSEnv`. See

View file

@ -71,20 +71,20 @@ nix-build -A nixosTests.hostname
### Testing outside the NixOS project {#sec-call-nixos-test-outside-nixos}
Outside the `nixpkgs` repository, you can instantiate the test by first importing the NixOS library,
Outside the `nixpkgs` repository, you can use the `runNixOSTest` function from
`pkgs.testers`:
```nix
let nixos-lib = import (nixpkgs + "/nixos/lib") { };
let pkgs = import <nixpkgs> {};
in
nixos-lib.runTest {
pkgs.testers.runNixOSTest {
imports = [ ./test.nix ];
hostPkgs = pkgs; # the Nixpkgs package set used outside the VMs
defaults.services.foo.package = mypkg;
}
```
`runTest` returns a derivation that runs the test.
`runNixOSTest` returns a derivation that runs the test.
## Configuring the nodes {#sec-nixos-test-nodes}

View file

@ -48,7 +48,7 @@
- The Rust rewrite of the `switch-to-configuration` program is now used for system activation by default.
If you experience any issues, please report them.
The original Perl script can still be used for now by setting `system.switch.enableNg` to `false`.
The original Perl script is deprecated and is planned for removal in the 25.05 release. It will remain accessible until then by setting `system.switch.enableNg` to `false`.
- Support for mounting filesystems from block devices protected with [dm-verity](https://docs.kernel.org/admin-guide/device-mapper/verity.html)
was added through the `boot.initrd.systemd.dmVerity` option.
@ -104,6 +104,8 @@
- [Flood](https://flood.js.org/), a beautiful WebUI for various torrent clients. Available as [services.flood](options.html#opt-services.flood).
- [Niri](https://github.com/YaLTeR/niri), a scrollable-tiling Wayland compositor. Available as [programs.niri](options.html#opt-programs.niri.enable).
- [Firefly-iii Data Importer](https://github.com/firefly-iii/data-importer), a data importer for Firefly-III. Available as [services.firefly-iii-data-importer](options.html#opt-services.firefly-iii-data-importer)
- [QGroundControl], a ground station support and configuration manager for the PX4 and APM Flight Stacks. Available as [programs.qgroundcontrol](options.html#opt-programs.qgroundcontrol.enable).
@ -188,7 +190,9 @@
- [Fedimint](https://github.com/fedimint/fedimint), a module based system for building federated applications (Federated E-Cash Mint). Available as [services.fedimintd](#opt-services.fedimintd).
- [Zapret](https://github.com/bol-van/zapret), a DPI bypass tool. Available as [services.zapret](option.html#opt-services.zapret).
- [Zapret](https://github.com/bol-van/zapret), a DPI bypass tool. Available as [services.zapret](options.html#opt-services.zapret).
- [tiny-dfr](https://github.com/WhatAmISupposedToPutHere/tiny-dfr), a dynamic function row daemon for the Touch Bar found on some Apple laptops. Available as [hardware.apple.touchBar.enable](options.html#opt-hardware.apple.touchBar.enable).
## Backward Incompatibilities {#sec-release-24.11-incompatibilities}
@ -308,6 +312,14 @@
- The method to safely handle secrets in the `networking.wireless` module has been changed to benefit from a [new feature](https://w1.fi/cgit/hostap/commit/?id=e680a51e94a33591f61edb210926bcb71217a21a) of wpa_supplicant.
The syntax to refer to secrets has changed slightly and the option `networking.wireless.environmentFile` has been replaced by `networking.wireless.secretsFile`; see the description of the latter for how to upgrade.
- NetBox was updated to `>= 4.1.0`.
Have a look at the breaking changes
of the [4.0 release](https://github.com/netbox-community/netbox/releases/tag/v4.0.0)
and the [4.1 release](https://github.com/netbox-community/netbox/releases/tag/v4.1.0),
make the required changes to your database, if needed,
then upgrade by setting `services.netbox.package = pkgs.netbox_4_1;`
in your configuration.
- `services.cgit` now runs as the cgit user by default instead of root.
This change requires granting access to the repositories to this user or
setting the appropriate one through `services.cgit.some-instance.user`.
@ -372,6 +384,8 @@
`nodePackages.coc-eslint` and `vimPlugins.coc-eslint` packages offer comparable
features for `eslint`, which replaced `tslint`.
- Tcl packages have been moved into the `tclPackages` scope.
- `teleport` has been upgraded from major version 15 to major version 16.
Refer to upstream [upgrade instructions](https://goteleport.com/docs/management/operations/upgrading/)
and [release notes for v16](https://goteleport.com/docs/changelog/#1600-061324).
@ -708,6 +722,9 @@
- Mikutter was removed because the package was broken and had no maintainers.
- The new option `services.getty.autologinOnce` was added to limit the automatic login to once per boot and on the first tty only.
When using full disk encryption, this option allows unlocking the system without retyping the passphrase while keeping the other ttys protected.
- Gollum was upgraded to major version 6. Read their [migration notes](https://github.com/gollum/gollum/wiki/6.0-Release-Notes).
- The hooks `yarnConfigHook` and `yarnBuildHook` were added. These should replace `yarn2nix.mkYarnPackage` and other `yarn2nix` related tools. The motivation for getting rid of the `yarn2nix` tools is that they are too complex and hard to maintain, and they rely on too much Nix evaluation, which is problematic if import-from-derivation is not allowed (see more details at [#296856](https://github.com/NixOS/nixpkgs/issues/296856)). The transition from `mkYarnPackage` to `yarn{Config,Build}Hook` is tracked at [#324246](https://github.com/NixOS/nixpkgs/issues/324246).
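As a rough sketch of the hook-based pattern these release notes refer to (the package, hashes and exact attribute names such as `yarnOfflineCache` are assumptions for illustration; see the Nixpkgs manual for the authoritative interface):

```nix
{
  stdenv,
  fetchFromGitHub,
  fetchYarnDeps,
  yarnConfigHook,
  yarnBuildHook,
  npmHooks,
  nodejs,
}:

stdenv.mkDerivation (finalAttrs: {
  pname = "example-yarn-app"; # hypothetical package
  version = "1.0.0";

  src = fetchFromGitHub {
    owner = "example";
    repo = "example-yarn-app";
    rev = "v${finalAttrs.version}";
    hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
  };

  # Offline copy of the yarn dependencies, consumed by yarnConfigHook.
  yarnOfflineCache = fetchYarnDeps {
    yarnLock = "${finalAttrs.src}/yarn.lock";
    hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";
  };

  nativeBuildInputs = [
    yarnConfigHook # fetches dependencies from yarnOfflineCache
    yarnBuildHook # runs the package's yarn build script
    npmHooks.npmInstallHook # installs the build output
    nodejs
  ];
})
```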
@ -731,6 +748,8 @@
- The arguments from [](#opt-services.postgresql.initdbArgs) now get shell-escaped.
- Mattermost has been updated from 9.5 to 9.11 ESR. See the [changelog](https://docs.mattermost.com/about/mattermost-v9-changelog.html#release-v9-11-extended-support-release) for more details.
- `cargo-tauri.hook` was introduced to help users build [Tauri](https://tauri.app/) projects. It is meant to be used alongside
`rustPlatform.buildRustPackage` and Node hooks such as `npmConfigHook`, `pnpm.configHook`, and the new `yarnConfig`
@ -759,6 +778,8 @@
- `services.localtimed.enable = true` will now set `time.timeZone = null`.
This is to avoid silently shadowing a user's explicitly defined timezone without recognition on the user's part.
- `qgis` and `qgis-ltr` are now built without `grass` by default. `grass` support can be enabled with `qgis.override { withGrass = true; }`.
## Detailed migration information {#sec-release-24.11-migration}
### `sound` options removal {#sec-release-24.11-migration-sound}

View file

@ -13,7 +13,7 @@ let
types
;
inherit (hostPkgs) hostPlatform;
inherit (hostPkgs.stdenv) hostPlatform;
guestSystem =
if hostPlatform.isLinux

View file

@ -1,24 +1,47 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
inherit (lib) mkOption optionalString types versionAtLeast;
inherit (lib)
mkOption
optionalString
types
versionAtLeast
;
inherit (lib.options) literalExpression;
cfg = config.amazonImage;
amiBootMode = if config.ec2.efi then "uefi" else "legacy-bios";
in {
imports = [ ../../../modules/virtualisation/amazon-image.nix ];
in
{
imports = [
../../../modules/virtualisation/amazon-image.nix
../../../modules/virtualisation/disk-size-option.nix
(lib.mkRenamedOptionModuleWith {
sinceRelease = 2411;
from = [
"amazonImage"
"sizeMB"
];
to = [
"virtualisation"
"diskSize"
];
})
];
# Amazon recommends setting this to the highest possible value for a good EBS
# experience, which prior to 4.15 was 255.
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html#timeout-nvme-ebs-volumes
config.boot.kernelParams =
let timeout =
if versionAtLeast config.boot.kernelPackages.kernel.version "4.15"
then "4294967295"
else "255";
in [ "nvme_core.io_timeout=${timeout}" ];
let
timeout =
if versionAtLeast config.boot.kernelPackages.kernel.version "4.15" then "4294967295" else "255";
in
[ "nvme_core.io_timeout=${timeout}" ];
options.amazonImage = {
name = mkOption {
@ -34,30 +57,32 @@ in {
}
]
'';
default = [];
default = [ ];
description = ''
This option lists files to be copied to fixed locations in the
generated image. Glob patterns work.
'';
};
sizeMB = mkOption {
type = with types; either (enum [ "auto" ]) int;
default = 3072;
example = 8192;
description = "The size in MB of the image";
};
format = mkOption {
type = types.enum [ "raw" "qcow2" "vpc" ];
type = types.enum [
"raw"
"qcow2"
"vpc"
];
default = "vpc";
description = "The image format to output";
};
};
config.system.build.amazonImage = let
configFile = pkgs.writeText "configuration.nix"
''
# Use a priority just below mkOptionDefault (1500) instead of lib.mkDefault
# to avoid breaking existing configs using that.
config.virtualisation.diskSize = lib.mkOverride 1490 (3 * 1024);
config.virtualisation.diskSizeAutoSupported = !config.ec2.zfs.enable;
config.system.build.amazonImage =
let
configFile = pkgs.writeText "configuration.nix" ''
{ modulesPath, ... }: {
imports = [ "''${modulesPath}/virtualisation/amazon-image.nix" ];
${optionalString config.ec2.efi ''
@ -70,91 +95,102 @@ in {
}
'';
zfsBuilder = import ../../../lib/make-multi-disk-zfs-image.nix {
inherit lib config configFile pkgs;
inherit (cfg) contents format name;
zfsBuilder = import ../../../lib/make-multi-disk-zfs-image.nix {
inherit
lib
config
configFile
pkgs
;
inherit (cfg) contents format name;
includeChannel = true;
includeChannel = true;
bootSize = 1000; # 1G is the minimum EBS volume
bootSize = 1000; # 1G is the minimum EBS volume
rootSize = cfg.sizeMB;
rootPoolProperties = {
ashift = 12;
autoexpand = "on";
rootSize = config.virtualisation.diskSize;
rootPoolProperties = {
ashift = 12;
autoexpand = "on";
};
datasets = config.ec2.zfs.datasets;
postVM = ''
extension=''${rootDiskImage##*.}
friendlyName=$out/${cfg.name}
rootDisk="$friendlyName.root.$extension"
bootDisk="$friendlyName.boot.$extension"
mv "$rootDiskImage" "$rootDisk"
mv "$bootDiskImage" "$bootDisk"
mkdir -p $out/nix-support
echo "file ${cfg.format} $bootDisk" >> $out/nix-support/hydra-build-products
echo "file ${cfg.format} $rootDisk" >> $out/nix-support/hydra-build-products
${pkgs.jq}/bin/jq -n \
--arg system_label ${lib.escapeShellArg config.system.nixos.label} \
--arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \
--arg root_logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$rootDisk" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
--arg boot_logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$bootDisk" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
--arg boot_mode "${amiBootMode}" \
--arg root "$rootDisk" \
--arg boot "$bootDisk" \
'{}
| .label = $system_label
| .boot_mode = $boot_mode
| .system = $system
| .disks.boot.logical_bytes = $boot_logical_bytes
| .disks.boot.file = $boot
| .disks.root.logical_bytes = $root_logical_bytes
| .disks.root.file = $root
' > $out/nix-support/image-info.json
'';
};
datasets = config.ec2.zfs.datasets;
extBuilder = import ../../../lib/make-disk-image.nix {
inherit
lib
config
configFile
pkgs
;
postVM = ''
extension=''${rootDiskImage##*.}
friendlyName=$out/${cfg.name}
rootDisk="$friendlyName.root.$extension"
bootDisk="$friendlyName.boot.$extension"
mv "$rootDiskImage" "$rootDisk"
mv "$bootDiskImage" "$bootDisk"
inherit (cfg) contents format name;
mkdir -p $out/nix-support
echo "file ${cfg.format} $bootDisk" >> $out/nix-support/hydra-build-products
echo "file ${cfg.format} $rootDisk" >> $out/nix-support/hydra-build-products
fsType = "ext4";
partitionTableType = if config.ec2.efi then "efi" else "legacy+gpt";
${pkgs.jq}/bin/jq -n \
--arg system_label ${lib.escapeShellArg config.system.nixos.label} \
--arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \
--arg root_logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$rootDisk" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
--arg boot_logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$bootDisk" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
--arg boot_mode "${amiBootMode}" \
--arg root "$rootDisk" \
--arg boot "$bootDisk" \
'{}
| .label = $system_label
| .boot_mode = $boot_mode
| .system = $system
| .disks.boot.logical_bytes = $boot_logical_bytes
| .disks.boot.file = $boot
| .disks.root.logical_bytes = $root_logical_bytes
| .disks.root.file = $root
' > $out/nix-support/image-info.json
'';
};
inherit (config.virtualisation) diskSize;
extBuilder = import ../../../lib/make-disk-image.nix {
inherit lib config configFile pkgs;
postVM = ''
extension=''${diskImage##*.}
friendlyName=$out/${cfg.name}.$extension
mv "$diskImage" "$friendlyName"
diskImage=$friendlyName
inherit (cfg) contents format name;
mkdir -p $out/nix-support
echo "file ${cfg.format} $diskImage" >> $out/nix-support/hydra-build-products
fsType = "ext4";
partitionTableType = if config.ec2.efi then "efi" else "legacy+gpt";
diskSize = cfg.sizeMB;
postVM = ''
extension=''${diskImage##*.}
friendlyName=$out/${cfg.name}.$extension
mv "$diskImage" "$friendlyName"
diskImage=$friendlyName
mkdir -p $out/nix-support
echo "file ${cfg.format} $diskImage" >> $out/nix-support/hydra-build-products
${pkgs.jq}/bin/jq -n \
--arg system_label ${lib.escapeShellArg config.system.nixos.label} \
--arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \
--arg logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$diskImage" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
--arg boot_mode "${amiBootMode}" \
--arg file "$diskImage" \
'{}
| .label = $system_label
| .boot_mode = $boot_mode
| .system = $system
| .logical_bytes = $logical_bytes
| .file = $file
| .disks.root.logical_bytes = $logical_bytes
| .disks.root.file = $file
' > $out/nix-support/image-info.json
'';
};
in if config.ec2.zfs.enable then zfsBuilder else extBuilder;
${pkgs.jq}/bin/jq -n \
--arg system_label ${lib.escapeShellArg config.system.nixos.label} \
--arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \
--arg logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$diskImage" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
--arg boot_mode "${amiBootMode}" \
--arg file "$diskImage" \
'{}
| .label = $system_label
| .boot_mode = $boot_mode
| .system = $system
| .logical_bytes = $logical_bytes
| .file = $file
| .disks.root.logical_bytes = $logical_bytes
| .disks.root.file = $file
' > $out/nix-support/image-info.json
'';
};
in
if config.ec2.zfs.enable then zfsBuilder else extBuilder;
meta.maintainers = with lib.maintainers; [ arianvp ];
}

View file

@ -1,6 +1,11 @@
# nix-build '<nixpkgs/nixos>' -A config.system.build.openstackImage --arg configuration "{ imports = [ ./nixos/maintainers/scripts/openstack/openstack-image.nix ]; }"
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
inherit (lib) mkOption types;
copyChannel = true;
@ -10,9 +15,20 @@ in
{
imports = [
../../../modules/virtualisation/openstack-config.nix
../../../modules/virtualisation/disk-size-option.nix
(lib.mkRenamedOptionModuleWith {
sinceRelease = 2411;
from = [
"openstackImage"
"sizeMB"
];
to = [
"virtualisation"
"diskSize"
];
})
] ++ (lib.optional copyChannel ../../../modules/installer/cd-dvd/channel.nix);
options.openstackImage = {
name = mkOption {
type = types.str;
@ -22,18 +38,15 @@ in
ramMB = mkOption {
type = types.int;
default = 1024;
default = (3 * 1024);
description = "RAM allocation for build VM";
};
sizeMB = mkOption {
type = types.int;
default = 8192;
description = "The size in MB of the image";
};
format = mkOption {
type = types.enum [ "raw" "qcow2" ];
type = types.enum [
"raw"
"qcow2"
];
default = "qcow2";
description = "The image format to output";
};
@ -54,24 +67,28 @@ in
};
};
# Use a priority just below mkOptionDefault (1500) instead of lib.mkDefault
# to avoid breaking existing configs using that.
virtualisation.diskSize = lib.mkOverride 1490 (8 * 1024);
virtualisation.diskSizeAutoSupported = false;
system.build.openstackImage = import ../../../lib/make-single-disk-zfs-image.nix {
inherit lib config;
inherit (cfg) contents format name;
pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
configFile = pkgs.writeText "configuration.nix"
''
{ modulesPath, ... }: {
imports = [ "''${modulesPath}/virtualisation/openstack-config.nix" ];
openstack.zfs.enable = true;
}
'';
configFile = pkgs.writeText "configuration.nix" ''
{ modulesPath, ... }: {
imports = [ "''${modulesPath}/virtualisation/openstack-config.nix" ];
openstack.zfs.enable = true;
}
'';
includeChannel = copyChannel;
bootSize = 1000;
memSize = cfg.ramMB;
rootSize = cfg.sizeMB;
rootSize = config.virtualisation.diskSize;
rootPoolProperties = {
ashift = 12;
autoexpand = "on";

View file

@ -161,9 +161,12 @@ in
script = ''
${lib.getExe cfg.package} -u
files=(/run/resolvconf ${lib.escapeShellArgs cfg.subscriberFiles})
chgrp -R resolvconf "''${files[@]}"
chmod -R g=u "''${files[@]}"
chgrp resolvconf ${lib.escapeShellArgs cfg.subscriberFiles}
chmod g=u ${lib.escapeShellArgs cfg.subscriberFiles}
${lib.getExe' pkgs.acl "setfacl"} -R \
-m group:resolvconf:rwx \
-m default:group:resolvconf:rwx \
/run/resolvconf
'';
};

View file

@ -0,0 +1,43 @@
{
config,
lib,
pkgs,
...
}:
let
cfg = config.hardware.apple.touchBar;
format = pkgs.formats.toml { };
cfgFile = format.generate "config.toml" cfg.settings;
in
{
options.hardware.apple.touchBar = {
enable = lib.mkEnableOption "support for the Touch Bar on some Apple laptops using tiny-dfr";
package = lib.mkPackageOption pkgs "tiny-dfr" { };
settings = lib.mkOption {
type = format.type;
default = { };
description = ''
Configuration for tiny-dfr. See [example configuration][1] for available options.
[1]: https://github.com/WhatAmISupposedToPutHere/tiny-dfr/blob/master/share/tiny-dfr/config.toml
'';
example = lib.literalExpression ''
{
MediaLayerDefault = true;
ShowButtonOutlines = false;
EnablePixelShift = true;
}
'';
};
};
config = lib.mkIf cfg.enable {
systemd.packages = [ cfg.package ];
services.udev.packages = [ cfg.package ];
environment.etc."tiny-dfr/config.toml".source = cfgFile;
systemd.services.tiny-dfr.restartTriggers = [ cfgFile ];
};
}

View file

@ -48,6 +48,7 @@
./config/zram.nix
./hardware/acpilight.nix
./hardware/all-firmware.nix
./hardware/apple-touchbar.nix
./hardware/bladeRF.nix
./hardware/brillo.nix
./hardware/ckb-next.nix
@ -171,6 +172,7 @@
./programs/cpu-energy-meter.nix
./programs/command-not-found/command-not-found.nix
./programs/coolercontrol.nix
./programs/corefreq.nix
./programs/criu.nix
./programs/darling.nix
./programs/dconf.nix
@ -311,6 +313,7 @@
./programs/wayland/hyprland.nix
./programs/wayland/labwc.nix
./programs/wayland/miracle-wm.nix
./programs/wayland/niri.nix
./programs/wayland/river.nix
./programs/wayland/sway.nix
./programs/wayland/uwsm.nix
@ -1547,6 +1550,7 @@
./services/web-servers/phpfpm/default.nix
./services/web-servers/pomerium.nix
./services/web-servers/rustus.nix
./services/web-servers/send.nix
./services/web-servers/stargazer.nix
./services/web-servers/static-web-server.nix
./services/web-servers/tomcat.nix

View file

@ -17,7 +17,7 @@ in
enable = lib.mkEnableOption "the 1Password CLI tool";
package = lib.mkPackageOption pkgs "1Password CLI" {
default = [ "_1password" ];
default = [ "_1password-cli" ];
};
};
};

View file

@ -0,0 +1,42 @@
{
config,
lib,
pkgs,
...
}:
let
cfg = config.programs.corefreq;
kernelPackages = config.boot.kernelPackages;
in
{
options = {
programs.corefreq = {
enable = lib.mkEnableOption "Whether to enable the corefreq daemon and kernel module";
package = lib.mkOption {
type = lib.types.package;
default = kernelPackages.corefreq;
defaultText = lib.literalExpression "config.boot.kernelPackages.corefreq";
description = ''
The corefreq package to use.
'';
};
};
};
config = lib.mkIf cfg.enable {
environment.systemPackages = [ cfg.package ];
boot.extraModulePackages = [ cfg.package ];
boot.kernelModules = [ "corefreqk" ];
# Create a systemd service for the corefreq daemon
systemd.services.corefreq = {
description = "CoreFreq daemon";
wantedBy = [ "multi-user.target" ];
serviceConfig = {
ExecStart = lib.getExe' cfg.package "corefreqd";
};
};
};
}

View file

@ -0,0 +1,55 @@
{
config,
lib,
pkgs,
...
}:
let
cfg = config.programs.niri;
in
{
options.programs.niri = {
enable = lib.mkEnableOption "Niri, a scrollable-tiling Wayland compositor";
package = lib.mkPackageOption pkgs "niri" { };
};
config = lib.mkIf cfg.enable (
lib.mkMerge [
{
environment.systemPackages = [ cfg.package ];
services = {
displayManager.sessionPackages = [ cfg.package ];
# Recommended by upstream
# https://github.com/YaLTeR/niri/wiki/Important-Software#portals
gnome.gnome-keyring.enable = lib.mkDefault true;
};
systemd.packages = [ cfg.package ];
xdg.portal = {
enable = lib.mkDefault true;
configPackages = [ cfg.package ];
# Recommended by upstream, required for screencast support
# https://github.com/YaLTeR/niri/wiki/Important-Software#portals
extraPortals = [ pkgs.xdg-desktop-portal-gnome ];
};
}
(import ./wayland-session.nix {
inherit lib pkgs;
enableWlrPortal = false;
enableXWayland = false;
})
]
);
meta.maintainers = with lib.maintainers; [
getchoo
sodiboo
];
}

View file

@ -144,7 +144,19 @@ in
services.displayManager.sessionPackages = lib.optional (cfg.package != null) cfg.package;
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1050913
xdg.portal.config.sway.default = lib.mkDefault [ "wlr" "gtk" ];
# https://github.com/emersion/xdg-desktop-portal-wlr/blob/master/contrib/wlroots-portals.conf
# https://github.com/emersion/xdg-desktop-portal-wlr/pull/315
xdg.portal.config.sway = {
# Use xdg-desktop-portal-gtk for every portal interface...
default = "gtk";
# ... except for the ScreenCast, Screenshot and Secret
"org.freedesktop.impl.portal.ScreenCast" = "wlr";
"org.freedesktop.impl.portal.Screenshot" = "wlr";
# Ignore Inhibit because the GTK portal always reports success even though
# sway/the wlr portal has no implementation, which stops Firefox from
# using Wayland idle-inhibit.
"org.freedesktop.impl.portal.Inhibit" = "none";
};
}
(import ./wayland-session.nix {

View file

@ -108,18 +108,19 @@ in
systemd.packages = [ cfg.package ];
environment.pathsToLink = [ "/share/uwsm" ];
services.graphical-desktop.enable = true;
# UWSM recommends dbus broker for better compatibility
services.dbus.implementation = "broker";
services.displayManager.sessionPackages = lib.mapAttrsToList (
name: value:
mk_uwsm_desktop_entry {
inherit name;
inherit (value) prettyName comment binPath;
}
) cfg.waylandCompositors;
services.displayManager = {
enable = true;
sessionPackages = lib.mapAttrsToList (
name: value:
mk_uwsm_desktop_entry {
inherit name;
inherit (value) prettyName comment binPath;
}
) cfg.waylandCompositors;
};
};
meta.maintainers = with lib.maintainers; [

View file

@ -87,6 +87,15 @@ in
Without this option it would default to the read-only nix store.
'';
};
preLoaded = lib.mkOption {
type = lib.types.lines;
default = "";
description = ''
Shell commands executed before `oh-my-zsh` is loaded.
For example, to disable the async git prompt, write `zstyle ':omz:alpha:lib:git' async-prompt force` (more information: https://github.com/ohmyzsh/ohmyzsh?tab=readme-ov-file#async-git-prompt).
'';
};
};
};
@ -120,6 +129,7 @@ in
ZSH_CACHE_DIR=${cfg.cacheDir}
''}
${cfg.preLoaded}
source $ZSH/oh-my-zsh.sh
'';
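A hedged sketch of setting the new `preLoaded` option from a system configuration, assuming the usual `programs.zsh.ohMyZsh` option prefix:

```nix
{
  programs.zsh.ohMyZsh.preLoaded = ''
    # Disable the async git prompt, as in the option description above.
    zstyle ':omz:alpha:lib:git' async-prompt force
  '';
}
```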

View file

@ -24,7 +24,8 @@ in
internal = true;
};
security.pki.useCompatibleBundle = mkEnableOption ''usage of a compatibility bundle.
security.pki.useCompatibleBundle = mkEnableOption ''
usage of a compatibility bundle.
Such a bundle consists exclusively of `BEGIN CERTIFICATE` and no `BEGIN TRUSTED CERTIFICATE`,
which is an OpenSSL specific PEM format.

View file

@ -165,6 +165,10 @@ in
###### interface
options = {
security.enableWrappers = lib.mkEnableOption "SUID/SGID wrappers" // {
default = true;
};
security.wrappers = lib.mkOption {
type = lib.types.attrsOf wrapperType;
default = {};
@ -227,7 +231,7 @@ in
};
###### implementation
config = {
config = lib.mkIf config.security.enableWrappers {
assertions = lib.mapAttrsToList
(name: opts:

View file

@ -260,7 +260,7 @@ in {
systemd.services.jack-session = {
description = "JACK session";
script = ''
jack_wait -w
${pkgs.jack-example-tools}/bin/jack_wait -w
${cfg.jackd.session}
${lib.optionalString cfg.loopback.enable cfg.loopback.session}
'';

View file

@ -4,7 +4,7 @@ let
cfg = config.services.buildbot-master;
opt = options.services.buildbot-master;
package = pkgs.python3.pkgs.toPythonModule cfg.package;
package = cfg.package.python.pkgs.toPythonModule cfg.package;
python = package.pythonModule;
escapeStr = lib.escape [ "'" ];

View file

@ -36,7 +36,9 @@ let
} // cfg.extraSettings;
serverSettingsString = builtins.toJSON (lib.filterAttrsRecursive (n: v: v != null) serverSettings);
serverSettingsFile = pkgs.writeText "server-settings.json" serverSettingsString;
serverAdminsFile = pkgs.writeText "server-adminlist.json" (builtins.toJSON cfg.admins);
playerListOption = name: list:
lib.optionalString (list != [])
"--${name}=${pkgs.writeText "${name}.json" (builtins.toJSON list)}";
modDir = pkgs.factorio-utils.mkModDirDrv cfg.mods cfg.mods-dat;
in
{
@ -59,6 +61,30 @@ in
'';
};
allowedPlayers = lib.mkOption {
# I would personally prefer for `allowedPlayers = []` to mean "no-one
# can connect" but Factorio seems to ignore empty whitelists (even with
# --use-server-whitelist) so we can't implement that behaviour, so we
# might as well match theirs.
type = lib.types.listOf lib.types.str;
default = [];
example = [ "Rseding91" "Oxyd" ];
description = ''
If non-empty, only these player names are allowed to connect. The game
will not be able to save any changes made in-game with the /whitelist
console command, though they will still take effect until the server
is restarted.
If empty, the whitelist defaults to open, but can be managed with the
in-game /whitelist console command (see: /help whitelist), which will
cause changes to be saved to the game's state directory (see also:
`stateDirName`).
'';
};
# Opting not to include the banlist in addition to the whitelist because:
# - banlists are not as often known in advance,
# - losing banlist changes on restart seems much more of a headache.
admins = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [];
@ -177,7 +203,7 @@ in
extraSettings = lib.mkOption {
type = lib.types.attrs;
default = {};
example = { admins = [ "username" ];};
example = { max_players = 64; };
description = ''
Extra game configuration that will go into server-settings.json
'';
@ -298,7 +324,9 @@ in
}"
(lib.optionalString cfg.loadLatestSave "--start-server-load-latest")
(lib.optionalString (cfg.mods != []) "--mod-directory=${modDir}")
(lib.optionalString (cfg.admins != []) "--server-adminlist=${serverAdminsFile}")
(playerListOption "server-adminlist" cfg.admins)
(playerListOption "server-whitelist" cfg.allowedPlayers)
(lib.optionalString (cfg.allowedPlayers != []) "--use-server-whitelist")
];
# Sandboxing

View file

@ -42,6 +42,15 @@ Here, `passwordFile` is the path to a file containing just the password in
plaintext. Make sure to set permissions to make this file unreadable to any
user besides root.
By default, synced data are stored in */var/lib/anki-sync-server/ankiuser*.
You can change the directory by using `services.anki-sync-server.baseDirectory`:
```nix
{
services.anki-sync-server.baseDirectory = "/home/anki/data";
}
```
By default, the server listen address {option}`services.anki-sync-server.host`
is set to localhost, listening on port
{option}`services.anki-sync-server.port`, and does not open the firewall. This

View file

@ -59,6 +59,13 @@ in {
description = "Port number anki-sync-server listens to.";
};
baseDirectory = mkOption {
type = types.str;
default = "%S/%N";
description = "Base directory where user(s) synchronized data will be stored.";
};
openFirewall = mkOption {
default = false;
type = types.bool;
@ -114,7 +121,7 @@ in {
wantedBy = ["multi-user.target"];
path = [cfg.package];
environment = {
SYNC_BASE = "%S/%N";
SYNC_BASE = cfg.baseDirectory;
SYNC_HOST = specEscape cfg.address;
SYNC_PORT = toString cfg.port;
};

View file

@ -37,7 +37,7 @@ in
config = lib.mkIf cfg.enable {
systemd.services.bazarr = {
description = "bazarr";
description = "Bazarr";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
@ -54,6 +54,8 @@ in
--no-update True
'';
Restart = "on-failure";
KillSignal = "SIGINT";
SuccessExitStatus = "0 156";
};
};

View file

@ -12,7 +12,7 @@ let
"--port" = cfg.port;
"--auth-mode" = cfg.auth.mode;
"--userdb" = cfg.auth.userDb;
}) ++ [(lib.optionalString (cfg.auth.enable == true) "--enable-auth")])
}) ++ [ (lib.optionalString (cfg.auth.enable == true) "--enable-auth") ] ++ cfg.extraFlags)
);
in
@ -42,6 +42,15 @@ in
'';
};
extraFlags = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
description = ''
Extra flags to pass to the calibre-server command.
See the [calibre-server documentation](${generatedDocumentationLink}) for details.
'';
};
user = lib.mkOption {
type = lib.types.str;
default = "calibre-server";
@ -73,6 +82,13 @@ in
'';
};
openFirewall = lib.mkOption {
type = lib.types.bool;
default = false;
description =
"Open ports in the firewall for the Calibre Server web interface.";
};
auth = {
enable = lib.mkOption {
type = lib.types.bool;
@ -137,6 +153,9 @@ in
};
};
networking.firewall =
lib.mkIf cfg.openFirewall { allowedTCPPorts = [ cfg.port ]; };
};
meta.maintainers = with lib.maintainers; [ gaelreyrol ];

View file

@ -40,7 +40,7 @@ in
###### implementation
config = lib.mkIf cfg.enable {
dysnomia.enable = true;
services.dysnomia.enable = true;
environment.systemPackages = [ pkgs.disnix ] ++ lib.optional cfg.useWebServiceInterface pkgs.DisnixWebService;
environment.variables.PATH = lib.optionals cfg.enableProfilePath (map (profileName: "/nix/var/nix/profiles/disnix/${profileName}/bin" ) cfg.profiles);
@ -74,7 +74,7 @@ in
restartIfChanged = false;
path = [ config.nix.package cfg.package config.dysnomia.package "/run/current-system/sw" ];
path = [ config.nix.package cfg.package config.services.dysnomia.package "/run/current-system/sw" ];
environment = {
HOME = "/root";

View file

@ -1,6 +1,6 @@
{pkgs, lib, config, ...}:
let
cfg = config.dysnomia;
cfg = config.services.dysnomia;
printProperties = properties:
lib.concatMapStrings (propertyName:
@ -79,7 +79,7 @@ let
in
{
options = {
dysnomia = {
services.dysnomia = {
enable = lib.mkOption {
type = lib.types.bool;
@ -142,6 +142,10 @@ in
};
};
imports = [
(lib.mkRenamedOptionModule ["dysnomia"] ["services" "dysnomia"])
];
config = lib.mkIf cfg.enable {
environment.etc = {
@ -164,7 +168,7 @@ in
environment.systemPackages = [ cfg.package ];
dysnomia.package = pkgs.dysnomia.override (origArgs: dysnomiaFlags // lib.optionalAttrs (cfg.enableLegacyModules) {
services.dysnomia.package = pkgs.dysnomia.override (origArgs: dysnomiaFlags // lib.optionalAttrs (cfg.enableLegacyModules) {
enableLegacy = builtins.trace ''
WARNING: Dysnomia has been configured to use the legacy 'process' and 'wrapper'
modules for compatibility reasons! If you rely on these modules, consider
@ -181,7 +185,7 @@ in
'' true;
});
dysnomia.properties = {
services.dysnomia.properties = {
hostname = config.networking.hostName;
inherit (pkgs.stdenv.hostPlatform) system;
@ -208,7 +212,7 @@ in
++ lib.optional (dysnomiaFlags.enableSubversionRepository) "subversion-repository";
};
dysnomia.containers = lib.recursiveUpdate ({
services.dysnomia.containers = lib.recursiveUpdate ({
process = {};
wrapper = {};
}

View file

@ -113,6 +113,7 @@ in
'';
serial = lib.mkOption {
type = lib.types.nullOr path;
default = null;
description = "Path to serial port this printer is connected to. Leave `null` to derive it from `service.klipper.settings`.";
};
configFile = lib.mkOption {

View file

@ -26,7 +26,7 @@ in
How often or when garbage collection is performed. For most desktop and server systems
a sufficient garbage collection is once a week.
The format is described in
This value must be a calendar event in the format specified by
{manpage}`systemd.time(7)`.
'';
};

View file

@ -290,11 +290,12 @@ in
''
+ optionalString (cfg.passwordFile != null) ''
export PAPERLESS_ADMIN_USER="''${PAPERLESS_ADMIN_USER:-admin}"
export PAPERLESS_ADMIN_PASSWORD=$(cat $CREDENTIALS_DIRECTORY/PAPERLESS_ADMIN_PASSWORD)
PAPERLESS_ADMIN_PASSWORD=$(cat "$CREDENTIALS_DIRECTORY/PAPERLESS_ADMIN_PASSWORD")
export PAPERLESS_ADMIN_PASSWORD
superuserState="$PAPERLESS_ADMIN_USER:$PAPERLESS_ADMIN_PASSWORD"
superuserStateFile="${cfg.dataDir}/superuser-state"
if [[ $(cat "$superuserStateFile" 2>/dev/null) != $superuserState ]]; then
if [[ $(cat "$superuserStateFile" 2>/dev/null) != "$superuserState" ]]; then
${cfg.package}/bin/paperless-ngx manage_superuser
echo "$superuserState" > "$superuserStateFile"
fi
@ -353,7 +354,8 @@ in
tr -dc A-Za-z0-9 < /dev/urandom | head -c64 | ${pkgs.moreutils}/bin/sponge '${secretKeyFile}'
)
fi
export PAPERLESS_SECRET_KEY=$(cat '${secretKeyFile}')
PAPERLESS_SECRET_KEY="$(cat '${secretKeyFile}')"
export PAPERLESS_SECRET_KEY
if [[ ! $PAPERLESS_SECRET_KEY ]]; then
echo "PAPERLESS_SECRET_KEY is empty, refusing to start."
exit 1

View file

@ -31,6 +31,20 @@ in
Enable GPU monitoring.
'';
};
temperature = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Enable temperature monitoring.
'';
};
useIPv6CountryCode = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Use the IPv6 country code to report location.
'';
};
disableCommandExecute = lib.mkOption {
type = lib.types.bool;
default = true;
@ -78,6 +92,14 @@ in
Address to the dashboard
'';
};
extraFlags = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
example = [ "--gpu" ];
description = ''
Extra command-line flags passed to nezha-agent.
'';
};
};
};
@ -96,7 +118,7 @@ in
startLimitBurst = 3;
script = lib.concatStringsSep " " (
[
"${cfg.package}/bin/agent"
"${lib.getExe cfg.package}"
"--disable-auto-update"
"--disable-force-update"
"--password $(cat ${cfg.passwordFile})"
@ -109,6 +131,9 @@ in
++ lib.optional cfg.skipProcess "--skip-procs"
++ lib.optional cfg.tls "--tls"
++ lib.optional cfg.gpu "--gpu"
++ lib.optional cfg.temperature "--temperature"
++ lib.optional cfg.useIPv6CountryCode "--use-ipv6-countrycode"
++ cfg.extraFlags
);
wantedBy = [ "multi-user.target" ];
};

View file

@ -201,6 +201,26 @@ let
};
};
promTypes.sigv4 = types.submodule {
options = {
region = mkOpt types.str ''
The AWS region.
'';
access_key = mkOpt types.str ''
The Access Key ID.
'';
secret_key = mkOpt types.str ''
The Secret Access Key.
'';
profile = mkOpt types.str ''
The named AWS profile used to authenticate.
'';
role_arn = mkOpt types.str ''
The AWS role ARN.
'';
};
};
promTypes.tls_config = types.submodule {
options = {
ca_file = mkOpt types.str ''
@ -1464,6 +1484,9 @@ let
Sets the `Authorization` header on every remote write request with the bearer token
read from the configured file. It is mutually exclusive with `bearer_token`.
'';
sigv4 = mkOpt promTypes.sigv4 ''
Configures AWS Signature Version 4 settings.
'';
tls_config = mkOpt promTypes.tls_config ''
Configures the remote write request's TLS settings.
'';
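A hedged sketch of using the new `sigv4` settings from a remote-write block, assuming the standard `services.prometheus.remoteWrite` option; the workspace URL is a placeholder:

```nix
{
  services.prometheus.remoteWrite = [{
    # Placeholder Amazon Managed Prometheus endpoint.
    url = "https://aps-workspaces.eu-west-1.amazonaws.com/workspaces/ws-example/api/v1/remote_write";
    sigv4.region = "eu-west-1";
  }];
}
```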

View file

@ -50,6 +50,7 @@ let
"junos-czerwonk"
"kea"
"keylight"
"klipper"
"knot"
"lnd"
"mail"

View file

@ -0,0 +1,55 @@
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.prometheus.exporters.klipper;
inherit (lib)
mkOption
mkMerge
mkIf
types
concatStringsSep
any
optionalString
;
moonraker = config.services.moonraker;
in
{
port = 9101;
extraOpts = {
package = lib.mkPackageOption pkgs "prometheus-klipper-exporter" { };
moonrakerApiKey = mkOption {
type = types.str;
default = "";
description = ''
API Key to authenticate with the Moonraker APIs.
Only needed if the host running the exporter is not a trusted client to Moonraker.
'';
};
};
serviceOpts = mkMerge (
[
{
serviceConfig = {
ExecStart = concatStringsSep " " [
"${cfg.package}/bin/prometheus-klipper-exporter"
(optionalString (cfg.moonrakerApiKey != "") "--moonraker.apikey ${cfg.moonrakerApiKey}")
"--web.listen-address ${cfg.listenAddress}:${toString cfg.port}"
"${concatStringsSep " " cfg.extraFlags}"
];
};
}
]
++ [
(mkIf config.services.moonraker.enable {
after = [ "moonraker.service" ];
requires = [ "moonraker.service" ];
})
]
);
}
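A hedged sketch of enabling the new exporter, assuming the standard `services.prometheus.exporters.<name>` plumbing; the API key is a placeholder and only needed when this host is not a trusted Moonraker client:

```nix
{
  services.prometheus.exporters.klipper = {
    enable = true;
    moonrakerApiKey = "XXXXXXXXXXXX"; # placeholder
  };
}
```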

View file

@ -317,6 +317,47 @@ in
Type = "dbus";
ExecStart = "${cfg.package}/sbin/avahi-daemon --syslog -f ${avahiDaemonConf}";
ConfigurationDirectory = "avahi/services";
# Hardening
CapabilityBoundingSet = [
# https://github.com/avahi/avahi/blob/v0.9-rc1/avahi-daemon/caps.c#L38
"CAP_SYS_CHROOT"
"CAP_SETUID"
"CAP_SETGID"
];
DevicePolicy = "closed";
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = true;
PrivateTmp = true;
PrivateUsers = false;
ProcSubset = "pid";
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProtectSystem = "strict";
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
"AF_NETLINK"
"AF_UNIX"
];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
SystemCallFilter = [
"@system-service"
"~@privileged"
"@chown setgroups setresuid"
];
UMask = "0077";
};
};

View file

@ -1,41 +1,41 @@
{ config, lib, pkgs, ... }:
{ config, lib, pkgs, utils, ... }:
let
cfg = config.services.coturn;
pidfile = "/run/turnserver/turnserver.pid";
configFile = pkgs.writeText "turnserver.conf" ''
listening-port=${toString cfg.listening-port}
tls-listening-port=${toString cfg.tls-listening-port}
alt-listening-port=${toString cfg.alt-listening-port}
alt-tls-listening-port=${toString cfg.alt-tls-listening-port}
${lib.concatStringsSep "\n" (map (x: "listening-ip=${x}") cfg.listening-ips)}
${lib.concatStringsSep "\n" (map (x: "relay-ip=${x}") cfg.relay-ips)}
min-port=${toString cfg.min-port}
max-port=${toString cfg.max-port}
${lib.optionalString cfg.lt-cred-mech "lt-cred-mech"}
${lib.optionalString cfg.no-auth "no-auth"}
${lib.optionalString cfg.use-auth-secret "use-auth-secret"}
${lib.optionalString (cfg.static-auth-secret != null) ("static-auth-secret=${cfg.static-auth-secret}")}
${lib.optionalString (cfg.static-auth-secret-file != null) ("static-auth-secret=#static-auth-secret#")}
realm=${cfg.realm}
${lib.optionalString cfg.no-udp "no-udp"}
${lib.optionalString cfg.no-tcp "no-tcp"}
${lib.optionalString cfg.no-tls "no-tls"}
${lib.optionalString cfg.no-dtls "no-dtls"}
${lib.optionalString cfg.no-udp-relay "no-udp-relay"}
${lib.optionalString cfg.no-tcp-relay "no-tcp-relay"}
${lib.optionalString (cfg.cert != null) "cert=${cfg.cert}"}
${lib.optionalString (cfg.pkey != null) "pkey=${cfg.pkey}"}
${lib.optionalString (cfg.dh-file != null) ("dh-file=${cfg.dh-file}")}
no-stdout-log
syslog
pidfile=${pidfile}
${lib.optionalString cfg.secure-stun "secure-stun"}
${lib.optionalString cfg.no-cli "no-cli"}
cli-ip=${cfg.cli-ip}
cli-port=${toString cfg.cli-port}
${lib.optionalString (cfg.cli-password != null) ("cli-password=${cfg.cli-password}")}
${cfg.extraConfig}
'';
listening-port=${toString cfg.listening-port}
tls-listening-port=${toString cfg.tls-listening-port}
alt-listening-port=${toString cfg.alt-listening-port}
alt-tls-listening-port=${toString cfg.alt-tls-listening-port}
${lib.concatStringsSep "\n" (map (x: "listening-ip=${x}") cfg.listening-ips)}
${lib.concatStringsSep "\n" (map (x: "relay-ip=${x}") cfg.relay-ips)}
min-port=${toString cfg.min-port}
max-port=${toString cfg.max-port}
${lib.optionalString cfg.lt-cred-mech "lt-cred-mech"}
${lib.optionalString cfg.no-auth "no-auth"}
${lib.optionalString cfg.use-auth-secret "use-auth-secret"}
${lib.optionalString (cfg.static-auth-secret != null) "static-auth-secret=${cfg.static-auth-secret}"}
${lib.optionalString (cfg.static-auth-secret-file != null) "static-auth-secret=#static-auth-secret#"}
realm=${cfg.realm}
${lib.optionalString cfg.no-udp "no-udp"}
${lib.optionalString cfg.no-tcp "no-tcp"}
${lib.optionalString cfg.no-tls "no-tls"}
${lib.optionalString cfg.no-dtls "no-dtls"}
${lib.optionalString cfg.no-udp-relay "no-udp-relay"}
${lib.optionalString cfg.no-tcp-relay "no-tcp-relay"}
${lib.optionalString (cfg.cert != null) "cert=${cfg.cert}"}
${lib.optionalString (cfg.pkey != null) "pkey=${cfg.pkey}"}
${lib.optionalString (cfg.dh-file != null) "dh-file=${cfg.dh-file}"}
no-stdout-log
syslog
pidfile=${pidfile}
${lib.optionalString cfg.secure-stun "secure-stun"}
${lib.optionalString cfg.no-cli "no-cli"}
cli-ip=${cfg.cli-ip}
cli-port=${toString cfg.cli-port}
${lib.optionalString (cfg.cli-password != null) "cli-password=${cfg.cli-password}"}
${cfg.extraConfig}
'';
in {
options = {
services.coturn = {
@ -301,7 +301,7 @@ in {
};
};
config = lib.mkIf cfg.enable (lib.mkMerge ([
config = lib.mkIf cfg.enable (lib.mkMerge [
{ assertions = [
{ assertion = cfg.static-auth-secret != null -> cfg.static-auth-secret-file == null ;
message = "static-auth-secret and static-auth-secret-file cannot be set at the same time";
@ -341,25 +341,66 @@ in {
'' }
chmod 640 ${runConfig}
'';
serviceConfig = {
serviceConfig = rec {
Type = "simple";
ExecStart = "${pkgs.coturn}/bin/turnserver -c ${runConfig}";
RuntimeDirectory = "turnserver";
ExecStart = utils.escapeSystemdExecArgs [
(lib.getExe' pkgs.coturn "turnserver")
"-c"
runConfig
];
User = "turnserver";
Group = "turnserver";
AmbientCapabilities =
lib.mkIf (
cfg.listening-port < 1024 ||
cfg.alt-listening-port < 1024 ||
cfg.tls-listening-port < 1024 ||
cfg.alt-tls-listening-port < 1024 ||
cfg.min-port < 1024
) "cap_net_bind_service";
RuntimeDirectory = [
"coturn"
"turnserver"
];
RuntimeDirectoryMode = "0700";
Restart = "on-abort";
# Hardening
AmbientCapabilities = if
cfg.listening-port < 1024 ||
cfg.alt-listening-port < 1024 ||
cfg.tls-listening-port < 1024 ||
cfg.alt-tls-listening-port < 1024 ||
cfg.min-port < 1024
then [ "CAP_NET_BIND_SERVICE" ] else [ "" ];
CapabilityBoundingSet = AmbientCapabilities;
DevicePolicy = "closed";
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = true;
PrivateTmp = true;
PrivateUsers = true;
ProcSubset = "pid";
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProtectSystem = "strict";
RemoveIPC = true;
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
] ++ lib.optionals (cfg.listening-ips == [ ]) [
# only used for interface discovery when no listening ips are configured
"AF_NETLINK"
];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
SystemCallFilter = [
"@system-service"
"~@privileged @resources"
];
UMask = "0077";
};
};
systemd.tmpfiles.rules = [
"d /run/coturn 0700 turnserver turnserver - -"
];
}]));
}]);
}

View file

@ -249,7 +249,7 @@ in
ExecReload = "${dhcpcd}/sbin/dhcpcd --rebind";
Restart = "always";
AmbientCapabilities = [ "CAP_NET_ADMIN" "CAP_NET_RAW" "CAP_NET_BIND_SERVICE" ];
ReadWritePaths = [ "/proc/sys/net/ipv6" ]
ReadWritePaths = [ "/proc/sys/net/ipv4" "/proc/sys/net/ipv6" ]
++ lib.optionals useResolvConf ([ "/run/resolvconf" ] ++ config.networking.resolvconf.subscriberFiles);
DeviceAllow = "";
LockPersonality = true;

View file

@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
@ -8,10 +13,8 @@ let
cfg = config.services.ntp;
stateDir = "/var/lib/ntp";
configFile = pkgs.writeText "ntp.conf" ''
driftfile ${stateDir}/ntp.drift
driftfile /var/lib/ntp/ntp.drift
restrict default ${toString cfg.restrictDefault}
restrict -6 default ${toString cfg.restrictDefault}
@ -25,7 +28,12 @@ let
${cfg.extraConfig}
'';
ntpFlags = [ "-c" "${configFile}" "-u" "ntp:ntp" ] ++ cfg.extraFlags;
ntpFlags = [
"-c"
"${configFile}"
"-u"
"ntp:ntp"
] ++ cfg.extraFlags;
in
@ -58,7 +66,14 @@ in
recommended in section 6.5.1.1.3, answer "No" of
https://support.ntp.org/Support/AccessRestrictions
'';
default = [ "limited" "kod" "nomodify" "notrap" "noquery" "nopeer" ];
default = [
"limited"
"kod"
"nomodify"
"notrap"
"noquery"
"nopeer"
];
};
restrictSource = mkOption {
@ -69,7 +84,13 @@ in
The default flags allow peers to be added by ntpd from configured
pool(s), but not by other means.
'';
default = [ "limited" "kod" "nomodify" "notrap" "noquery" ];
default = [
"limited"
"kod"
"nomodify"
"notrap"
"noquery"
];
};
servers = mkOption {
@ -96,14 +117,13 @@ in
type = types.listOf types.str;
description = "Extra flags passed to the ntpd command.";
example = literalExpression ''[ "--interface=eth0" ]'';
default = [];
default = [ ];
};
};
};
###### implementation
config = mkIf config.services.ntp.enable {
@ -113,34 +133,57 @@ in
environment.systemPackages = [ pkgs.ntp ];
services.timesyncd.enable = mkForce false;
systemd.services.systemd-timedated.environment = { SYSTEMD_TIMEDATED_NTP_SERVICES = "ntpd.service"; };
systemd.services.systemd-timedated.environment = {
SYSTEMD_TIMEDATED_NTP_SERVICES = "ntpd.service";
};
users.users.ntp =
{ isSystemUser = true;
group = "ntp";
description = "NTP daemon user";
home = stateDir;
};
users.groups.ntp = {};
systemd.services.ntpd =
{ description = "NTP Daemon";
wantedBy = [ "multi-user.target" ];
wants = [ "time-sync.target" ];
before = [ "time-sync.target" ];
preStart =
''
mkdir -m 0755 -p ${stateDir}
chown ntp ${stateDir}
'';
serviceConfig = {
ExecStart = "@${ntp}/bin/ntpd ntpd -g ${builtins.toString ntpFlags}";
Type = "forking";
};
users.users.ntp = {
isSystemUser = true;
group = "ntp";
description = "NTP daemon user";
home = "/var/lib/ntp";
};
users.groups.ntp = { };
systemd.services.ntpd = {
description = "NTP Daemon";
wantedBy = [ "multi-user.target" ];
wants = [ "time-sync.target" ];
before = [ "time-sync.target" ];
serviceConfig = {
ExecStart = "@${ntp}/bin/ntpd ntpd -g ${builtins.toString ntpFlags}";
Type = "forking";
StateDirectory = "ntp";
# Hardening options
PrivateDevices = true;
PrivateIPC = true;
PrivateTmp = true;
ProtectClock = false;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectSystem = true;
RestrictNamespaces = true;
RestrictRealtime = true;
LockPersonality = true;
MemoryDenyWriteExecute = true;
AmbientCapabilities = [
"CAP_SYS_TIME"
];
ProtectControlGroups = true;
ProtectProc = "invisible";
ProcSubset = "pid";
RestrictSUIDSGID = true;
};
};
};

View file

@ -101,15 +101,12 @@ with lib;
secrets="/etc/ppp-pptpd/chap-secrets"
[ -f "$secrets" ] || cat > "$secrets" << EOF
[ -f "$secrets" ] || install -m 600 -o root -g root /dev/stdin "$secrets" << EOF
# From: pptpd-1.4.0/samples/chap-secrets
# Secrets for authentication using CHAP
# client server secret IP addresses
#username pptpd password *
EOF
chown root:root "$secrets"
chmod 600 "$secrets"
'';
serviceConfig = {

View file

@ -52,7 +52,7 @@ in {
default = { };
description = ''
Configuration for Radicale. See
<https://radicale.org/3.0.html#documentation/configuration>.
<https://radicale.org/v3.html#configuration>.
This option is mutually exclusive with {option}`config`.
'';
example = literalExpression ''
@ -74,7 +74,7 @@ in {
type = format.type;
description = ''
Configuration for Radicale's rights file. See
<https://radicale.org/3.0.html#documentation/authentication-and-rights>.
<https://radicale.org/v3.html#authentication-and-rights>.
This option only works in conjunction with {option}`settings`.
Setting this will also set {option}`settings.rights.type` and
{option}`settings.rights.file` to appropriate values.

View file

@ -12,7 +12,7 @@ let
tlsCfg = optionalString (cfg.tlsCertificate != null)
"tls ${cfg.tlsCertificate} ${cfg.tlsCertificateKey}";
logCfg = optionalString cfg.enableMessageLogging
"log fs ${stateDir}/logs";
"message-store fs ${stateDir}/logs";
configFile = pkgs.writeText "soju.conf" ''
${listenCfg}

View file

@ -26,12 +26,15 @@ let
ifaceArg = concatStringsSep " -N " (map (i: "-i${i}") (splitString " " iface));
driverArg = optionalString (suppl.driver != null) "-D${suppl.driver}";
bridgeArg = optionalString (suppl.bridge!="") "-b${suppl.bridge}";
confFileArg = optionalString (suppl.configFile.path!=null) "-c${suppl.configFile.path}";
extraConfFile = pkgs.writeText "supplicant-extra-conf-${replaceStrings [" "] ["-"] iface}" ''
${optionalString suppl.userControlled.enable "ctrl_interface=DIR=${suppl.userControlled.socketDir} GROUP=${suppl.userControlled.group}"}
${optionalString suppl.configFile.writable "update_config=1"}
${suppl.extraConf}
'';
confArgs = escapeShellArgs
(if suppl.configFile.path == null
then [ "-c${extraConfFile}" ]
else [ "-c${suppl.configFile.path}" "-I${extraConfFile}" ]);
in
{ description = "Supplicant ${iface}${optionalString (iface=="WLAN"||iface=="LAN") " %I"}";
wantedBy = [ "multi-user.target" ] ++ deps;
@ -51,7 +54,7 @@ let
''}
'';
serviceConfig.ExecStart = "${pkgs.wpa_supplicant}/bin/wpa_supplicant -s ${driverArg} ${confFileArg} -I${extraConfFile} ${bridgeArg} ${suppl.extraCmdArgs} ${if (iface=="WLAN"||iface=="LAN") then "-i%I" else (if (iface=="DBUS") then "-u" else ifaceArg)}";
serviceConfig.ExecStart = "${pkgs.wpa_supplicant}/bin/wpa_supplicant -s ${driverArg} ${confArgs} ${bridgeArg} ${suppl.extraCmdArgs} ${if (iface=="WLAN"||iface=="LAN") then "-i%I" else (if (iface=="DBUS") then "-u" else ifaceArg)}";
};

View file

@ -29,6 +29,12 @@ in {
description = "Username or user ID of the user allowed to to fetch Tailscale TLS certificates for the node.";
};
disableTaildrop = mkOption {
default = false;
type = types.bool;
description = "Whether to disable the Taildrop feature for sending files between nodes.";
};
package = lib.mkPackageOption pkgs "tailscale" {};
openFirewall = mkOption {
@ -129,6 +135,8 @@ in {
''"FLAGS=--tun ${lib.escapeShellArg cfg.interfaceName} ${lib.concatStringsSep " " cfg.extraDaemonFlags}"''
] ++ (lib.optionals (cfg.permitCertUid != null) [
"TS_PERMIT_CERT_UID=${cfg.permitCertUid}"
]) ++ (lib.optionals (cfg.disableTaildrop) [
"TS_DISABLE_TAILDROP=true"
]);
# Restart tailscaled with a single `systemctl restart` at the
# end of activation, rather than a `stop` followed by a later

View file

@ -104,31 +104,18 @@ with lib;
wantedBy = [ "multi-user.target" ];
preStart = ''
mkdir -p -m 700 /etc/xl2tpd
install -m 700 -d /etc/xl2tpd/ppp
pushd /etc/xl2tpd > /dev/null
mkdir -p -m 700 ppp
[ -f ppp/chap-secrets ] || cat > ppp/chap-secrets << EOF
[ -f /etc/xl2tpd/ppp/chap-secrets ] || install -m 600 -o root -g root /dev/stdin /etc/xl2tpd/ppp/chap-secrets <<EOF
# Secrets for authentication using CHAP
# client server secret IP addresses
#username xl2tpd password *
EOF
chown root:root ppp/chap-secrets
chmod 600 ppp/chap-secrets
# The documentation says this file should be present but doesn't explain why and things work even if not there:
[ -f l2tp-secrets ] || (echo -n "* * "; ${pkgs.apg}/bin/apg -n 1 -m 32 -x 32 -a 1 -M LCN) > l2tp-secrets
chown root:root l2tp-secrets
chmod 600 l2tp-secrets
[ -f /etc/xl2tpd/l2tp-secrets ] || install -m 600 -o root -g root <(echo -n "* * "; ${pkgs.apg}/bin/apg -n 1 -m 32 -x 32 -a 1 -M LCN) /etc/xl2tpd/l2tp-secrets
popd > /dev/null
mkdir -p /run/xl2tpd
chown root:root /run/xl2tpd
chmod 700 /run/xl2tpd
install -m 701 -o root -g root -d /run/xl2tpd
'';
serviceConfig = {

View file

@ -62,7 +62,7 @@ let
} // lib.optionalAttrs (cfg.passBasicAuth) {
basic-auth-password = cfg.basicAuthPassword;
} // lib.optionalAttrs (cfg.htpasswd.file != null) {
display-htpasswd-file = cfg.htpasswd.displayForm;
display-htpasswd-form = cfg.htpasswd.displayForm;
} // lib.optionalAttrs tls.enable {
tls-cert-file = tls.certificate;
tls-key-file = tls.key;

View file

@ -7,14 +7,26 @@ let
baseArgs = [
"--login-program" "${cfg.loginProgram}"
] ++ optionals (cfg.autologinUser != null) [
] ++ optionals (cfg.autologinUser != null && !cfg.autologinOnce) [
"--autologin" cfg.autologinUser
] ++ optionals (cfg.loginOptions != null) [
"--login-options" cfg.loginOptions
] ++ cfg.extraArgs;
gettyCmd = args:
"@${pkgs.util-linux}/sbin/agetty agetty ${escapeShellArgs baseArgs} ${args}";
"${lib.getExe' pkgs.util-linux "agetty"} ${escapeShellArgs baseArgs} ${args}";
autologinScript = ''
otherArgs="--noclear --keep-baud $TTY 115200,38400,9600 $TERM";
${lib.optionalString cfg.autologinOnce ''
autologged="/run/agetty.autologged"
if test "$TTY" = tty1 && ! test -f "$autologged"; then
touch "$autologged"
exec ${gettyCmd "$otherArgs --autologin ${cfg.autologinUser}"}
fi
''}
exec ${gettyCmd "$otherArgs"}
'';
in
@ -40,6 +52,16 @@ in
'';
};
autologinOnce = mkOption {
type = types.bool;
default = false;
description = ''
If enabled, the automatic login will only happen on the first tty
once per boot. This can be useful to avoid retyping the account
password on systems with full disk encryption.
'';
};
loginProgram = mkOption {
type = types.path;
default = "${pkgs.shadow}/bin/login";
@ -106,9 +128,11 @@ in
systemd.services."getty@" =
{ serviceConfig.ExecStart = [
"" # override upstream default with an empty ExecStart
(gettyCmd "--noclear --keep-baud %I 115200,38400,9600 $TERM")
# override upstream default with an empty ExecStart
""
(pkgs.writers.writeDash "getty" autologinScript)
];
environment.TTY = "%I";
restartIfChanged = false;
};

View file

@ -523,6 +523,7 @@ in
intel-gpu-tools
];
serviceConfig = {
ExecStartPre = "-rm /var/cache/frigate/*.mp4";
ExecStart = "${cfg.package.python.interpreter} -m frigate";
Restart = "on-failure";

View file

@ -258,7 +258,7 @@ in
postgresEnv
// redisEnv
// {
HOST = cfg.host;
IMMICH_HOST = cfg.host;
IMMICH_PORT = toString cfg.port;
IMMICH_MEDIA_LOCATION = cfg.mediaLocation;
IMMICH_MACHINE_LEARNING_URL = "http://localhost:3003";
@ -282,6 +282,7 @@ in
ExecStart = lib.getExe cfg.package;
EnvironmentFile = mkIf (cfg.secretsFile != null) cfg.secretsFile;
StateDirectory = "immich";
SyslogIdentifier = "immich";
RuntimeDirectory = "immich";
User = cfg.user;
Group = cfg.group;

View file

@ -75,21 +75,17 @@ in {
package = lib.mkOption {
type = lib.types.package;
default =
if lib.versionAtLeast config.system.stateVersion "24.05"
if lib.versionAtLeast config.system.stateVersion "24.11"
then pkgs.netbox_4_1
else if lib.versionAtLeast config.system.stateVersion "24.05"
then pkgs.netbox_3_7
else if lib.versionAtLeast config.system.stateVersion "23.11"
then pkgs.netbox_3_6
else if lib.versionAtLeast config.system.stateVersion "23.05"
then pkgs.netbox_3_5
else pkgs.netbox_3_3;
else pkgs.netbox_3_6;
defaultText = lib.literalExpression ''
if lib.versionAtLeast config.system.stateVersion "24.05"
if lib.versionAtLeast config.system.stateVersion "24.11"
then pkgs.netbox_4_1
else if lib.versionAtLeast config.system.stateVersion "24.05"
then pkgs.netbox_3_7
else if lib.versionAtLeast config.system.stateVersion "23.11"
then pkgs.netbox_3_6
else if lib.versionAtLeast config.system.stateVersion "23.05"
then pkgs.netbox_3_5
else pkgs.netbox_3_3;
else pkgs.netbox_3_6;
'';
description = ''
NetBox package to use.
@ -328,6 +324,7 @@ in {
--pythonpath ${pkg}/opt/netbox/netbox
'';
PrivateTmp = true;
TimeoutStartSec = lib.mkDefault "5min";
};
};

View file

@ -35,13 +35,21 @@ in
};
dataDir = mkOption {
type = types.str;
type = types.path;
default = "/var/lib/sftpgo";
description = ''
The directory where SFTPGo stores its data files.
'';
};
extraReadWriteDirs = mkOption {
type = types.listOf types.path;
default = [];
description = ''
Extra directories that SFTPGo is allowed to write to.
'';
};
user = mkOption {
type = types.str;
default = defaultUser;
@ -63,7 +71,7 @@ in
type = with types; nullOr path;
description = ''
Path to a json file containing users and folders to load (or update) on startup.
Check the [documentation](https://github.com/drakkan/sftpgo/blob/main/docs/full-configuration.md)
Check the [documentation](https://sftpgo.github.io/latest/config-file/)
for the `--loaddata-from` command line argument for more info.
'';
};
@ -72,7 +80,7 @@ in
default = {};
description = ''
The primary sftpgo configuration. See the
[configuration reference](https://github.com/drakkan/sftpgo/blob/main/docs/full-configuration.md)
[configuration reference](https://sftpgo.github.io/latest/config-file/)
for possible values.
'';
type = with types; submodule {
@ -324,7 +332,7 @@ in
User = cfg.user;
Group = cfg.group;
WorkingDirectory = cfg.dataDir;
ReadWritePaths = [ cfg.dataDir ];
ReadWritePaths = [ cfg.dataDir ] ++ cfg.extraReadWriteDirs;
LimitNOFILE = 8192; # taken from upstream
KillMode = "mixed";
ExecStart = "${cfg.package}/bin/sftpgo serve ${utils.escapeSystemdExecArgs cfg.extraArgs}";

View file

@ -9,6 +9,8 @@ in
(lib.mkRenamedOptionModule [ "services" "youtrack" "port" ] [ "services" "youtrack" "environmentalParameters" "listen-port" ])
(lib.mkRemovedOptionModule [ "services" "youtrack" "maxMemory" ] "Please instead use `services.youtrack.generalParameters`.")
(lib.mkRemovedOptionModule [ "services" "youtrack" "maxMetaspaceSize" ] "Please instead use `services.youtrack.generalParameters`.")
(lib.mkRemovedOptionModule [ "services" "youtrack" "extraParams" ] "Please migrate to `services.youtrack.generalParameters`.")
(lib.mkRemovedOptionModule [ "services" "youtrack" "jvmOpts" ] "Please migrate to `services.youtrack.generalParameters`.")
];
options.services.youtrack = {
@ -22,33 +24,15 @@ in
type = lib.types.str;
};
extraParams = lib.mkOption {
default = {};
description = ''
Extra parameters to pass to youtrack.
Use to configure YouTrack 2022.x, deprecated with YouTrack 2023.x. Use `services.youtrack.generalParameters`.
https://www.jetbrains.com/help/youtrack/standalone/YouTrack-Java-Start-Parameters.html
for more information.
'';
example = lib.literalExpression ''
{
"jetbrains.youtrack.overrideRootPassword" = "tortuga";
}
'';
type = lib.types.attrsOf lib.types.str;
visible = false;
};
package = lib.mkOption {
description = ''
Package to use.
'';
type = lib.types.package;
default = null;
relatedPackages = [ "youtrack_2022_3" "youtrack" ];
default = pkgs.youtrack;
defaultText = lib.literalExpression "pkgs.youtrack";
};
statePath = lib.mkOption {
description = ''
Path were the YouTrack state is stored.
@ -67,19 +51,6 @@ in
type = lib.types.nullOr lib.types.str;
};
jvmOpts = lib.mkOption {
description = ''
Extra options to pass to the JVM.
Only has a use with YouTrack 2022.x, deprecated with YouTrack 2023.x. Use `serivces.youtrack.generalParameters`.
See https://www.jetbrains.com/help/youtrack/standalone/Configure-JVM-Options.html
for more information.
'';
type = lib.types.separatedString " ";
example = "--J-XX:MetaspaceSize=250m";
default = "";
visible = false;
};
autoUpgrade = lib.mkOption {
type = lib.types.bool;
default = true;
@ -90,7 +61,6 @@ in
type = with lib.types; listOf str;
description = ''
General configuration parameters and other JVM options.
Only has an effect for YouTrack 2023.x.
See https://www.jetbrains.com/help/youtrack/server/2023.3/youtrack-java-start-parameters.html#general-parameters
for more information.
'';
@ -121,7 +91,6 @@ in
};
description = ''
Environmental configuration parameters, set imperatively. The values don't get removed when they are removed from the Nix configuration.
Only has an effect for YouTrack 2023.x.
See https://www.jetbrains.com/help/youtrack/server/2023.3/youtrack-java-start-parameters.html#environmental-parameters
for more information.
'';
@ -135,90 +104,47 @@ in
};
config = lib.mkIf cfg.enable {
warnings = lib.optional (lib.versions.major cfg.package.version <= "2022")
"YouTrack 2022.x is deprecated. See https://nixos.org/manual/nixos/unstable/index.html#module-services-youtrack for details on how to upgrade."
++ lib.optional (cfg.extraParams != {} && (lib.versions.major cfg.package.version >= "2023"))
"'services.youtrack.extraParams' is deprecated and has no effect on YouTrack 2023.x and newer. Please migrate to 'services.youtrack.generalParameters'"
++ lib.optional (cfg.jvmOpts != "" && (lib.versions.major cfg.package.version >= "2023"))
"'services.youtrack.jvmOpts' is deprecated and has no effect on YouTrack 2023.x and newer. Please migrate to 'services.youtrack.generalParameters'";
# XXX: Drop all version feature switches at the point when we consider YT 2022.3 as outdated.
services.youtrack.package = lib.mkDefault (
if lib.versionAtLeast config.system.stateVersion "24.11" then pkgs.youtrack
else pkgs.youtrack_2022_3
);
services.youtrack.generalParameters = lib.optional (lib.versions.major cfg.package.version >= "2023")
"-Ddisable.configuration.wizard.on.upgrade=${lib.boolToString cfg.autoUpgrade}"
++ (lib.mapAttrsToList (k: v: "-D${k}=${v}") cfg.extraParams);
services.youtrack.generalParameters = [ "-Ddisable.configuration.wizard.on.upgrade=${lib.boolToString cfg.autoUpgrade}" ];
systemd.services.youtrack = let
service_jar = let
mergeAttrList = lib.foldl' lib.mergeAttrs {};
stdParams = mergeAttrList [
(lib.optionalAttrs (cfg.environmentalParameters ? base-url && cfg.environmentalParameters.base-url != null) {
"jetbrains.youtrack.baseUrl" = cfg.environmentalParameters.base-url;
})
{
"java.aws.headless" = "true";
"jetbrains.youtrack.disableBrowser" = "true";
}
];
extraAttr = lib.concatStringsSep " " (lib.mapAttrsToList (k: v: "-D${k}=${v}") (stdParams // cfg.extraParams));
in {
environment.HOME = cfg.statePath;
environment.YOUTRACK_JVM_OPTS = "${extraAttr}";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
path = with pkgs; [ unixtools.hostname ];
serviceConfig = {
jvmoptions = pkgs.writeTextFile {
name = "youtrack.jvmoptions";
text = (lib.concatStringsSep "\n" cfg.generalParameters);
};
package = cfg.package.override {
statePath = cfg.statePath;
};
in {
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
path = with pkgs; [ unixtools.hostname ];
preStart = ''
# This detects old (i.e. <= 2022.3) installations that were not migrated yet
# and migrates them to the new state directory style
if [[ -d ${cfg.statePath}/teamsysdata ]] && [[ ! -d ${cfg.statePath}/2022_3 ]]
then
mkdir -p ${cfg.statePath}/2022_3
mv ${cfg.statePath}/teamsysdata ${cfg.statePath}/2022_3
mv ${cfg.statePath}/.youtrack ${cfg.statePath}/2022_3
fi
mkdir -p ${cfg.statePath}/{backups,conf,data,logs,temp}
${pkgs.coreutils}/bin/ln -fs ${jvmoptions} ${cfg.statePath}/conf/youtrack.jvmoptions
${package}/bin/youtrack configure ${lib.concatStringsSep " " (lib.mapAttrsToList (name: value: "--${name}=${toString value}") cfg.environmentalParameters )}
'';
serviceConfig = lib.mkMerge [
{
Type = "simple";
User = "youtrack";
Group = "youtrack";
Restart = "on-failure";
ExecStart = ''${cfg.package}/bin/youtrack ${cfg.jvmOpts} ${cfg.environmentalParameters.listen-address}:${toString cfg.environmentalParameters.listen-port}'';
};
};
service_zip = let
jvmoptions = pkgs.writeTextFile {
name = "youtrack.jvmoptions";
text = (lib.concatStringsSep "\n" cfg.generalParameters);
};
package = cfg.package.override {
statePath = cfg.statePath;
};
in {
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
path = with pkgs; [ unixtools.hostname ];
preStart = ''
# This detects old (i.e. <= 2022.3) installations that were not migrated yet
# and migrates them to the new state directory style
if [[ -d ${cfg.statePath}/teamsysdata ]] && [[ ! -d ${cfg.statePath}/2022_3 ]]
then
mkdir -p ${cfg.statePath}/2022_3
mv ${cfg.statePath}/teamsysdata ${cfg.statePath}/2022_3
mv ${cfg.statePath}/.youtrack ${cfg.statePath}/2022_3
fi
mkdir -p ${cfg.statePath}/{backups,conf,data,logs,temp}
${pkgs.coreutils}/bin/ln -fs ${jvmoptions} ${cfg.statePath}/conf/youtrack.jvmoptions
${package}/bin/youtrack configure ${lib.concatStringsSep " " (lib.mapAttrsToList (name: value: "--${name}=${toString value}") cfg.environmentalParameters )}
'';
serviceConfig = lib.mkMerge [
{
Type = "simple";
User = "youtrack";
Group = "youtrack";
Restart = "on-failure";
ExecStart = "${package}/bin/youtrack run";
}
(lib.mkIf (cfg.statePath == "/var/lib/youtrack") {
StateDirectory = "youtrack";
})
];
};
in if (lib.versions.major cfg.package.version >= "2023") then service_zip else service_jar;
ExecStart = "${package}/bin/youtrack run";
}
(lib.mkIf (cfg.statePath == "/var/lib/youtrack") {
StateDirectory = "youtrack";
})
];
};
users.users.youtrack = {
description = "Youtrack service user";

View file

@ -1218,6 +1218,7 @@ in
++ lib.optional cfg.recommendedZstdSettings pkgs.nginxModules.zstd;
services.nginx.virtualHosts.localhost = mkIf cfg.statusPage {
serverAliases = [ "127.0.0.1" ] ++ lib.optional config.networking.enableIPv6 "[::1]";
listenAddresses = lib.mkDefault ([
"0.0.0.0"
] ++ lib.optional enableIPv6 "[::]");
@@ -1373,7 +1374,7 @@ in
];
services.logrotate.settings.nginx = mapAttrs (_: mkDefault) {
files = "/var/log/nginx/*.log";
files = [ "/var/log/nginx/*.log" ];
frequency = "weekly";
su = "${cfg.user} ${cfg.group}";
rotate = 26;

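The logrotate hunk above turns `files` from a single string into a list of globs. Since the module applies its values with `mkDefault`, a host-level override would now look roughly like this (the extra glob and rotate count are purely illustrative):

services.logrotate.settings.nginx = {
  files = [
    "/var/log/nginx/*.log"
    "/var/log/nginx/vhosts/*.log"  # hypothetical extra location
  ];
  rotate = 52;                     # overrides the module's mkDefault of 26
};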
View file

@@ -0,0 +1,228 @@
{
config,
lib,
pkgs,
...
}:
let
inherit (lib) mkOption types;
cfg = config.services.send;
in
{
options = {
services.send = {
enable = lib.mkEnableOption "Send, a file sharing web service for ffsend.";
package = lib.mkPackageOption pkgs "send" { };
environment = mkOption {
type =
with types;
attrsOf (
nullOr (oneOf [
bool
int
str
(listOf int)
])
);
description = ''
All the available config options and their defaults can be found here: https://github.com/timvisee/send/blob/master/server/config.js,
some descriptions can be found here: https://github.com/timvisee/send/blob/master/docs/docker.md#environment-variables
Values under {option}`services.send.environment` will override the predefined values in the Send service.
- Time/duration should be in seconds
- Filesize values should be in bytes
'';
example = {
DEFAULT_DOWNLOADS = 1;
DETECT_BASE_URL = true;
EXPIRE_TIMES_SECONDS = [
300
3600
86400
604800
];
};
};
dataDir = lib.mkOption {
type = types.path;
readOnly = true;
default = "/var/lib/send";
description = ''
Directory for uploaded files.
Due to limitations in {option}`systemd.services.send.serviceConfig.DynamicUser`, this option is read-only.
'';
};
baseUrl = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Base URL for the Send service.
Leave it blank to automatically detect the base url.
'';
};
host = lib.mkOption {
type = types.str;
default = "127.0.0.1";
description = "The hostname or IP address for Send to bind to.";
};
port = lib.mkOption {
type = types.port;
default = 1443;
description = "Port the Send service listens on.";
};
openFirewall = lib.mkOption {
type = types.bool;
default = false;
description = "Whether to open firewall ports for Send.";
};
redis = {
createLocally = lib.mkOption {
type = types.bool;
default = true;
description = "Whether to create a local redis automatically.";
};
name = lib.mkOption {
type = types.str;
default = "send";
description = ''
Name of the redis server.
Only used if {option}`services.send.redis.createLocally` is set to true.
'';
};
host = lib.mkOption {
type = types.str;
default = "localhost";
description = "Redis server address.";
};
port = lib.mkOption {
type = types.port;
default = 6379;
description = "Port of the redis server.";
};
passwordFile = mkOption {
type = types.nullOr types.path;
default = null;
example = "/run/agenix/send-redis-password";
description = ''
The path to the file containing the Redis password.
If {option}`services.send.redis.createLocally` is set to true,
the content of this file will be used as the password for the locally created Redis instance.
Leave it blank if no password is required.
'';
};
};
};
};
config = lib.mkIf cfg.enable {
services.send.environment.DETECT_BASE_URL = cfg.baseUrl == null;
assertions = [
{
assertion = cfg.redis.createLocally -> cfg.redis.host == "localhost";
message = "the redis host must be localhost if services.send.redis.createLocally is set to true";
}
];
networking.firewall.allowedTCPPorts = lib.optional cfg.openFirewall cfg.port;
services.redis = lib.optionalAttrs cfg.redis.createLocally {
servers."${cfg.redis.name}" = {
enable = true;
bind = "localhost";
port = cfg.redis.port;
};
};
systemd.services.send = {
serviceConfig = {
Type = "simple";
Restart = "always";
StateDirectory = "send";
WorkingDirectory = cfg.dataDir;
ReadWritePaths = cfg.dataDir;
LoadCredential = lib.optionalString (
cfg.redis.passwordFile != null
) "redis-password:${cfg.redis.passwordFile}";
# Hardening
RestrictAddressFamilies = [
"AF_UNIX"
"AF_INET"
"AF_INET6"
];
AmbientCapabilities = lib.optionalString (cfg.port < 1024) "cap_net_bind_service";
DynamicUser = true;
CapabilityBoundingSet = "";
NoNewPrivileges = true;
RemoveIPC = true;
PrivateTmp = true;
ProcSubset = "pid";
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProtectSystem = "full";
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
UMask = "0077";
};
environment =
{
IP_ADDRESS = cfg.host;
PORT = toString cfg.port;
BASE_URL = if (cfg.baseUrl == null) then "http://${cfg.host}:${toString cfg.port}" else cfg.baseUrl;
FILE_DIR = cfg.dataDir + "/uploads";
REDIS_HOST = cfg.redis.host;
REDIS_PORT = toString cfg.redis.port;
}
// (lib.mapAttrs (
name: value:
if lib.isList value then
"[" + lib.concatStringsSep ", " (map (x: toString x) value) + "]"
else if lib.isBool value then
lib.boolToString value
else
toString value
) cfg.environment);
after =
[
"network.target"
]
++ lib.optionals cfg.redis.createLocally [
"redis-${cfg.redis.name}.service"
];
description = "Send web service";
wantedBy = [ "multi-user.target" ];
script = ''
${lib.optionalString (cfg.redis.passwordFile != null) ''
export REDIS_PASSWORD="$(cat $CREDENTIALS_DIRECTORY/redis-password)"
''}
${lib.getExe cfg.package}
'';
};
};
meta.maintainers = with lib.maintainers; [ moraxyc ];
}

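For orientation, a minimal sketch of enabling the new Send module; the domain is a placeholder, and MAX_FILE_SIZE is one of the upstream variables documented at the links in the `environment` option description:

services.send = {
  enable = true;
  baseUrl = "https://send.example.org";  # placeholder domain
  host = "127.0.0.1";
  port = 1443;
  environment = {
    MAX_FILE_SIZE = 2147483648;          # bytes (2 GiB)
    EXPIRE_TIMES_SECONDS = [ 3600 86400 604800 ];
  };
};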
View file

@@ -1,13 +1,19 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
perlWrapped = pkgs.perl.withPackages (p: with p; [ ConfigIniFiles FileSlurp ]);
perlWrapped = pkgs.perl.withPackages (
p: with p; [
ConfigIniFiles
FileSlurp
]
);
in
{
options.system.switch = {
enable = lib.mkOption {
type = lib.types.bool;
@@ -36,6 +42,17 @@ in
config = lib.mkMerge [
(lib.mkIf (config.system.switch.enable && !config.system.switch.enableNg) {
warnings = [
''
The Perl implementation of switch-to-configuration will be deprecated
and removed in the 25.05 release of NixOS. Please migrate to the
newer implementation by removing `system.switch.enableNg = false`
from your configuration. If you are unable to migrate due to any
issues with the new implementation, please create an issue and tag
the maintainers of `switch-to-configuration-ng`.
''
];
system.activatableSystemBuilderCommands = ''
mkdir $out/bin
substitute ${./switch-to-configuration.pl} $out/bin/switch-to-configuration \

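The warning above boils down to a one-line migration; a sketch for a configuration that had opted out of the Rust implementation:

# Before (now triggers the deprecation warning):
system.switch.enableNg = false;

# After: delete the line. switch-to-configuration-ng is the default,
# so nothing needs to be set in its place.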
View file

@@ -160,6 +160,7 @@ let
# Misc.
"systemd-sysctl.service"
"systemd-machine-id-commit.service"
] ++ optionals cfg.package.withTimedated [
"dbus-org.freedesktop.timedate1.service"
"systemd-timedated.service"

View file

@@ -1,22 +1,33 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
let
cfg = config.virtualisation.azureImage;
in
{
imports = [ ./azure-common.nix ];
imports = [
./azure-common.nix
./disk-size-option.nix
(lib.mkRenamedOptionModuleWith {
sinceRelease = 2411;
from = [
"virtualisation"
"azureImage"
"diskSize"
];
to = [
"virtualisation"
"diskSize"
];
})
];
options.virtualisation.azureImage = {
diskSize = mkOption {
type = with types; either (enum [ "auto" ]) int;
default = "auto";
example = 2048;
description = ''
Size of disk image. Unit is MB.
'';
};
bootSize = mkOption {
type = types.int;
default = 256;
@@ -35,7 +46,12 @@ in
};
vmGeneration = mkOption {
type = with types; enum [ "v1" "v2" ];
type =
with types;
enum [
"v1"
"v2"
];
default = "v1";
description = ''
VM Generation to use.
@@ -57,7 +73,8 @@ in
bootSize = "${toString cfg.bootSize}M";
partitionTableType = if cfg.vmGeneration == "v2" then "efi" else "legacy";
inherit (cfg) diskSize contents;
inherit (cfg) contents;
inherit (config.virtualisation) diskSize;
inherit config lib pkgs;
};
};

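The same pattern (importing `disk-size-option.nix` plus a `mkRenamedOptionModuleWith`) repeats in the image modules below. In a user configuration the change looks roughly like this, reusing the old option's example value:

# Old (still evaluates, but emits a rename warning since 24.11):
virtualisation.azureImage.diskSize = 2048;

# New, shared across the image builders:
virtualisation.diskSize = 2048;  # MB, or "auto" where supported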
View file

@@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
let
@@ -6,18 +11,24 @@ let
in
{
imports = [ ./digital-ocean-config.nix ];
imports = [
./digital-ocean-config.nix
./disk-size-option.nix
(lib.mkRenamedOptionModuleWith {
sinceRelease = 2411;
from = [
"virtualisation"
"digitalOceanImage"
"diskSize"
];
to = [
"virtualisation"
"diskSize"
];
})
];
options = {
virtualisation.digitalOceanImage.diskSize = mkOption {
type = with types; either (enum [ "auto" ]) int;
default = "auto";
example = 4096;
description = ''
Size of disk image. Unit is MB.
'';
};
virtualisation.digitalOceanImage.configFile = mkOption {
type = with types; nullOr path;
default = null;
@@ -31,7 +42,10 @@ in
};
virtualisation.digitalOceanImage.compressionMethod = mkOption {
type = types.enum [ "gzip" "bzip2" ];
type = types.enum [
"gzip"
"bzip2"
];
default = "gzip";
example = "bzip2";
description = ''
@@ -44,27 +58,35 @@ in
#### implementation
config = {
system.build.digitalOceanImage = import ../../lib/make-disk-image.nix {
name = "digital-ocean-image";
format = "qcow2";
postVM = let
compress = {
"gzip" = "${pkgs.gzip}/bin/gzip";
"bzip2" = "${pkgs.bzip2}/bin/bzip2";
}.${cfg.compressionMethod};
in ''
${compress} $diskImage
'';
configFile = if cfg.configFile == null
then config.virtualisation.digitalOcean.defaultConfigFile
else cfg.configFile;
inherit (cfg) diskSize;
postVM =
let
compress =
{
"gzip" = "${pkgs.gzip}/bin/gzip";
"bzip2" = "${pkgs.bzip2}/bin/bzip2";
}
.${cfg.compressionMethod};
in
''
${compress} $diskImage
'';
configFile =
if cfg.configFile == null then
config.virtualisation.digitalOcean.defaultConfigFile
else
cfg.configFile;
inherit (config.virtualisation) diskSize;
inherit config lib pkgs;
};
};
meta.maintainers = with maintainers; [ arianvp eamsden ];
meta.maintainers = with maintainers; [
arianvp
eamsden
];
}

View file

@@ -0,0 +1,38 @@
{ lib, config, ... }:
let
t = lib.types;
in
{
options = {
virtualisation.diskSizeAutoSupported = lib.mkOption {
type = t.bool;
default = true;
description = ''
Whether the current image builder or vm runner supports `virtualisation.diskSize = "auto"`.
'';
internal = true;
};
virtualisation.diskSize = lib.mkOption {
type = t.either (t.enum [ "auto" ]) t.ints.positive;
default = if config.virtualisation.diskSizeAutoSupported then "auto" else 1024;
defaultText = "\"auto\" if diskSizeAutoSupported, else 1024";
description = ''
The disk size in megabytes of the virtual machine.
'';
};
};
config =
let
inherit (config.virtualisation) diskSize diskSizeAutoSupported;
in
{
assertions = [
{
assertion = diskSize != "auto" || diskSizeAutoSupported;
message = "Setting virtualisation.diskSize to `auto` is not supported by the current image build or vm runner; use an explicit size.";
}
];
};
}

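A sketch of how the new module behaves when an image builder cannot size the disk automatically; the 4096 MB figure is illustrative:

# Set by the image module:
virtualisation.diskSizeAutoSupported = false;  # flips the diskSize default to 1024 MB

# In the user's config, "auto" would now fail the assertion above,
# so an explicit size is required:
virtualisation.diskSize = 4096;  # MB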
View file

@@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
let
@@ -14,18 +19,24 @@ let
in
{
imports = [ ./google-compute-config.nix ];
imports = [
./google-compute-config.nix
./disk-size-option.nix
(lib.mkRenamedOptionModuleWith {
sinceRelease = 2411;
from = [
"virtualisation"
"googleComputeImage"
"diskSize"
];
to = [
"virtualisation"
"diskSize"
];
})
];
options = {
virtualisation.googleComputeImage.diskSize = mkOption {
type = with types; either (enum [ "auto" ]) int;
default = "auto";
example = 1536;
description = ''
Size of disk image. Unit is MB.
'';
};
virtualisation.googleComputeImage.configFile = mkOption {
type = with types; nullOr str;
default = null;
@@ -64,7 +75,13 @@ in
system.build.googleComputeImage = import ../../lib/make-disk-image.nix {
name = "google-compute-image";
postVM = ''
PATH=$PATH:${with pkgs; lib.makeBinPath [ gnutar gzip ]}
PATH=$PATH:${
with pkgs;
lib.makeBinPath [
gnutar
gzip
]
}
pushd $out
mv $diskImage disk.raw
tar -Sc disk.raw | gzip -${toString cfg.compressionLevel} > \
@@ -75,7 +92,7 @@ in
format = "raw";
configFile = if cfg.configFile == null then defaultConfigFile else cfg.configFile;
partitionTableType = if cfg.efi then "efi" else "legacy";
inherit (cfg) diskSize;
inherit (config.virtualisation) diskSize;
inherit config lib pkgs;
};

View file

@@ -1,21 +1,34 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
with lib;
let
cfg = config.hyperv;
in
{
imports = [
./disk-size-option.nix
(lib.mkRenamedOptionModuleWith {
sinceRelease = 2411;
from = [
"hyperv"
"baseImageSize"
];
to = [
"virtualisation"
"diskSize"
];
})
];
in {
options = {
hyperv = {
baseImageSize = mkOption {
type = with types; either (enum [ "auto" ]) int;
default = "auto";
example = 2048;
description = ''
The size of the hyper-v base image in MiB.
'';
};
vmDerivationName = mkOption {
type = types.str;
default = "nixos-hyperv-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
@@ -34,6 +47,10 @@ in {
};
config = {
# Use a priority just below mkOptionDefault (1500) instead of lib.mkDefault
# to avoid breaking existing configs using that.
virtualisation.diskSize = lib.mkOverride 1490 (4 * 1024);
system.build.hypervImage = import ../../lib/make-disk-image.nix {
name = cfg.vmDerivationName;
postVM = ''
@@ -41,7 +58,7 @@ in {
rm $diskImage
'';
format = "raw";
diskSize = cfg.baseImageSize;
inherit (config.virtualisation) diskSize;
partitionTableType = "efi";
inherit config lib pkgs;
};

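The `mkOverride 1490` comment above leans on the module system's priority order, where a lower number wins: `mkForce` is 50, a plain assignment 100, `mkDefault` 1000, `mkOptionDefault` 1500. A sketch of why 1490 was chosen; the 16 GiB user value is hypothetical:

# Module side: just stronger than the option's own default (priority 1500)...
virtualisation.diskSize = lib.mkOverride 1490 (4 * 1024);

# ...so an existing user config like this still wins (1000 < 1490), where a
# plain lib.mkDefault on the module side would instead have produced a
# conflicting-definitions error:
virtualisation.diskSize = lib.mkDefault (16 * 1024);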
View file

@@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
let
@@ -12,17 +17,24 @@ let
'';
in
{
imports = [ ./linode-config.nix ];
imports = [
./linode-config.nix
./disk-size-option.nix
(lib.mkRenamedOptionModuleWith {
sinceRelease = 2411;
from = [
"virtualisation"
"linodeImage"
"diskSize"
];
to = [
"virtualisation"
"diskSize"
];
})
];
options = {
virtualisation.linodeImage.diskSize = mkOption {
type = with types; either (enum (singleton "auto")) ints.positive;
default = "auto";
example = 1536;
description = ''
Size of disk image in MB.
'';
};
virtualisation.linodeImage.configFile = mkOption {
type = with types; nullOr str;
@@ -57,7 +69,7 @@ in
format = "raw";
partitionTableType = "none";
configFile = if cfg.configFile == null then defaultConfigFile else cfg.configFile;
inherit (cfg) diskSize;
inherit (config.virtualisation) diskSize;
inherit config lib pkgs;
};
};

View file

@@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.oci;
@@ -7,9 +12,14 @@ in
imports = [ ./oci-common.nix ];
config = {
# Use a priority just below mkOptionDefault (1500) instead of lib.mkDefault
# to avoid breaking existing configs using that.
virtualisation.diskSize = lib.mkOverride 1490 (8 * 1024);
virtualisation.diskSizeAutoSupported = false;
system.build.OCIImage = import ../../lib/make-disk-image.nix {
inherit config lib pkgs;
inherit (cfg) diskSize;
inherit (config.virtualisation) diskSize;
name = "oci-image";
configFile = ./oci-config-user.nix;
format = "qcow2";
@@ -25,7 +35,10 @@ in
after = [ "network-online.target" ];
wants = [ "network-online.target" ];
path = [ pkgs.coreutils pkgs.curl ];
path = [
pkgs.coreutils
pkgs.curl
];
script = ''
mkdir -m 0700 -p /root/.ssh
if [ -f /root/.ssh/authorized_keys ]; then

View file

@@ -1,5 +1,23 @@
{ config, lib, pkgs, ... }:
{
lib,
...
}:
{
imports = [
./disk-size-option.nix
(lib.mkRenamedOptionModuleWith {
sinceRelease = 2411;
from = [
"oci"
"diskSize"
];
to = [
"virtualisation"
"diskSize"
];
})
];
options = {
oci = {
efi = lib.mkOption {
@@ -9,12 +27,6 @@
Whether the OCI instance is using EFI.
'';
};
diskSize = lib.mkOption {
type = lib.types.int;
default = 8192;
description = "Size of the disk image created in MB.";
example = "diskSize = 12 * 1024; # 12GiB";
};
};
};
}

View file

@@ -1,8 +1,28 @@
{ config, pkgs, lib, ... }:
{
config,
pkgs,
lib,
...
}:
with lib;
{
imports = [
./disk-size-option.nix
(lib.mkRenamedOptionModuleWith {
sinceRelease = 2411;
from = [
"proxmox"
"qemuConf"
"diskSize"
];
to = [
"virtualisation"
"diskSize"
];
})
];
options.proxmox = {
qemuConf = {
# essential configs
@@ -54,7 +74,10 @@ with lib;
'';
};
bios = mkOption {
type = types.enum [ "seabios" "ovmf" ];
type = types.enum [
"seabios"
"ovmf"
];
default = "seabios";
description = ''
Select BIOS implementation (seabios = Legacy BIOS, ovmf = UEFI).
@@ -87,16 +110,6 @@ with lib;
either "efi" or "hybrid".
'';
};
diskSize = mkOption {
type = types.str;
default = "auto";
example = "20480";
description = ''
The size of the disk, in megabytes.
if "auto" size is calculated based on the contents copied to it and
additionalSpace is taken into account.
'';
};
net0 = mkOption {
type = types.commas;
default = "virtio=00:00:00:00:00:00,bridge=vmbr0,firewall=1";
@@ -124,8 +137,13 @@ with lib;
};
};
qemuExtraConf = mkOption {
type = with types; attrsOf (oneOf [ str int ]);
default = {};
type =
with types;
attrsOf (oneOf [
str
int
]);
default = { };
example = literalExpression ''
{
cpu = "host";
@@ -137,7 +155,12 @@ with lib;
'';
};
partitionTableType = mkOption {
type = types.enum [ "efi" "hybrid" "legacy" "legacy+gpt" ];
type = types.enum [
"efi"
"hybrid"
"legacy"
"legacy+gpt"
];
description = ''
Partition table type to use. See make-disk-image.nix partitionTableType for details.
Defaults to 'legacy' for 'proxmox.qemuConf.bios="seabios"' (default), other bios values defaults to 'efi'.
@@ -185,142 +208,163 @@ with lib;
};
};
config = let
cfg = config.proxmox;
cfgLine = name: value: ''
${name}: ${builtins.toString value}
'';
virtio0Storage = builtins.head (builtins.split ":" cfg.qemuConf.virtio0);
cfgFile = fileName: properties: pkgs.writeTextDir fileName ''
# generated by NixOS
${lib.concatStrings (lib.mapAttrsToList cfgLine properties)}
#qmdump#map:virtio0:drive-virtio0:${virtio0Storage}:raw:
'';
inherit (cfg) partitionTableType;
supportEfi = partitionTableType == "efi" || partitionTableType == "hybrid";
supportBios = partitionTableType == "legacy" || partitionTableType == "hybrid" || partitionTableType == "legacy+gpt";
hasBootPartition = partitionTableType == "efi" || partitionTableType == "hybrid";
hasNoFsPartition = partitionTableType == "hybrid" || partitionTableType == "legacy+gpt";
in {
assertions = [
{
assertion = config.boot.loader.systemd-boot.enable -> config.proxmox.qemuConf.bios == "ovmf";
message = "systemd-boot requires 'ovmf' bios";
}
{
assertion = partitionTableType == "efi" -> config.proxmox.qemuConf.bios == "ovmf";
message = "'efi' disk partitioning requires 'ovmf' bios";
}
{
assertion = partitionTableType == "legacy" -> config.proxmox.qemuConf.bios == "seabios";
message = "'legacy' disk partitioning requires 'seabios' bios";
}
{
assertion = partitionTableType == "legacy+gpt" -> config.proxmox.qemuConf.bios == "seabios";
message = "'legacy+gpt' disk partitioning requires 'seabios' bios";
}
];
system.build.VMA = import ../../lib/make-disk-image.nix {
name = "proxmox-${cfg.filenameSuffix}";
inherit (cfg) partitionTableType;
postVM = let
# Build qemu with PVE's patch that adds support for the VMA format
vma = (pkgs.qemu_kvm.override {
alsaSupport = false;
pulseSupport = false;
sdlSupport = false;
jackSupport = false;
gtkSupport = false;
vncSupport = false;
smartcardSupport = false;
spiceSupport = false;
ncursesSupport = false;
libiscsiSupport = false;
tpmSupport = false;
numaSupport = false;
seccompSupport = false;
guestAgentSupport = false;
}).overrideAttrs ( super: rec {
# Check https://github.com/proxmox/pve-qemu/tree/master for the version
# of qemu and patch to use
version = "9.0.0";
src = pkgs.fetchurl {
url = "https://download.qemu.org/qemu-${version}.tar.xz";
hash = "sha256-MnCKxmww2MiSYz6paMdxwcdtWX1w3erSGg0izPOG2mk=";
};
patches = [
# Proxmox' VMA tool is published as a particular patch upon QEMU
"${pkgs.fetchFromGitHub {
owner = "proxmox";
repo = "pve-qemu";
rev = "14afbdd55f04d250bd679ca1ad55d3f47cd9d4c8";
hash = "sha256-lSJQA5SHIHfxJvMLIID2drv2H43crTPMNIlIT37w9Nc=";
}}/debian/patches/pve/0027-PVE-Backup-add-vma-backup-format-code.patch"
];
buildInputs = super.buildInputs ++ [ pkgs.libuuid ];
nativeBuildInputs = super.nativeBuildInputs ++ [ pkgs.perl ];
});
in
''
${vma}/bin/vma create "vzdump-qemu-${cfg.filenameSuffix}.vma" \
-c ${cfgFile "qemu-server.conf" (cfg.qemuConf // cfg.qemuExtraConf)}/qemu-server.conf drive-virtio0=$diskImage
rm $diskImage
${pkgs.zstd}/bin/zstd "vzdump-qemu-${cfg.filenameSuffix}.vma"
mv "vzdump-qemu-${cfg.filenameSuffix}.vma.zst" $out/
mkdir -p $out/nix-support
echo "file vma $out/vzdump-qemu-${cfg.filenameSuffix}.vma.zst" > $out/nix-support/hydra-build-products
config =
let
cfg = config.proxmox;
cfgLine = name: value: ''
${name}: ${builtins.toString value}
'';
inherit (cfg.qemuConf) additionalSpace diskSize bootSize;
format = "raw";
inherit config lib pkgs;
};
virtio0Storage = builtins.head (builtins.split ":" cfg.qemuConf.virtio0);
cfgFile =
fileName: properties:
pkgs.writeTextDir fileName ''
# generated by NixOS
${lib.concatStrings (lib.mapAttrsToList cfgLine properties)}
#qmdump#map:virtio0:drive-virtio0:${virtio0Storage}:raw:
'';
inherit (cfg) partitionTableType;
supportEfi = partitionTableType == "efi" || partitionTableType == "hybrid";
supportBios =
partitionTableType == "legacy"
|| partitionTableType == "hybrid"
|| partitionTableType == "legacy+gpt";
hasBootPartition = partitionTableType == "efi" || partitionTableType == "hybrid";
hasNoFsPartition = partitionTableType == "hybrid" || partitionTableType == "legacy+gpt";
in
{
assertions = [
{
assertion = config.boot.loader.systemd-boot.enable -> config.proxmox.qemuConf.bios == "ovmf";
message = "systemd-boot requires 'ovmf' bios";
}
{
assertion = partitionTableType == "efi" -> config.proxmox.qemuConf.bios == "ovmf";
message = "'efi' disk partitioning requires 'ovmf' bios";
}
{
assertion = partitionTableType == "legacy" -> config.proxmox.qemuConf.bios == "seabios";
message = "'legacy' disk partitioning requires 'seabios' bios";
}
{
assertion = partitionTableType == "legacy+gpt" -> config.proxmox.qemuConf.bios == "seabios";
message = "'legacy+gpt' disk partitioning requires 'seabios' bios";
}
];
system.build.VMA = import ../../lib/make-disk-image.nix {
name = "proxmox-${cfg.filenameSuffix}";
inherit (cfg) partitionTableType;
postVM =
let
# Build qemu with PVE's patch that adds support for the VMA format
vma =
(pkgs.qemu_kvm.override {
alsaSupport = false;
pulseSupport = false;
sdlSupport = false;
jackSupport = false;
gtkSupport = false;
vncSupport = false;
smartcardSupport = false;
spiceSupport = false;
ncursesSupport = false;
libiscsiSupport = false;
tpmSupport = false;
numaSupport = false;
seccompSupport = false;
guestAgentSupport = false;
}).overrideAttrs
(super: rec {
# Check https://github.com/proxmox/pve-qemu/tree/master for the version
# of qemu and patch to use
version = "9.0.0";
src = pkgs.fetchurl {
url = "https://download.qemu.org/qemu-${version}.tar.xz";
hash = "sha256-MnCKxmww2MiSYz6paMdxwcdtWX1w3erSGg0izPOG2mk=";
};
patches = [
# Proxmox' VMA tool is published as a particular patch upon QEMU
"${
pkgs.fetchFromGitHub {
owner = "proxmox";
repo = "pve-qemu";
rev = "14afbdd55f04d250bd679ca1ad55d3f47cd9d4c8";
hash = "sha256-lSJQA5SHIHfxJvMLIID2drv2H43crTPMNIlIT37w9Nc=";
}
}/debian/patches/pve/0027-PVE-Backup-add-vma-backup-format-code.patch"
];
boot = {
growPartition = true;
kernelParams = [ "console=ttyS0" ];
loader.grub = {
device = lib.mkDefault (if (hasNoFsPartition || supportBios) then
# Even if there is a separate no-fs partition ("/dev/disk/by-partlabel/no-fs" i.e. "/dev/vda2"),
# which will be used by the bootloader, do not set it as loader.grub.device.
# GRUB installation fails, unless the whole disk is selected.
"/dev/vda"
else
"nodev");
efiSupport = lib.mkDefault supportEfi;
efiInstallAsRemovable = lib.mkDefault supportEfi;
buildInputs = super.buildInputs ++ [ pkgs.libuuid ];
nativeBuildInputs = super.nativeBuildInputs ++ [ pkgs.perl ];
});
in
''
${vma}/bin/vma create "vzdump-qemu-${cfg.filenameSuffix}.vma" \
-c ${
cfgFile "qemu-server.conf" (cfg.qemuConf // cfg.qemuExtraConf)
}/qemu-server.conf drive-virtio0=$diskImage
rm $diskImage
${pkgs.zstd}/bin/zstd "vzdump-qemu-${cfg.filenameSuffix}.vma"
mv "vzdump-qemu-${cfg.filenameSuffix}.vma.zst" $out/
mkdir -p $out/nix-support
echo "file vma $out/vzdump-qemu-${cfg.filenameSuffix}.vma.zst" > $out/nix-support/hydra-build-products
'';
inherit (cfg.qemuConf) additionalSpace bootSize;
inherit (config.virtualisation) diskSize;
format = "raw";
inherit config lib pkgs;
};
loader.timeout = 0;
initrd.availableKernelModules = [ "uas" "virtio_blk" "virtio_pci" ];
};
boot = {
growPartition = true;
kernelParams = [ "console=ttyS0" ];
loader.grub = {
device = lib.mkDefault (
if (hasNoFsPartition || supportBios) then
# Even if there is a separate no-fs partition ("/dev/disk/by-partlabel/no-fs" i.e. "/dev/vda2"),
# which will be used by the bootloader, do not set it as loader.grub.device.
# GRUB installation fails, unless the whole disk is selected.
"/dev/vda"
else
"nodev"
);
efiSupport = lib.mkDefault supportEfi;
efiInstallAsRemovable = lib.mkDefault supportEfi;
};
fileSystems."/" = {
device = "/dev/disk/by-label/nixos";
autoResize = true;
fsType = "ext4";
};
fileSystems."/boot" = lib.mkIf hasBootPartition {
device = "/dev/disk/by-label/ESP";
fsType = "vfat";
};
networking = mkIf cfg.cloudInit.enable {
hostName = mkForce "";
useDHCP = false;
};
services = {
cloud-init = mkIf cfg.cloudInit.enable {
enable = true;
network.enable = true;
loader.timeout = 0;
initrd.availableKernelModules = [
"uas"
"virtio_blk"
"virtio_pci"
];
};
sshd.enable = mkDefault true;
qemuGuest.enable = true;
};
proxmox.qemuExtraConf.${cfg.cloudInit.device} = "${cfg.cloudInit.defaultStorage}:vm-9999-cloudinit,media=cdrom";
};
fileSystems."/" = {
device = "/dev/disk/by-label/nixos";
autoResize = true;
fsType = "ext4";
};
fileSystems."/boot" = lib.mkIf hasBootPartition {
device = "/dev/disk/by-label/ESP";
fsType = "vfat";
};
networking = mkIf cfg.cloudInit.enable {
hostName = mkForce "";
useDHCP = false;
};
services = {
cloud-init = mkIf cfg.cloudInit.enable {
enable = true;
network.enable = true;
};
sshd.enable = mkDefault true;
qemuGuest.enable = true;
};
proxmox.qemuExtraConf.${cfg.cloudInit.device} = "${cfg.cloudInit.defaultStorage}:vm-9999-cloudinit,media=cdrom";
};
}

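To tie the Proxmox changes together, a hedged usage sketch; the module path and values are illustrative, and `proxmox.qemuConf.diskSize` is replaced by the shared option:

{ modulesPath, ... }: {
  imports = [ (modulesPath + "/virtualisation/proxmox-image.nix") ];

  proxmox.qemuConf.bios = "ovmf";   # the assertions above pair "efi" partitioning with ovmf
  proxmox.qemuExtraConf.cpu = "host";
  virtualisation.diskSize = 20480;  # MB; replaces the removed proxmox.qemuConf.diskSize
}

The compressed VMA archive is then produced by building the `config.system.build.VMA` attribute defined above.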
Some files were not shown because too many files have changed in this diff.