Project import generated by Copybara.

GitOrigin-RevId: 4c2fcb090b1f3e5b47eaa7bd33913b574a11e0a0
Default email 2024-10-11 07:15:48 +02:00
parent a40189fd38
commit ae2dc6aea6
2150 changed files with 53379 additions and 41698 deletions


@@ -186,3 +186,6 @@ cffc27daf06c77c0d76bc35d24b929cb9d68c3c9
# fetchurl: nixfmt-rfc-style
ce21e97a1f20dee15da85c084f9d1148d84f853b
+# percona: apply nixfmt
+8d14fa2886fec877690c6d28cfcdba4503dbbcea


@@ -16,3 +16,5 @@ nixos/doc/default.nix linguist-documentation=false
nixos/modules/module-list.nix merge=union
# pkgs/top-level/all-packages.nix merge=union
+ci/OWNERS linguist-language=CODEOWNERS


@@ -1,391 +1,4 @@
-# CODEOWNERS file
+# Use ci/OWNERS instead
#
-# This file is used to describe who owns what in this repository. This file does not
-# replace `meta.maintainers` but is instead used for other things than derivations
+# This file would be for the native code owner feature of GitHub,
+# but is not being used because of its problems, see ci/OWNERS
# and modules, like documentation, package sets, and other assets.
#
# For documentation on this file, see https://help.github.com/articles/about-codeowners/
# Mentioned users will get code review requests.
#
# IMPORTANT NOTE: in order to actually get pinged, commit access is required.
# This also holds true for GitHub teams. Since almost none of our teams have write
# permissions, you need to list all members of the team with commit access individually.
# CI
/.github/workflows @NixOS/Security @Mic92 @zowoq
/.github/workflows/check-nix-format.yml @infinisil
/.github/workflows/nixpkgs-vet.yml @infinisil @philiptaron
/.github/workflows/codeowners.yml @infinisil
/.github/OWNERS @infinisil
/ci @infinisil @philiptaron @NixOS/Security
# Development support
/.editorconfig @Mic92 @zowoq
/shell.nix @infinisil @NixOS/Security
# Libraries
/lib @infinisil
/lib/systems @alyssais @ericson2314
/lib/generators.nix @infinisil @Profpatsch
/lib/cli.nix @infinisil @Profpatsch
/lib/debug.nix @infinisil @Profpatsch
/lib/asserts.nix @infinisil @Profpatsch
/lib/path/* @infinisil
/lib/fileset @infinisil
## Libraries / Module system
/lib/modules.nix @infinisil @roberth
/lib/types.nix @infinisil @roberth
/lib/options.nix @infinisil @roberth
/lib/tests/modules.sh @infinisil @roberth
/lib/tests/modules @infinisil @roberth
# Nixpkgs Internals
/default.nix @Ericson2314
/pkgs/top-level/default.nix @Ericson2314
/pkgs/top-level/impure.nix @Ericson2314
/pkgs/top-level/stage.nix @Ericson2314
/pkgs/top-level/splice.nix @Ericson2314
/pkgs/top-level/release-cross.nix @Ericson2314
/pkgs/top-level/by-name-overlay.nix @infinisil @philiptaron
/pkgs/stdenv @philiptaron
/pkgs/stdenv/generic @Ericson2314
/pkgs/stdenv/generic/check-meta.nix @Ericson2314
/pkgs/stdenv/cross @Ericson2314
/pkgs/build-support @philiptaron
/pkgs/build-support/cc-wrapper @Ericson2314
/pkgs/build-support/bintools-wrapper @Ericson2314
/pkgs/build-support/setup-hooks @Ericson2314
/pkgs/build-support/setup-hooks/auto-patchelf.sh @layus
/pkgs/build-support/setup-hooks/auto-patchelf.py @layus
/pkgs/pkgs-lib @infinisil
## Format generators/serializers
/pkgs/pkgs-lib/formats/libconfig @h7x4
/pkgs/pkgs-lib/formats/hocon @h7x4
# Nixpkgs build-support
/pkgs/build-support/writers @lassulus @Profpatsch
# Nixpkgs make-disk-image
/doc/build-helpers/images/makediskimage.section.md @raitobezarius
/nixos/lib/make-disk-image.nix @raitobezarius
# Nix, the package manager
# @raitobezarius is not "code owner", but is listed here to be notified of changes
# pertaining to the Nix package manager.
# i.e. no authority over those files.
pkgs/tools/package-management/nix/ @NixOS/nix-team @raitobezarius
nixos/modules/installer/tools/nix-fallback-paths.nix @NixOS/nix-team @raitobezarius
# Nixpkgs documentation
/maintainers/scripts/db-to-md.sh @jtojnar @ryantm
/maintainers/scripts/doc @jtojnar @ryantm
# Contributor documentation
/CONTRIBUTING.md @infinisil
/.github/PULL_REQUEST_TEMPLATE.md @infinisil
/doc/contributing/ @infinisil
/doc/contributing/contributing-to-documentation.chapter.md @jtojnar @infinisil
/lib/README.md @infinisil
/doc/README.md @infinisil
/nixos/README.md @infinisil
/pkgs/README.md @infinisil
/pkgs/by-name/README.md @infinisil
/maintainers/README.md @infinisil
# User-facing development documentation
/doc/development.md @infinisil
/doc/development @infinisil
# NixOS Internals
/nixos/default.nix @infinisil
/nixos/lib/from-env.nix @infinisil
/nixos/lib/eval-config.nix @infinisil
/nixos/modules/system/activation/bootspec.nix @grahamc @cole-h @raitobezarius
/nixos/modules/system/activation/bootspec.cue @grahamc @cole-h @raitobezarius
# NixOS integration test driver
/nixos/lib/test-driver @tfc
# NixOS QEMU virtualisation
/nixos/modules/virtualisation/qemu-vm.nix @raitobezarius
# ACME
/nixos/modules/security/acme @arianvp @flokli @aanderse @emilazy # no merge permission: @m1cr0man
# Systemd
/nixos/modules/system/boot/systemd.nix @NixOS/systemd
/nixos/modules/system/boot/systemd @NixOS/systemd
/nixos/lib/systemd-*.nix @NixOS/systemd
/pkgs/os-specific/linux/systemd @NixOS/systemd
# Systemd-boot
/nixos/modules/system/boot/loader/systemd-boot @JulienMalka
# Images and installer media
/nixos/modules/profiles/installation-device.nix @ElvishJerricco
/nixos/modules/installer/cd-dvd/ @ElvishJerricco
/nixos/modules/installer/sd-card/
# Updaters
## update.nix
/maintainers/scripts/update.nix @jtojnar
/maintainers/scripts/update.py @jtojnar
## common-updater-scripts
/pkgs/common-updater/scripts/update-source-version @jtojnar
# Python-related code and docs
/doc/languages-frameworks/python.section.md @mweinelt @natsukium
/maintainers/scripts/update-python-libraries @natsukium
/pkgs/development/interpreters/python @natsukium
/pkgs/top-level/python-packages.nix @natsukium
/pkgs/top-level/release-python.nix @natsukium
# Haskell
/doc/languages-frameworks/haskell.section.md @sternenseemann @maralorn
/maintainers/scripts/haskell @sternenseemann @maralorn
/pkgs/development/compilers/ghc @sternenseemann @maralorn
/pkgs/development/haskell-modules @sternenseemann @maralorn
/pkgs/test/haskell @sternenseemann @maralorn
/pkgs/top-level/release-haskell.nix @sternenseemann @maralorn
/pkgs/top-level/haskell-packages.nix @sternenseemann @maralorn
# Perl
/pkgs/development/interpreters/perl @stigtsp @zakame @marcusramberg
/pkgs/top-level/perl-packages.nix @stigtsp @zakame @marcusramberg
/pkgs/development/perl-modules @stigtsp @zakame @marcusramberg
# R
/pkgs/applications/science/math/R @jbedo
/pkgs/development/r-modules @jbedo
# Rust
/pkgs/development/compilers/rust @Mic92 @zowoq @winterqt @figsoda
/pkgs/build-support/rust @zowoq @winterqt @figsoda
/doc/languages-frameworks/rust.section.md @zowoq @winterqt @figsoda
# C compilers
/pkgs/development/compilers/gcc
/pkgs/development/compilers/llvm @alyssais @RossComputerGuy
/pkgs/development/compilers/emscripten @raitobezarius
/doc/languages-frameworks/emscripten.section.md @raitobezarius
# Audio
/nixos/modules/services/audio/botamusique.nix @mweinelt
/nixos/modules/services/audio/snapserver.nix @mweinelt
/nixos/tests/botamusique.nix @mweinelt
/nixos/tests/snapcast.nix @mweinelt
# Browsers
/pkgs/applications/networking/browsers/firefox @mweinelt
/pkgs/applications/networking/browsers/chromium @emilylange
/nixos/tests/chromium.nix @emilylange
# Certificate Authorities
pkgs/data/misc/cacert/ @ajs124 @lukegb @mweinelt
pkgs/development/libraries/nss/ @ajs124 @lukegb @mweinelt
pkgs/development/python-modules/buildcatrust/ @ajs124 @lukegb @mweinelt
# Jetbrains
/pkgs/applications/editors/jetbrains @edwtjo
# Licenses
/lib/licenses.nix @alyssais
# Qt
/pkgs/development/libraries/qt-5 @K900 @NickCao @SuperSandro2000 @ttuegel
/pkgs/development/libraries/qt-6 @K900 @NickCao @SuperSandro2000 @ttuegel
# KDE / Plasma 5
/pkgs/applications/kde @K900 @NickCao @SuperSandro2000 @ttuegel
/pkgs/desktops/plasma-5 @K900 @NickCao @SuperSandro2000 @ttuegel
/pkgs/development/libraries/kde-frameworks @K900 @NickCao @SuperSandro2000 @ttuegel
# KDE / Plasma 6
/pkgs/kde @K900 @NickCao @SuperSandro2000 @ttuegel
/maintainers/scripts/kde @K900 @NickCao @SuperSandro2000 @ttuegel
# PostgreSQL and related stuff
/pkgs/servers/sql/postgresql @thoughtpolice
/nixos/modules/services/databases/postgresql.md @thoughtpolice
/nixos/modules/services/databases/postgresql.nix @thoughtpolice
/nixos/tests/postgresql.nix @thoughtpolice
# Hardened profile & related modules
/nixos/modules/profiles/hardened.nix @joachifm
/nixos/modules/security/lock-kernel-modules.nix @joachifm
/nixos/modules/security/misc.nix @joachifm
/nixos/tests/hardened.nix @joachifm
/pkgs/os-specific/linux/kernel/hardened/config.nix @joachifm
# Home Automation
/nixos/modules/services/home-automation/home-assistant.nix @mweinelt
/nixos/modules/services/home-automation/zigbee2mqtt.nix @mweinelt
/nixos/tests/home-assistant.nix @mweinelt
/nixos/tests/zigbee2mqtt.nix @mweinelt
/pkgs/servers/home-assistant @mweinelt
/pkgs/tools/misc/esphome @mweinelt
# Network Time Daemons
/pkgs/tools/networking/chrony @thoughtpolice
/pkgs/tools/networking/ntp @thoughtpolice
/pkgs/tools/networking/openntpd @thoughtpolice
/nixos/modules/services/networking/ntp @thoughtpolice
# Network
/pkgs/tools/networking/kea/default.nix @mweinelt
/pkgs/tools/networking/babeld/default.nix @mweinelt
/nixos/modules/services/networking/babeld.nix @mweinelt
/nixos/modules/services/networking/kea.nix @mweinelt
/nixos/modules/services/networking/knot.nix @mweinelt
/nixos/modules/services/monitoring/prometheus/exporters/kea.nix @mweinelt
/nixos/tests/babeld.nix @mweinelt
/nixos/tests/kea.nix @mweinelt
/nixos/tests/knot.nix @mweinelt
# Web servers
/doc/packages/nginx.section.md @raitobezarius
/pkgs/servers/http/nginx/ @raitobezarius
/nixos/modules/services/web-servers/nginx/ @raitobezarius
# Dhall
/pkgs/development/dhall-modules @Gabriella439 @Profpatsch @ehmry
/pkgs/development/interpreters/dhall @Gabriella439 @Profpatsch @ehmry
# Idris
/pkgs/development/idris-modules @Infinisil
/pkgs/development/compilers/idris2 @mattpolzin
# Bazel
/pkgs/development/tools/build-managers/bazel @Profpatsch
# NixOS modules for e-mail and dns services
/nixos/modules/services/mail/mailman.nix @peti
/nixos/modules/services/mail/postfix.nix @peti
/nixos/modules/services/networking/bind.nix @peti
/nixos/modules/services/mail/rspamd.nix @peti
# Emacs
/pkgs/applications/editors/emacs/elisp-packages @adisbladis
/pkgs/applications/editors/emacs @adisbladis
/pkgs/top-level/emacs-packages.nix @adisbladis
# Kakoune
/pkgs/applications/editors/kakoune @philiptaron
# Neovim
/pkgs/applications/editors/neovim @figsoda @teto
# VimPlugins
/pkgs/applications/editors/vim/plugins @figsoda
# VsCode Extensions
/pkgs/applications/editors/vscode/extensions
# PHP interpreter, packages, extensions, tests and documentation
/doc/languages-frameworks/php.section.md @aanderse @drupol @globin @ma27 @talyz
/nixos/tests/php @aanderse @drupol @globin @ma27 @talyz
/pkgs/build-support/php/build-pecl.nix @aanderse @drupol @globin @ma27 @talyz
/pkgs/build-support/php @drupol
/pkgs/development/interpreters/php @jtojnar @aanderse @drupol @globin @ma27 @talyz
/pkgs/development/php-packages @aanderse @drupol @globin @ma27 @talyz
/pkgs/top-level/php-packages.nix @jtojnar @aanderse @drupol @globin @ma27 @talyz
# Docker tools
/pkgs/build-support/docker @roberth
/nixos/tests/docker-tools* @roberth
/doc/build-helpers/images/dockertools.section.md @roberth
# Blockchains
/pkgs/applications/blockchains @mmahut @RaghavSood
# Go
/doc/languages-frameworks/go.section.md @kalbasit @katexochen @Mic92 @zowoq
/pkgs/build-support/go @kalbasit @katexochen @Mic92 @zowoq
/pkgs/development/compilers/go @kalbasit @katexochen @Mic92 @zowoq
# GNOME
/pkgs/desktops/gnome @jtojnar
/pkgs/desktops/gnome/extensions @jtojnar
/pkgs/build-support/make-hardcode-gsettings-patch @jtojnar
# Cinnamon
/pkgs/by-name/ci/cinnamon-* @mkg20001
/pkgs/by-name/cj/cjs @mkg20001
/pkgs/by-name/mu/muffin @mkg20001
/pkgs/by-name/ne/nemo @mkg20001
/pkgs/by-name/ne/nemo-* @mkg20001
# nim
/pkgs/development/compilers/nim @ehmry
# terraform providers
/pkgs/applications/networking/cluster/terraform-providers @zowoq
# Forgejo
nixos/modules/services/misc/forgejo.nix @adamcstephens @bendlas @emilylange
pkgs/by-name/fo/forgejo/ @adamcstephens @bendlas @emilylange
# Dotnet
/pkgs/build-support/dotnet @corngood
/pkgs/development/compilers/dotnet @corngood
/pkgs/test/dotnet @corngood
/doc/languages-frameworks/dotnet.section.md @corngood
# Node.js
/pkgs/build-support/node/build-npm-package @winterqt
/pkgs/build-support/node/fetch-npm-deps @winterqt
/doc/languages-frameworks/javascript.section.md @winterqt
# OCaml
/pkgs/build-support/ocaml @ulrikstrid
/pkgs/development/compilers/ocaml @ulrikstrid
/pkgs/development/ocaml-modules @ulrikstrid
# ZFS
pkgs/os-specific/linux/zfs/2_1.nix @raitobezarius
pkgs/os-specific/linux/zfs/generic.nix @raitobezarius
nixos/modules/tasks/filesystems/zfs.nix @raitobezarius
nixos/tests/zfs.nix @raitobezarius
# Zig
/pkgs/development/compilers/zig @figsoda
/doc/hooks/zig.section.md @figsoda
# Buildbot
nixos/modules/services/continuous-integration/buildbot @Mic92 @zowoq
nixos/tests/buildbot.nix @Mic92 @zowoq
pkgs/development/tools/continuous-integration/buildbot @Mic92 @zowoq
# Pretix
pkgs/by-name/pr/pretix/ @mweinelt
pkgs/by-name/pr/pretalx/ @mweinelt
nixos/modules/services/web-apps/pretix.nix @mweinelt
nixos/modules/services/web-apps/pretalx.nix @mweinelt
nixos/tests/web-apps/pretix.nix @mweinelt
nixos/tests/web-apps/pretalx.nix @mweinelt
# incus/lxc
nixos/maintainers/scripts/incus/ @adamcstephens
nixos/modules/virtualisation/incus.nix @adamcstephens
nixos/modules/virtualisation/lxc* @adamcstephens
nixos/tests/incus/ @adamcstephens
pkgs/by-name/in/incus/ @adamcstephens
pkgs/by-name/lx/lxc* @adamcstephens
# ExpidusOS, Flutter
/pkgs/development/compilers/flutter @RossComputerGuy
/pkgs/desktops/expidus @RossComputerGuy
# GNU Tar & Zip
/pkgs/tools/archivers/gnutar @RossComputerGuy
/pkgs/tools/archivers/zip @RossComputerGuy
# SELinux
/pkgs/os-specific/linux/checkpolicy @RossComputerGuy
/pkgs/os-specific/linux/libselinux @RossComputerGuy
/pkgs/os-specific/linux/libsepol @RossComputerGuy
# installShellFiles
/pkgs/by-name/in/installShellFiles/* @Ericson2314
/pkgs/test/install-shell-files/* @Ericson2314
/doc/hooks/installShellFiles.section.md @Ericson2314


@@ -1,19 +0,0 @@
#
# Currently unused! Use CODEOWNERS for now, see workflows/codeowners.yml
#
####################
#
# This file is used to describe who owns what in this repository.
# Users/teams will get review requests for PRs that change their files.
#
# This file does not replace `meta.maintainers`
# but is instead used for other things than derivations and modules,
# like documentation, package sets, and other assets.
#
# This file uses the same syntax as the natively supported CODEOWNERS file,
# see https://help.github.com/articles/about-codeowners/ for documentation.
# However it comes with some notable differences:
# - There is no need for user/team listed here to have write access.
# - No reviews will be requested for PRs that target the wrong base branch.
#
# Processing of this file is implemented in workflows/codeowners.yml


@@ -20,7 +20,7 @@ jobs:
    if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith('backport', github.event.label.name))
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
        with:
          ref: ${{ github.event.pull_request.head.sha }}
      - name: Create backport PRs


@@ -19,7 +19,7 @@ jobs:
    runs-on: ubuntu-latest
    # we don't limit this action to only NixOS repo since the checks are cheap and useful developer feedback
    steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
      - uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
      - uses: cachix/cachix-action@ad2ddac53f961de1989924296a1f236fcfbaa4fc # v15
        with:


@@ -14,7 +14,7 @@ jobs:
    runs-on: ubuntu-latest
    if: github.repository_owner == 'NixOS'
    steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
        with:
          fetch-depth: 0
          filter: blob:none


@@ -13,7 +13,7 @@ jobs:
    runs-on: ubuntu-latest
    if: github.repository_owner == 'NixOS'
    steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
        with:
          # pull_request_target checks out the base branch by default
          ref: refs/pull/${{ github.event.pull_request.number }}/merge


@@ -18,7 +18,7 @@ jobs:
    runs-on: ubuntu-latest
    if: "!contains(github.event.pull_request.title, '[skip treewide]')"
    steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
        with:
          # pull_request_target checks out the base branch by default
          ref: refs/pull/${{ github.event.pull_request.number }}/merge


@@ -12,7 +12,7 @@ jobs:
    runs-on: ubuntu-latest
    if: "!contains(github.event.pull_request.title, '[skip treewide]')"
    steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
        with:
          # pull_request_target checks out the base branch by default
          ref: refs/pull/${{ github.event.pull_request.number }}/merge


@@ -10,7 +10,7 @@ jobs:
    name: shell-check-x86_64-linux
    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
        with:
          # pull_request_target checks out the base branch by default
          ref: refs/pull/${{ github.event.pull_request.number }}/merge
@@ -22,7 +22,7 @@ jobs:
    name: shell-check-aarch64-darwin
    runs-on: macos-latest
    steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
        with:
          # pull_request_target checks out the base branch by default
          ref: refs/pull/${{ github.event.pull_request.number }}/merge


@@ -13,13 +13,9 @@ on:
    types: [opened, ready_for_review, synchronize, reopened, edited]
env:
-  # TODO: Once confirmed that this works by seeing that the action would request
-  # reviews from the same people (or refuse for wrong base branches),
-  # move all entries from CODEOWNERS to OWNERS and change this value here
-  # OWNERS_FILE: .github/OWNERS
-  OWNERS_FILE: .github/CODEOWNERS
-  # Also remove this
-  DRY_MODE: 1
+  OWNERS_FILE: ci/OWNERS
+  # Don't do anything on draft PRs
+  DRY_MODE: ${{ github.event.pull_request.draft && '1' || '' }}
jobs:
  # Check that code owners is valid
@@ -29,10 +25,17 @@ jobs:
    steps:
      - uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
+      - uses: cachix/cachix-action@ad2ddac53f961de1989924296a1f236fcfbaa4fc # v15
+        if: github.repository_owner == 'NixOS'
+        with:
+          # This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.
+          name: nixpkgs-ci
+          authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
      # Important: Because we use pull_request_target, this checks out the base branch of the PR, not the PR itself.
      # We later build and run code from the base branch with access to secrets,
      # so it's important this is not the PRs code.
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
        with:
          path: base
@@ -45,7 +48,7 @@ jobs:
          app-id: ${{ vars.OWNER_APP_ID }}
          private-key: ${{ secrets.OWNER_APP_PRIVATE_KEY }}
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
        with:
          ref: refs/pull/${{ github.event.number }}/merge
          path: pr
@@ -69,7 +72,7 @@ jobs:
      # Important: Because we use pull_request_target, this checks out the base branch of the PR, not the PR head.
      # This is intentional, because we need to request the review of owners as declared in the base branch.
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
      - uses: actions/create-github-app-token@5d869da34e18e7287c1daad50e0b8ea0f506ce69 # v1.11.0
        id: app-token
@@ -84,5 +87,3 @@ jobs:
        run: result/bin/request-reviews.sh ${{ github.repository }} ${{ github.event.number }} "$OWNERS_FILE"
        env:
          GH_TOKEN: ${{ steps.app-token.outputs.token }}
-          # Don't do anything on draft PRs
-          DRY_MODE: ${{ github.event.pull_request.draft && '1' || '' }}


@@ -25,7 +25,7 @@ jobs:
      - name: print list of changed files
        run: |
          cat "$HOME/changed_files"
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
        with:
          # pull_request_target checks out the base branch by default
          ref: refs/pull/${{ github.event.pull_request.number }}/merge


@@ -15,7 +15,7 @@ jobs:
    runs-on: ubuntu-latest
    if: github.repository_owner == 'NixOS'
    steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
        with:
          # pull_request_target checks out the base branch by default
          ref: refs/pull/${{ github.event.pull_request.number }}/merge


@@ -17,7 +17,7 @@ jobs:
    runs-on: ubuntu-latest
    if: github.repository_owner == 'NixOS'
    steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
        with:
          # pull_request_target checks out the base branch by default
          ref: refs/pull/${{ github.event.pull_request.number }}/merge


@@ -25,7 +25,7 @@ jobs:
          if [[ -s "$HOME/changed_files" ]]; then
            echo "CHANGED_FILES=$HOME/changed_files" > "$GITHUB_ENV"
          fi
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
        with:
          # pull_request_target checks out the base branch by default
          ref: refs/pull/${{ github.event.pull_request.number }}/merge


@@ -26,53 +26,23 @@ jobs:
    # This should take 1 minute at most, but let's be generous. The default of 6 hours is definitely too long.
    timeout-minutes: 10
    steps:
-      # This step has to be in this file, because it's needed to determine which revision of the repository to fetch, and we can only use other files from the repository once it's fetched.
+      # This checks out the base branch because of pull_request_target
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
+        with:
+          path: base
+          sparse-checkout: ci
      - name: Resolving the merge commit
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
-          # This checks for mergeability of a pull request as recommended in
-          # https://docs.github.com/en/rest/guides/using-the-rest-api-to-interact-with-your-git-database?apiVersion=2022-11-28#checking-mergeability-of-pull-requests
-          # Retry the API query this many times
-          retryCount=5
-          # Start with 5 seconds, but double every retry
-          retryInterval=5
-          while true; do
-            echo "Checking whether the pull request can be merged"
-            prInfo=$(gh api \
-              -H "Accept: application/vnd.github+json" \
-              -H "X-GitHub-Api-Version: 2022-11-28" \
-              /repos/"$GITHUB_REPOSITORY"/pulls/${{ github.event.pull_request.number }})
-            mergeable=$(jq -r .mergeable <<< "$prInfo")
-            mergedSha=$(jq -r .merge_commit_sha <<< "$prInfo")
-            if [[ "$mergeable" == "null" ]]; then
-              if (( retryCount == 0 )); then
-                echo "Not retrying anymore. It's likely that GitHub is having internal issues: check https://www.githubstatus.com/"
-                exit 1
-              else
-                (( retryCount -= 1 )) || true
-                # null indicates that GitHub is still computing whether it's mergeable
-                # Wait a couple seconds before trying again
-                echo "GitHub is still computing whether this PR can be merged, waiting $retryInterval seconds before trying again ($retryCount retries left)"
-                sleep "$retryInterval"
-                (( retryInterval *= 2 )) || true
-              fi
-            else
-              break
-            fi
-          done
-          if [[ "$mergeable" == "true" ]]; then
-            echo "The PR can be merged, checking the merge commit $mergedSha"
+          if mergedSha=$(base/ci/get-merge-commit.sh ${{ github.repository }} ${{ github.event.number }}); then
+            echo "Checking the merge commit $mergedSha"
            echo "mergedSha=$mergedSha" >> "$GITHUB_ENV"
          else
-            echo "The PR cannot be merged, it has a merge conflict, skipping the rest.."
+            echo "Skipping the rest..."
          fi
+          rm -rf base
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
        if: env.mergedSha
        with:
          # pull_request_target checks out the base branch by default
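The `ci/get-merge-commit.sh` script invoked above is not included in this view. A minimal sketch of such a script, assuming it simply extracts the mergeability polling that was previously inlined in the workflow (repository and PR number as arguments, merge commit SHA on stdout, non-zero exit on a merge conflict) — treat the details as hypothetical, not the actual script:

```bash
#!/usr/bin/env bash
# Hypothetical reconstruction of ci/get-merge-commit.sh, based on the removed inline logic above.
set -euo pipefail

repo=$1 prNumber=$2
retryCount=5
retryInterval=5 # seconds, doubled on every retry

while true; do
  prInfo=$(gh api \
    -H "Accept: application/vnd.github+json" \
    -H "X-GitHub-Api-Version: 2022-11-28" \
    "/repos/$repo/pulls/$prNumber")
  mergeable=$(jq -r .mergeable <<< "$prInfo")
  # null means GitHub is still computing mergeability; retry with backoff
  [[ "$mergeable" != "null" ]] && break
  (( retryCount-- > 0 )) || exit 2
  sleep "$retryInterval"
  (( retryInterval *= 2 ))
done

if [[ "$mergeable" == "true" ]]; then
  jq -r .merge_commit_sha <<< "$prInfo" # the workflow captures this SHA
else
  exit 1 # merge conflict: lets `if mergedSha=$(...)` fall into its else branch
fi
```

The workflow's `if mergedSha=$(base/ci/get-merge-commit.sh …); then` then keys the remaining steps off that captured output.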


@@ -41,7 +41,7 @@ jobs:
          into: staging-24.05
    name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
    steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
      - name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
        uses: devmasx/merge-branch@854d3ac71ed1e9deb668e0074781b81fdd6e771f # 1.4.0


@@ -39,7 +39,7 @@ jobs:
          into: staging
    name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
    steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1
      - name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
        uses: devmasx/merge-branch@854d3ac71ed1e9deb668e0074781b81fdd6e771f # 1.4.0


@@ -1,69 +0,0 @@
name: "Update terraform-providers"
on:
#schedule:
# - cron: "0 3 * * *"
workflow_dispatch:
permissions:
contents: read
jobs:
tf-providers:
permissions:
contents: write # for peter-evans/create-pull-request to create branch
pull-requests: write # for peter-evans/create-pull-request to create a PR
if: github.repository_owner == 'NixOS' && github.ref == 'refs/heads/master' # ensure workflow_dispatch only runs on master
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- uses: cachix/install-nix-action@08dcb3a5e62fa31e2da3d490afc4176ef55ecd72 # v30
with:
nix_path: nixpkgs=channel:nixpkgs-unstable
- name: setup
id: setup
run: |
echo "title=terraform-providers: update $(date -u +"%Y-%m-%d")" >> $GITHUB_OUTPUT
- name: update terraform-providers
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
git config user.name "github-actions[bot]"
echo | nix-shell \
maintainers/scripts/update.nix \
--argstr commit true \
--argstr keep-going true \
--argstr max-workers 2 \
--argstr path terraform-providers
- name: get failed updates
run: |
echo 'FAILED<<EOF' >> $GITHUB_ENV
git ls-files --others >> $GITHUB_ENV
echo 'EOF' >> $GITHUB_ENV
# cleanup logs of failed updates so they aren't included in the PR
- name: clean repo
run: |
git clean -f
- name: create PR
uses: peter-evans/create-pull-request@6cd32fd93684475c31847837f87bb135d40a2b79 # v7.0.3
with:
body: |
Automatic update by [update-terraform-providers](https://github.com/NixOS/nixpkgs/blob/master/.github/workflows/update-terraform-providers.yml) action.
https://github.com/NixOS/nixpkgs/actions/runs/${{ github.run_id }}
These providers failed to update:
```
${{ env.FAILED }}
```
Check that all providers build with:
```
@ofborg build opentofu.full
```
If there is more than ten commits in the PR `ofborg` won't build it automatically and you will need to use the above command.
branch: terraform-providers-update
delete-branch: false
title: ${{ steps.setup.outputs.title }}
token: ${{ secrets.GITHUB_TOKEN }}


@@ -93,6 +93,8 @@ This section describes in some detail how changes can be made and proposed with
7. Respond to review comments, potential CI failures and potential merge conflicts by updating the pull request.
   Always keep the pull request in a mergeable state.
+   This process is covered in more detail from the non-technical side in [I opened a PR, how do I get it merged?](#i-opened-a-pr-how-do-i-get-it-merged).
   The custom [OfBorg](https://github.com/NixOS/ofborg) CI system will perform various checks to help ensure code quality, whose results you can see at the bottom of the pull request.
   See [the OfBorg Readme](https://github.com/NixOS/ofborg#readme) for more details.
@@ -193,19 +195,12 @@ The last checkbox is about whether it fits the guidelines in this `CONTRIBUTING.
[rebase]: #rebasing-between-branches-ie-from-master-to-staging
From time to time, changes between branches must be rebased, for example, if the
-number of new rebuilds they would cause is too large for the target branch. When
-rebasing, care must be taken to include only the intended changes, otherwise
-many CODEOWNERS will be inadvertently requested for review. To achieve this,
-rebasing should not be performed directly on the target branch, but on the merge
-base between the current and target branch. As an additional precautionary measure,
-you should temporarily mark the PR as draft for the duration of the operation.
-This reduces the probability of mass-pinging people. (OfBorg might still
-request a couple of persons for reviews though.)
+number of new rebuilds they would cause is too large for the target branch.
In the following example, we assume that the current branch, called `feature`,
is based on `master`, and we rebase it onto the merge base between
-`master` and `staging` so that the PR can eventually be retargeted to
-`staging` without causing a mess. The example uses `upstream` as the remote for `NixOS/nixpkgs.git`
+`master` and `staging` so that the PR can be retargeted to
+`staging`. The example uses `upstream` as the remote for `NixOS/nixpkgs.git`
while `origin` is the remote you are pushing to.
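The body of that example is largely elided from this view; only its tail appears in the next hunk. As a minimal sketch of the idea under the stated assumptions (`feature` based on `master`, `upstream` pointing at `NixOS/nixpkgs.git`) — not the guide's verbatim commands:

```bash
# Refresh the remote-tracking branches
git fetch upstream

# Replay only the commits unique to `feature` onto the merge base of
# master and staging, so no unrelated commits (and their code owners'
# review requests) get pulled into the PR
git switch feature
git rebase --onto "$(git merge-base upstream/master upstream/staging)" upstream/master
```

The guide's remaining steps, inspecting the result and force-pushing, follow below.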
@@ -234,36 +229,6 @@ git status
git push origin feature --force-with-lease
```
#### Something went wrong and a lot of people were pinged
It happens. Remember to be kind, especially to new contributors.
There is no way back, so the pull request should be closed and locked
(if possible). The changes should be re-submitted in a new PR, in which the people
originally involved in the conversation need to manually be pinged again.
No further discussion should happen on the original PR, as a lot of people
are now subscribed to it.
The following message (or a version thereof) might be left when closing to
describe the situation, since closing and locking without any explanation
is kind of rude:
```markdown
It looks like you accidentally mass-pinged a bunch of people, which are now subscribed
and getting notifications for everything in this pull request. Unfortunately, they
cannot be automatically unsubscribed from the issue (removing review request does not
unsubscribe), therefore development cannot continue in this pull request anymore.
Please open a new pull request with your changes, link back to this one and ping the
people actually involved in here over there.
In order to avoid this in the future, there are instructions for how to properly
rebase between branches in our [contribution guidelines](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md#rebasing-between-branches-ie-from-master-to-staging).
Setting your pull request to draft prior to rebasing is strongly recommended.
In draft status, you can preview the list of people that are about to be requested
for review, which allows you to sidestep this issue.
This is not a bulletproof method though, as OfBorg still does review requests even on draft PRs.
```
## How to backport pull requests ## How to backport pull requests
[pr-backport]: #how-to-backport-pull-requests [pr-backport]: #how-to-backport-pull-requests
@@ -343,7 +308,7 @@ If you consider having enough knowledge and experience in a topic and would like
Container system, boot system and library changes are some examples of the pull requests fitting this category.
-## How to merge pull requests
+## How to merge pull requests yourself
[pr-merge]: #how-to-merge-pull-requests
To streamline automated updates, leverage the nixpkgs-merge-bot by simply commenting `@NixOS/nixpkgs-merge-bot merge`. The bot will verify if the following conditions are met, refusing to merge otherwise:
@@ -353,10 +318,7 @@ To streamline automated updates, leverage the nixpkgs-merge-bot by simply commen
Further, nixpkgs-merge-bot will ensure all ofBorg checks (except the Darwin-related ones) are successfully completed before merging the pull request. Should the checks still be underway, the bot patiently waits for ofBorg to finish before attempting the merge again.
-For other pull requests, the *Nixpkgs committers* are people who have been given
-permission to merge.
-It is possible for community members that have enough knowledge and experience on a special topic to contribute by merging pull requests.
+For other pull requests, please see [I opened a PR, how do I get it merged?](#i-opened-a-pr-how-do-i-get-it-merged).
In case the PR is stuck waiting for the original author to apply a trivial
change (a typo, capitalisation change, etc.) and the author allowed the members
@@ -553,6 +515,7 @@ To get a sense for what changes are considered mass rebuilds, see [previously me
- [Commit conventions](./doc/README.md#commit-conventions) for changes to `doc`, the Nixpkgs manual.
### Writing good commit messages
+[writing-good-commit-messages]: #writing-good-commit-messages
In addition to writing properly formatted commit messages, it's important to include relevant information so other developers can later understand *why* a change was made. While this information usually can be found by digging code, mailing list/Discourse archives, pull request discussions or upstream changes, it may require a lot of work.
@@ -651,3 +614,174 @@ Names of files and directories should be in lowercase, with dashes between words
As an exception, an explicit conditional expression with null can be used when fixing an important bug without triggering a mass rebuild.
If this is done a follow up pull request _should_ be created to change the code to `lib.optional(s)`. If this is done a follow up pull request _should_ be created to change the code to `lib.optional(s)`.
## I opened a PR, how do I get it merged?
[i-opened-a-pr-how-do-i-get-it-merged]:#i-opened-a-pr-how-do-i-get-it-merged
In order for your PR to be merged, someone with merge permissions on the repository ("committer") needs to review and merge it.
Because the group of people with merge permissions is mostly a collection of independent unpaid volunteers who do this in their own free time, this can take some time to happen.
It is entirely normal for your PR to sit around without any feedback for days, weeks or sometimes even months.
We strive to avoid such cases, of course, but the reality is that they do happen quite frequently.
Even when you get feedback, follow-up feedback may take similarly long.
Don't be intimidated by this and kindly ask for feedback again every so often.
If your change is good, it will eventually be merged.
There are some things you can do to help speed up the merging of your PR, though.
For that, you need to know what needs to happen before a committer will actually hit the merge button.
This section intends to give a little overview of and insight into what happens after you create your PR.
### The committer's perspective
PRs have varying quality and even the best people make mistakes.
It is the role of the committer team to assess whether a PR's changes are good or not.
In order for any PR to be merged, at least one committer needs to be convinced of its quality enough to merge it.
Committers typically assess three aspects of your PR:
1. Whether the change's intention is necessary and desirable
2. Whether the code quality of your changes is good
3. Whether the artefacts produced by the code are good
If you want your PR to get merged quickly and smoothly, it is in your best interest to help convince committers in these three aspects.
### How to help committers assess your PR
For the committer to judge your intention, it's best to explain why you've made your change.
This does not apply to trivial changes like version updates because the intention is obvious (though linking the changelog is appreciated).
For any more nuanced change or even a major version upgrade, it helps if you explain the background behind your change a bit.
E.g. if you're adding a package, explain what it is and why it should be in Nixpkgs.
This goes hand in hand with [Writing good commit messages](#writing-good-commit-messages).
For the code quality assessment, you cannot do anything yourself as only the committer can do this and they already have your code to look at.
In order to minimise the need for back and forth though, do take a look over your code changes yourself and try to put yourself into the shoes of someone who didn't just write that code.
Would you immediately know what the code does by glancing at it?
If not, reviewers will notice this and will ask you to clarify the code by refactoring it and/or adding a few explanations in code comments.
Doing this preemptively can save you and the committer a lot of time.
The code artefacts are the hardest for committers to assess because PRs touch all sorts of components: applications, libraries, NixOS modules, editor plugins and many many other things.
However, any individual committer can only really assess components that they themselves know how to use, and yet they must still be convinced somehow.
There isn't a good generic solution to this, but there are some ways of easing the committer's job here:
- Provide smoke tests that the committer can run without much research or setup.
Committers usually don't have the time or interest to learn how your component works and how they could test its functionality.
If you can provide a quick guide on how to use the component in a meaningful way or a ready-made command that demonstrates that the component works as expected, the committer can easily convince themselves that your change is good.
If it can be automated, you could even turn this smoke test into an automated NixOS test which reviewers could simply run via Nix; see the sketch after this list.
- Invite other users of the component to try it out and report their findings.
If a committer sees the testimonials of other users trying your change and it works as expected for them, that too can convince the committer of your PR's quality.
- Describe what you have done to test your PR.
If you can convince the committer that you have done sufficient quality assurance on your changes and they trust your report, this too can convince them of your PR's quality, albeit not as strongly as the methods above.
- Become a maintainer of the component.
This isn't something you can do on your first few PRs touching a component but listed maintainers generally receive more trust when it comes to changes to their maintained components and committers may opt to merge changes without deeper review when they see they're done by their respective maintainer.
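To illustrate the first point: this is roughly what a ready-made, reproducible smoke test looks like from the reviewer's side. `nixosTests.forgejo` is only an assumed example attribute here; substitute whichever test covers your component:

```bash
# From the root of a nixpkgs checkout: build and run the VM-based test headlessly
nix-build -A nixosTests.forgejo

# Or build the interactive driver to poke around in the test VMs manually
nix-build -A nixosTests.forgejo.driverInteractive
./result/bin/nixos-test-driver
```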
Even if you adhere to all of these recommendations, it is still quite possible for your PR to be forgotten or abandoned by any given committer.
Please remain mindful of the fact that they are doing this of their own volition, unpaid and in their free time, and therefore [owe you nothing](https://mikemcquaid.com/open-source-maintainers-owe-you-nothing/).
Causing a stink in such a situation is a surefire way to get any other potential committer to not want to look at your PR either.
Ask them nicely whether they still intend to review your PR and, if not, find yourself another committer to look at it.
### How can I get a committer to look at my PR?
- Simply wait. Reviewers frequently browse open PRs and may happen to run across yours and take a look.
- Get non-committers to review/approve. Many committers filter open PRs for low-hanging fruit that have already been reviewed.
- [@-mention](https://github.blog/news-insights/mention-somebody-they-re-notified/) someone and ask them nicely
- Post in one of the channels made for this purpose if there has been no activity for at least one week
- The current "PRs ready for review" or "PRs already reviewed" threads in the [NixOS Discourse](https://discourse.nixos.org/c/dev/14) (of course choose the one that applies to your situation)
- The [Nixpkgs Review Requests Matrix room](https://matrix.to/#/#review-requests:nixos.org).
### CI failed or got stuck on my PR, what do I do?
First ensure that the failure is actually related to your change.
Sometimes, the CI system simply has a hiccup or the check was broken by someone else before you made your changes.
Read through the error message; it's usually quite easy to tell whether it is caused by anything you did by checking whether it mentions the component you touched anywhere.
If it is indeed caused by your change, obviously try to fix it.
Don't be afraid of asking for advice if you're uncertain how to do that, others have likely fixed such issues dozens of times and can help you out.
Your PR is unlikely to be merged if it has a known issue, and it is the purpose of CI to alert you as well as reviewers to these issues.
ofBorg builds can often get stuck, particularly in PRs targeting `staging` and in builders for the Darwin platform. Reviewers will know how to handle them or when to ignore them.
Don't worry about it.
If there is a build failure, however, and it happened due to a package related to your change, you need to investigate it of course.
If ofBorg reveals the build to be broken on some platform and you don't have access to that platform, you should set your package's `meta.broken` accordingly.
When in any doubt, please simply ask via a comment in your PR or through one of the help channels.
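A quick way to triage a suspected package failure locally — `yourPackage` stands in for whichever attribute CI complained about:

```bash
# From the root of your nixpkgs checkout: try to reproduce the failure
nix-build -A yourPackage

# If the failure is specific to a platform you cannot test, mark the package
# as broken there instead, e.g. in its meta attributes (Nix, not shell):
#   meta.broken = stdenv.hostPlatform.isDarwin;
```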
## I received a review on my PR, how do I get it over the finish line?
In the review process, the committer will have left some sort of feedback on your PR.
They may have immediately approved your PR or even merged it, but the more likely case is that they want you to change a few things or require further input.
A reviewer may have taken a look at the code and it looked good to them ("Diff LGTM") but they still need to be convinced of the artefact's quality.
They might also be waiting on input from other users of the component or its listed maintainer on whether the intention of your PR makes sense for the component.
If you know of people who could help clarify any of this, please bring the PR to their attention.
The current state of the PR is frequently not clearly communicated, so please don't hesitate to ask about it if it's unclear to you.
It's also possible for the reviewer to not be convinced that your PR is necessary or that the method you've chosen to achieve your intention is the right one.
Please explain your intentions and reasoning to the committer in such a case.
There may be constraints you had to work with which they're not aware of or qualities of your approach that they didn't immediately notice.
(If these weren't clear to the reviewer, that's a good sign you should explain them in your commit message or code comments!)
There are some further pitfalls and realities which this section intends to make you aware of.
### A reviewer requested a bunch of insubstantial changes on my PR
The people involved in Nixpkgs care about code quality because, once in Nixpkgs, it needs to be maintained for many years to come.
It is therefore likely that other people will ask you to do some things in another way or adhere to some standard.
Sometimes, however, they also care a bit too much and may ask you to adhere to a personal preference of theirs.
It's not always easy to tell which is which and whether the requests are critically important to merging the PR.
Sometimes another reviewer may come along with totally different opinions on some points, too.
It is convention to mark review comments that are not critical to the PR as nitpicks but this is not always followed.
As the PR author, you should still take a look at these as they will often reveal best practices and unwritten rules that usually have good reasons behind them and you may want to incorporate them into your modus operandi.
Please keep in mind that reviewers almost always mean well here.
Their intent is not to denounce your code, they simply want your code to be as good as it can be.
Through their experience, they may also take notice of seemingly insignificant issues that have caused significant burden before.
Sometimes, however, they can also get a bit carried away and become too perfectionistic.
If you feel some of the requests are unreasonable or merely a matter of personal preference, try to nicely remind the reviewers that you may not intend this code to be 100% perfect or that you have different taste in some regards and press them on whether they think that these requests are *critical* to the PR's success.
While we do have a set of [official standards for the Nix community](https://github.com/NixOS/rfcs/), we don't have standards for everything and there are often multiple valid ways to achieve the same goal.
Unless there are standards forbidding the patterns used in your code or there are serious technical, maintainability or readability issues with it, you can insist on keeping the code the way you made it and disregard the requests.
Please communicate this clearly though; a simple "I prefer it this way and see no major issue with it" can save you a lot of arguing.
If you are unsure about some change requests, please ask reviewers *why* they requested them.
This will usually reveal how important they deem it to be and will help educate you about standards, best practices, unwritten rules, as well as preferences people have and why.
Some committers may have stronger opinions on some things and therefore (understandably) may not want to merge your PR if you don't follow their requests.
It is totally fine to get yourself a second or third opinion in such a case.
### Committers work on a push-basis
It's possible that you get a review but then nothing happens afterwards, even if you reply to review comments.
A committer not following up on your PR does not necessarily mean they're disinterested or unresponsive; they may have simply forgotten or had some other circumstances preventing them from doing so.
Committers typically handle many other PRs besides yours, and it is not realistic for them to keep up with all of them to a degree where they could reliably remember to follow up on every PR they had intended to.
If someone left an approving review on your PR and didn't merge it a few days later, the most likely case is that they simply forgot.
Please see it as your responsibility to actively remind reviewers of your open PRs.
The easiest way to do so is to simply trigger a GitHub notification.
GitHub notifies people involved in the PR when you add a comment to your PR, push your PR or re-request their review.
Doing any of that will get you people's attention again.
It may very well be the case that you have to do this every time you need the committer to follow up on your PR.
Again, this is a community project so please be mindful of people's circumstances here; be nice when requesting reviews again.
It may also be the case that the committer has lost interest or isn't familiar enough with the component you're touching to be comfortable merging your PR.
They will likely not state that fact immediately, however, so please ask for clarification and don't hesitate to find yourself another committer to take a look at your PR.
### Nothing helped
If you followed these guidelines but still got no results or if you feel that you have been wronged in some way, please explicitly reach out to the greater community via its communication channels.
The [NixOS Discourse](https://discourse.nixos.org/) is a great place to do this as it has historically been the asynchronous medium with the greatest concentration of committers and other people who are significantly involved in Nixpkgs.
There is a dedicated Discourse thread, [PRs in distress](https://discourse.nixos.org/t/prs-in-distress/3604), where you can link your PR if everything else fails.
The [Nixpkgs / NixOS contributions Matrix channel](https://matrix.to/#/#dev:nixos.org) is the best synchronous channel with the same qualities.
Please reserve these for cases where you've made a serious effort to get the attention of multiple active committers and provided realistic means for them to assess your PR's quality, though.
As mentioned previously, it is unfortunately perfectly normal for a PR to sit around for weeks on end due to the realities of this being a community project.
Please don't blow up situations where progress is happening but is merely not going fast enough for your tastes.
Honking in a traffic jam will not make you go any faster.

third_party/nixpkgs/ci/OWNERS (new file, vendored, 407 lines)

@@ -0,0 +1,407 @@
# This file is used to describe who owns what in this repository.
# Users/teams will get review requests for PRs that change their files.
#
# This file does not replace `meta.maintainers`
# but is instead used for other things than derivations and modules,
# like documentation, package sets, and other assets.
#
# This file uses the same syntax as the natively supported CODEOWNERS file,
# see https://help.github.com/articles/about-codeowners/ for documentation.
# However it comes with some notable differences:
# - There is no need for user/team listed here to have write access.
# - No reviews will be requested for PRs that target the wrong base branch.
#
# Processing of this file is implemented in workflows/codeowners.yml
# CI
/.github/workflows @NixOS/Security @Mic92 @zowoq
/.github/workflows/check-nix-format.yml @infinisil
/.github/workflows/nixpkgs-vet.yml @infinisil @philiptaron
/.github/workflows/codeowners.yml @infinisil
/ci/OWNERS @infinisil
/ci @infinisil @philiptaron @NixOS/Security
# Development support
/.editorconfig @Mic92 @zowoq
/shell.nix @infinisil @NixOS/Security
# Libraries
/lib @infinisil
/lib/systems @alyssais @ericson2314
/lib/generators.nix @infinisil @Profpatsch
/lib/cli.nix @infinisil @Profpatsch
/lib/debug.nix @infinisil @Profpatsch
/lib/asserts.nix @infinisil @Profpatsch
/lib/path/* @infinisil
/lib/fileset @infinisil
## Libraries / Module system
/lib/modules.nix @infinisil @roberth
/lib/types.nix @infinisil @roberth
/lib/options.nix @infinisil @roberth
/lib/tests/modules.sh @infinisil @roberth
/lib/tests/modules @infinisil @roberth
# Nixpkgs Internals
/default.nix @Ericson2314
/pkgs/top-level/default.nix @Ericson2314
/pkgs/top-level/impure.nix @Ericson2314
/pkgs/top-level/stage.nix @Ericson2314
/pkgs/top-level/splice.nix @Ericson2314
/pkgs/top-level/release-cross.nix @Ericson2314
/pkgs/top-level/by-name-overlay.nix @infinisil @philiptaron
/pkgs/stdenv @philiptaron
/pkgs/stdenv/generic @Ericson2314
/pkgs/stdenv/generic/check-meta.nix @Ericson2314
/pkgs/stdenv/cross @Ericson2314
/pkgs/build-support @philiptaron
/pkgs/build-support/cc-wrapper @Ericson2314
/pkgs/build-support/bintools-wrapper @Ericson2314
/pkgs/build-support/setup-hooks @Ericson2314
/pkgs/build-support/setup-hooks/auto-patchelf.sh @layus
/pkgs/build-support/setup-hooks/auto-patchelf.py @layus
/pkgs/pkgs-lib @infinisil
## Format generators/serializers
/pkgs/pkgs-lib/formats/libconfig @h7x4
/pkgs/pkgs-lib/formats/hocon @h7x4
# Nixpkgs build-support
/pkgs/build-support/writers @lassulus @Profpatsch
# Nixpkgs make-disk-image
/doc/build-helpers/images/makediskimage.section.md @raitobezarius
/nixos/lib/make-disk-image.nix @raitobezarius
# Nix, the package manager
# @raitobezarius is not "code owner", but is listed here to be notified of changes
# pertaining to the Nix package manager.
# i.e. no authority over those files.
pkgs/tools/package-management/nix/ @NixOS/nix-team @raitobezarius
nixos/modules/installer/tools/nix-fallback-paths.nix @NixOS/nix-team @raitobezarius
# Nixpkgs documentation
/maintainers/scripts/db-to-md.sh @jtojnar @ryantm
/maintainers/scripts/doc @jtojnar @ryantm
# Contributor documentation
/CONTRIBUTING.md @infinisil
/.github/PULL_REQUEST_TEMPLATE.md @infinisil
/doc/contributing/ @infinisil
/doc/contributing/contributing-to-documentation.chapter.md @jtojnar @infinisil
/lib/README.md @infinisil
/doc/README.md @infinisil
/nixos/README.md @infinisil
/pkgs/README.md @infinisil
/pkgs/by-name/README.md @infinisil
/maintainers/README.md @infinisil
# User-facing development documentation
/doc/development.md @infinisil
/doc/development @infinisil
# NixOS Internals
/nixos/default.nix @infinisil
/nixos/lib/from-env.nix @infinisil
/nixos/lib/eval-config.nix @infinisil
/nixos/modules/system/activation/bootspec.nix @grahamc @cole-h @raitobezarius
/nixos/modules/system/activation/bootspec.cue @grahamc @cole-h @raitobezarius
# NixOS integration test driver
/nixos/lib/test-driver @tfc
# NixOS QEMU virtualisation
/nixos/modules/virtualisation/qemu-vm.nix @raitobezarius
# ACME
/nixos/modules/security/acme @NixOS/acme
# Systemd
/nixos/modules/system/boot/systemd.nix @NixOS/systemd
/nixos/modules/system/boot/systemd @NixOS/systemd
/nixos/lib/systemd-*.nix @NixOS/systemd
/pkgs/os-specific/linux/systemd @NixOS/systemd
# Systemd-boot
/nixos/modules/system/boot/loader/systemd-boot @JulienMalka
# Images and installer media
/nixos/modules/profiles/installation-device.nix @ElvishJerricco
/nixos/modules/installer/cd-dvd/ @ElvishJerricco
/nixos/modules/installer/sd-card/
# Amazon
/nixos/modules/virtualisation/amazon-init.nix @arianvp
/nixos/modules/virtualisation/ec2-data.nix @arianvp
/nixos/modules/virtualisation/amazon-options.nix @arianvp
/nixos/modules/virtualisation/amazon-image.nix @arianvp
/nixos/maintainers/scripts/ec2/ @arianvp
/nixos/modules/services/misc/amazon-ssm-agent.nix @arianvp
/nixos/tests/amazon-ssm-agent.nix @arianvp
/nixos/modules/system/boot/grow-partition.nix @arianvp
# Updaters
## update.nix
/maintainers/scripts/update.nix @jtojnar
/maintainers/scripts/update.py @jtojnar
## common-updater-scripts
/pkgs/common-updater/scripts/update-source-version @jtojnar
# Python-related code and docs
/doc/languages-frameworks/python.section.md @mweinelt @natsukium
/maintainers/scripts/update-python-libraries @natsukium
/pkgs/development/interpreters/python @natsukium
/pkgs/top-level/python-packages.nix @natsukium
/pkgs/top-level/release-python.nix @natsukium
# Haskell
/doc/languages-frameworks/haskell.section.md @sternenseemann @maralorn
/maintainers/scripts/haskell @sternenseemann @maralorn
/pkgs/development/compilers/ghc @sternenseemann @maralorn
/pkgs/development/haskell-modules @sternenseemann @maralorn
/pkgs/test/haskell @sternenseemann @maralorn
/pkgs/top-level/release-haskell.nix @sternenseemann @maralorn
/pkgs/top-level/haskell-packages.nix @sternenseemann @maralorn
# Perl
/pkgs/development/interpreters/perl @stigtsp @zakame @marcusramberg
/pkgs/top-level/perl-packages.nix @stigtsp @zakame @marcusramberg
/pkgs/development/perl-modules @stigtsp @zakame @marcusramberg
# R
/pkgs/applications/science/math/R @jbedo
/pkgs/development/r-modules @jbedo
# Rust
/pkgs/development/compilers/rust @Mic92 @zowoq @winterqt @figsoda
/pkgs/build-support/rust @zowoq @winterqt @figsoda
/doc/languages-frameworks/rust.section.md @zowoq @winterqt @figsoda
# C compilers
/pkgs/development/compilers/gcc
/pkgs/development/compilers/llvm @alyssais @RossComputerGuy
/pkgs/development/compilers/emscripten @raitobezarius
/doc/languages-frameworks/emscripten.section.md @raitobezarius
# Audio
/nixos/modules/services/audio/botamusique.nix @mweinelt
/nixos/modules/services/audio/snapserver.nix @mweinelt
/nixos/tests/botamusique.nix @mweinelt
/nixos/tests/snapcast.nix @mweinelt
# Browsers
/pkgs/applications/networking/browsers/firefox @mweinelt
/pkgs/applications/networking/browsers/chromium @emilylange
/nixos/tests/chromium.nix @emilylange
# Certificate Authorities
pkgs/data/misc/cacert/ @ajs124 @lukegb @mweinelt
pkgs/development/libraries/nss/ @ajs124 @lukegb @mweinelt
pkgs/development/python-modules/buildcatrust/ @ajs124 @lukegb @mweinelt
# Jetbrains
/pkgs/applications/editors/jetbrains @edwtjo
# Licenses
/lib/licenses.nix @alyssais
# Qt
/pkgs/development/libraries/qt-5 @K900 @NickCao @SuperSandro2000 @ttuegel
/pkgs/development/libraries/qt-6 @K900 @NickCao @SuperSandro2000 @ttuegel
# KDE / Plasma 5
/pkgs/applications/kde @K900 @NickCao @SuperSandro2000 @ttuegel
/pkgs/desktops/plasma-5 @K900 @NickCao @SuperSandro2000 @ttuegel
/pkgs/development/libraries/kde-frameworks @K900 @NickCao @SuperSandro2000 @ttuegel
# KDE / Plasma 6
/pkgs/kde @K900 @NickCao @SuperSandro2000 @ttuegel
/maintainers/scripts/kde @K900 @NickCao @SuperSandro2000 @ttuegel
# PostgreSQL and related stuff
/pkgs/servers/sql/postgresql @thoughtpolice
/nixos/modules/services/databases/postgresql.md @thoughtpolice
/nixos/modules/services/databases/postgresql.nix @thoughtpolice
/nixos/tests/postgresql.nix @thoughtpolice
# Hardened profile & related modules
/nixos/modules/profiles/hardened.nix @joachifm
/nixos/modules/security/lock-kernel-modules.nix @joachifm
/nixos/modules/security/misc.nix @joachifm
/nixos/tests/hardened.nix @joachifm
/pkgs/os-specific/linux/kernel/hardened/config.nix @joachifm
# Home Automation
/nixos/modules/services/home-automation/home-assistant.nix @mweinelt
/nixos/modules/services/home-automation/zigbee2mqtt.nix @mweinelt
/nixos/tests/home-assistant.nix @mweinelt
/nixos/tests/zigbee2mqtt.nix @mweinelt
/pkgs/servers/home-assistant @mweinelt
/pkgs/tools/misc/esphome @mweinelt
# Network Time Daemons
/pkgs/by-name/ch/chrony @thoughtpolice
/pkgs/tools/networking/ntp @thoughtpolice
/pkgs/tools/networking/openntpd @thoughtpolice
/nixos/modules/services/networking/ntp @thoughtpolice
# Network
/pkgs/tools/networking/kea/default.nix @mweinelt
/pkgs/tools/networking/babeld/default.nix @mweinelt
/nixos/modules/services/networking/babeld.nix @mweinelt
/nixos/modules/services/networking/kea.nix @mweinelt
/nixos/modules/services/networking/knot.nix @mweinelt
/nixos/modules/services/monitoring/prometheus/exporters/kea.nix @mweinelt
/nixos/tests/babeld.nix @mweinelt
/nixos/tests/kea.nix @mweinelt
/nixos/tests/knot.nix @mweinelt
# Web servers
/doc/packages/nginx.section.md @raitobezarius
/pkgs/servers/http/nginx/ @raitobezarius
/nixos/modules/services/web-servers/nginx/ @raitobezarius
# Dhall
/pkgs/development/dhall-modules @Gabriella439 @Profpatsch @ehmry
/pkgs/development/interpreters/dhall @Gabriella439 @Profpatsch @ehmry
# Idris
/pkgs/development/idris-modules @Infinisil
/pkgs/development/compilers/idris2 @mattpolzin
# Bazel
/pkgs/development/tools/build-managers/bazel @Profpatsch
# NixOS modules for e-mail and dns services
/nixos/modules/services/mail/mailman.nix @peti
/nixos/modules/services/mail/postfix.nix @peti
/nixos/modules/services/networking/bind.nix @peti
/nixos/modules/services/mail/rspamd.nix @peti
# Emacs
/pkgs/applications/editors/emacs/elisp-packages @NixOS/emacs
/pkgs/applications/editors/emacs @NixOS/emacs
/pkgs/top-level/emacs-packages.nix @NixOS/emacs
/doc/packages/emacs.section.md @NixOS/emacs
/nixos/modules/services/editors/emacs.md @NixOS/emacs
# Kakoune
/pkgs/applications/editors/kakoune @philiptaron
# Neovim
/pkgs/applications/editors/neovim @figsoda @teto
# VimPlugins
/pkgs/applications/editors/vim/plugins @figsoda
# VsCode Extensions
/pkgs/applications/editors/vscode/extensions
# PHP interpreter, packages, extensions, tests and documentation
/doc/languages-frameworks/php.section.md @aanderse @drupol @globin @ma27 @talyz
/nixos/tests/php @aanderse @drupol @globin @ma27 @talyz
/pkgs/build-support/php/build-pecl.nix @aanderse @drupol @globin @ma27 @talyz
/pkgs/build-support/php @drupol
/pkgs/development/interpreters/php @jtojnar @aanderse @drupol @globin @ma27 @talyz
/pkgs/development/php-packages @aanderse @drupol @globin @ma27 @talyz
/pkgs/top-level/php-packages.nix @jtojnar @aanderse @drupol @globin @ma27 @talyz
# Docker tools
/pkgs/build-support/docker @roberth
/nixos/tests/docker-tools* @roberth
/doc/build-helpers/images/dockertools.section.md @roberth
# Blockchains
/pkgs/applications/blockchains @mmahut @RaghavSood
# Go
/doc/languages-frameworks/go.section.md @kalbasit @katexochen @Mic92 @zowoq
/pkgs/build-support/go @kalbasit @katexochen @Mic92 @zowoq
/pkgs/development/compilers/go @kalbasit @katexochen @Mic92 @zowoq
# GNOME
/pkgs/desktops/gnome @jtojnar
/pkgs/desktops/gnome/extensions @jtojnar
/pkgs/build-support/make-hardcode-gsettings-patch @jtojnar
# Cinnamon
/pkgs/by-name/ci/cinnamon-* @mkg20001
/pkgs/by-name/cj/cjs @mkg20001
/pkgs/by-name/mu/muffin @mkg20001
/pkgs/by-name/ne/nemo @mkg20001
/pkgs/by-name/ne/nemo-* @mkg20001
# nim
/pkgs/development/compilers/nim @ehmry
# terraform providers
/pkgs/applications/networking/cluster/terraform-providers @zowoq
# Forgejo
nixos/modules/services/misc/forgejo.nix @adamcstephens @bendlas @emilylange
pkgs/by-name/fo/forgejo/ @adamcstephens @bendlas @emilylange
# Dotnet
/pkgs/build-support/dotnet @corngood
/pkgs/development/compilers/dotnet @corngood
/pkgs/test/dotnet @corngood
/doc/languages-frameworks/dotnet.section.md @corngood
# Node.js
/pkgs/build-support/node/build-npm-package @winterqt
/pkgs/build-support/node/fetch-npm-deps @winterqt
/doc/languages-frameworks/javascript.section.md @winterqt
# OCaml
/pkgs/build-support/ocaml @ulrikstrid
/pkgs/development/compilers/ocaml @ulrikstrid
/pkgs/development/ocaml-modules @ulrikstrid
# ZFS
pkgs/os-specific/linux/zfs/2_1.nix @raitobezarius
pkgs/os-specific/linux/zfs/generic.nix @raitobezarius
nixos/modules/tasks/filesystems/zfs.nix @raitobezarius
nixos/tests/zfs.nix @raitobezarius
# Zig
/pkgs/development/compilers/zig @figsoda
/doc/hooks/zig.section.md @figsoda
# Buildbot
nixos/modules/services/continuous-integration/buildbot @Mic92 @zowoq
nixos/tests/buildbot.nix @Mic92 @zowoq
pkgs/development/tools/continuous-integration/buildbot @Mic92 @zowoq
# Pretix
pkgs/by-name/pr/pretix/ @mweinelt
pkgs/by-name/pr/pretalx/ @mweinelt
nixos/modules/services/web-apps/pretix.nix @mweinelt
nixos/modules/services/web-apps/pretalx.nix @mweinelt
nixos/tests/web-apps/pretix.nix @mweinelt
nixos/tests/web-apps/pretalx.nix @mweinelt
# incus/lxc
nixos/maintainers/scripts/incus/ @adamcstephens
nixos/modules/virtualisation/incus.nix @adamcstephens
nixos/modules/virtualisation/lxc* @adamcstephens
nixos/tests/incus/ @adamcstephens
pkgs/by-name/in/incus/ @adamcstephens
pkgs/by-name/lx/lxc* @adamcstephens
# ExpidusOS, Flutter
/pkgs/development/compilers/flutter @RossComputerGuy
/pkgs/desktops/expidus @RossComputerGuy
# GNU Tar & Zip
/pkgs/tools/archivers/gnutar @RossComputerGuy
/pkgs/tools/archivers/zip @RossComputerGuy
# SELinux
/pkgs/os-specific/linux/checkpolicy @RossComputerGuy
/pkgs/os-specific/linux/libselinux @RossComputerGuy
/pkgs/os-specific/linux/libsepol @RossComputerGuy
# installShellFiles
/pkgs/by-name/in/installShellFiles/* @Ericson2314
/pkgs/test/install-shell-files/* @Ericson2314
/doc/hooks/installShellFiles.section.md @Ericson2314

View file

@ -41,3 +41,58 @@ Why not just build the tooling right from the PRs Nixpkgs version?
- Because it improves security, since we don't have to build potentially untrusted code from PRs.
  The tool only needs a very minimal Nix evaluation at runtime, which can work with [readonly-mode](https://nixos.org/manual/nix/stable/command-ref/opt-common.html#opt-readonly-mode) and [restrict-eval](https://nixos.org/manual/nix/stable/command-ref/conf-file.html#conf-restrict-eval).
## `get-merge-commit.sh GITHUB_REPO PR_NUMBER`
Check whether a PR is mergeable and return the test merge commit as
[computed by GitHub](https://docs.github.com/en/rest/guides/using-the-rest-api-to-interact-with-your-git-database?apiVersion=2022-11-28#checking-mergeability-of-pull-requests).
Arguments:
- `GITHUB_REPO`: The repository of the PR, e.g. `NixOS/nixpkgs`
- `PR_NUMBER`: The PR number, e.g. `1234`
Exit codes:
- 0: The PR can be merged, the test merge commit hash is returned on stdout
- 1: The PR cannot be merged because it's not open anymore
- 2: The PR cannot be merged because it has a merge conflict
- 3: The merge commit isn't being computed, GitHub is likely having internal issues, unknown if the PR is mergeable
### Usage
This script can be used in GitHub Actions workflows as follows:
```yaml
on: pull_request_target
# We need a token to query the API, but it doesn't need any special permissions
permissions: {}
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
# Important: Because of `pull_request_target`, this doesn't check out the PR,
# but rather the base branch of the PR, which is needed so we don't run untrusted code
- uses: actions/checkout@<VERSION>
with:
path: base
sparse-checkout: ci
- name: Resolving the merge commit
env:
GH_TOKEN: ${{ github.token }}
run: |
if mergedSha=$(base/ci/get-merge-commit.sh ${{ github.repository }} ${{ github.event.number }}); then
echo "Checking the merge commit $mergedSha"
echo "mergedSha=$mergedSha" >> "$GITHUB_ENV"
else
# Skipping so that no notifications are sent
echo "Skipping the rest..."
fi
rm -rf base
- uses: actions/checkout@<VERSION>
# Add this to _all_ subsequent steps to skip them
if: env.mergedSha
with:
ref: ${{ env.mergedSha }}
- ...
```

third_party/nixpkgs/ci/get-merge-commit.sh vendored Executable file
View file

@ -0,0 +1,62 @@
#!/usr/bin/env bash
# See ./README.md for docs
set -euo pipefail
log() {
echo "$@" >&2
}
if (( $# < 2 )); then
log "Usage: $0 GITHUB_REPO PR_NUMBER"
exit 99
fi
repo=$1
prNumber=$2
# Retry the API query this many times
retryCount=5
# Start with 5 seconds, but double every retry
retryInterval=5
while true; do
log "Checking whether the pull request can be merged"
prInfo=$(gh api \
-H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
"/repos/$repo/pulls/$prNumber")
# Non-open PRs won't have their mergeability computed no matter what
state=$(jq -r .state <<< "$prInfo")
if [[ "$state" != open ]]; then
log "PR is not open anymore"
exit 1
fi
mergeable=$(jq -r .mergeable <<< "$prInfo")
if [[ "$mergeable" == "null" ]]; then
if (( retryCount == 0 )); then
log "Not retrying anymore. It's likely that GitHub is having internal issues: check https://www.githubstatus.com/"
exit 3
else
(( retryCount -= 1 )) || true
# null indicates that GitHub is still computing whether it's mergeable
# Wait a couple seconds before trying again
log "GitHub is still computing whether this PR can be merged, waiting $retryInterval seconds before trying again ($retryCount retries left)"
sleep "$retryInterval"
(( retryInterval *= 2 )) || true
fi
else
break
fi
done
if [[ "$mergeable" == "true" ]]; then
log "The PR can be merged"
jq -r .merge_commit_sha <<< "$prInfo"
else
log "The PR has a merge conflict"
exit 2
fi

View file

@ -10,16 +10,18 @@ log() {
 echo "$@" >&2
 }
 
-if (( "$#" < 5 )); then
-log "Usage: $0 GIT_REPO BASE_REF HEAD_REF OWNERS_FILE PR_AUTHOR"
+if (( "$#" < 7 )); then
+log "Usage: $0 GIT_REPO OWNERS_FILE BASE_REPO BASE_REF HEAD_REF PR_NUMBER PR_AUTHOR"
 exit 1
 fi
 
 gitRepo=$1
-baseRef=$2
-headRef=$3
-ownersFile=$4
-prAuthor=$5
+ownersFile=$2
+baseRepo=$3
+baseRef=$4
+headRef=$5
+prNumber=$6
+prAuthor=$7
 
 tmp=$(mktemp -d)
 trap 'rm -rf "$tmp"' exit
@ -32,8 +34,9 @@ log "This PR touches ${#touchedFiles[@]} files"
 # remove code owners to avoid pinging them
 git -C "$gitRepo" show "$baseRef":"$ownersFile" > "$tmp"/codeowners
 
-# Associative arrays with the team/user as the key for easy deduplication
-declare -A teams users
+# Associative array with the user as the key for easy de-duplication
+# Make sure to always lowercase keys to avoid duplicates with different casings
+declare -A users=()
 
 for file in "${touchedFiles[@]}"; do
 result=$(codeowners --file "$tmp"/codeowners "$file")
@ -59,29 +62,65 @@ for file in "${touchedFiles[@]}"; do
 fi
 # The first regex match is everything after the @
 entry=${BASH_REMATCH[1]}
-if [[ "$entry" =~ .*/(.*) ]]; then
-# Teams look like $org/$team, where we only need $team for the API
-# call to request reviews from teams
-teams[${BASH_REMATCH[1]}]=
+if [[ "$entry" =~ (.*)/(.*) ]]; then
+# Teams look like $org/$team
+org=${BASH_REMATCH[1]}
+team=${BASH_REMATCH[2]}
+# Instead of requesting a review from the team itself,
+# we request reviews from the individual users.
+# This is because once somebody from a team reviewed the PR,
+# the API doesn't expose that the team was already requested for a review,
+# so we wouldn't be able to avoid rerequesting reviews
+# without saving some extra state somewhere
+# We could also consider implementing a more advanced heuristic
+# in the future that e.g. only pings one team member,
+# but escalates to somebody else if that member doesn't respond in time.
+gh api \
+--cache=1h \
+-H "Accept: application/vnd.github+json" \
+-H "X-GitHub-Api-Version: 2022-11-28" \
+"/orgs/$org/teams/$team/members" \
+--jq '.[].login' > "$tmp/team-members"
+readarray -t members < "$tmp/team-members"
+log "Team $entry has these members: ${members[*]}"
+for user in "${members[@]}"; do
+users[${user,,}]=
+done
 else
 # Everything else is a user
-users[$entry]=
+users[${entry,,}]=
 fi
 done
 done
 
 # Cannot request a review from the author
-if [[ -v users[$prAuthor] ]]; then
+if [[ -v users[${prAuthor,,}] ]]; then
 log "One or more files are owned by the PR author, ignoring"
-unset 'users[$prAuthor]'
+unset 'users[${prAuthor,,}]'
 fi
 
+gh api \
+-H "Accept: application/vnd.github+json" \
+-H "X-GitHub-Api-Version: 2022-11-28" \
+"/repos/$baseRepo/pulls/$prNumber/reviews" \
+--jq '.[].user.login' > "$tmp/already-reviewed-by"
+
+# And we don't want to rerequest reviews from people who already reviewed
+while read -r user; do
+if [[ -v users[${user,,}] ]]; then
+log "User $user is a code owner but has already left a review, ignoring"
+unset 'users[${user,,}]'
+fi
+done < "$tmp/already-reviewed-by"
+
 # Turn it into a JSON for the GitHub API call to request PR reviewers
 jq -n \
 --arg users "${!users[*]}" \
---arg teams "${!teams[*]}" \
 '{
 reviewers: $users | split(" "),
-team_reviewers: $teams | split(" ")
 }'

View file

@ -60,10 +60,8 @@ git -C "$tmp/nixpkgs.git" remote add fork https://github.com/"$prRepo".git
 git -C "$tmp/nixpkgs.git" config remote.fork.partialclonefilter tree:0
 git -C "$tmp/nixpkgs.git" config remote.fork.promisor true
 
-# This should not conflict with any refs in Nixpkgs
-headRef=refs/remotes/fork/pr
-# Only fetch into a remote ref, because the local ref namespace is used by Nixpkgs, don't want any conflicts
-git -C "$tmp/nixpkgs.git" fetch --no-tags fork "$prBranch":"$headRef"
+git -C "$tmp/nixpkgs.git" fetch --no-tags fork "$prBranch"
+headRef=$(git -C "$tmp/nixpkgs.git" rev-parse refs/remotes/fork/"$prBranch")
 
 log "Checking correctness of the base branch"
 if ! "$SCRIPT_DIR"/verify-base-branch.sh "$tmp/nixpkgs.git" "$headRef" "$baseRepo" "$baseBranch" "$prRepo" "$prBranch" | tee "$tmp/invalid-base-error" >&2; then
@ -80,7 +78,7 @@ if ! "$SCRIPT_DIR"/verify-base-branch.sh "$tmp/nixpkgs.git" "$headRef" "$baseRep
 fi
 
 log "Getting code owners to request reviews from"
-"$SCRIPT_DIR"/get-reviewers.sh "$tmp/nixpkgs.git" "$baseBranch" "$headRef" "$ownersFile" "$prAuthor" > "$tmp/reviewers.json"
+"$SCRIPT_DIR"/get-reviewers.sh "$tmp/nixpkgs.git" "$ownersFile" "$baseRepo" "$baseBranch" "$headRef" "$prNumber" "$prAuthor" > "$tmp/reviewers.json"
 log "Requesting reviews from: $(<"$tmp/reviewers.json")"

View file

@ -90,8 +90,9 @@ for testBranch in "${devBranches[@]}"; do
 log -e "\e[33m"
 echo "The PR's base branch is set to $baseBranch, but $extraCommits commits from the $testBranch branch are included. Make sure you know the [right base branch for your changes](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md#branch-conventions), then:"
 echo "- If the changes should go to the $testBranch branch, [change the base branch](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/changing-the-base-branch-of-a-pull-request) to $testBranch"
-echo "- If the changes should go to the $baseBranch branch, rebase your PR onto the merge base with the $testBranch branch:"
-echo "  \`\`\`"
+echo "- If the changes should go to the $baseBranch branch, rebase your PR onto the merge base with the $baseBranch branch:"
+echo "  \`\`\`bash"
+echo "  # git rebase --onto \$(git merge-base upstream/$baseBranch HEAD) \$(git merge-base upstream/$testBranch HEAD)"
 echo "  git rebase --onto $prMergeBase $testMergeBase"
 echo "  git push --force-with-lease"
 echo "  \`\`\`"

View file

@ -176,126 +176,138 @@ When needed, each convention explain why it exists, so you can make a decision w
 Note that these conventions are about the **structure** of the manual (and its source files), not about the content that goes in it.
 You, as the writer of documentation, are still in charge of its content.
 
-- Put each sentence in its own line.
-This makes reviews and suggestions much easier, since GitHub's review system is based on lines.
-It also helps identifying long sentences at a glance.
+### One sentence per line
+
+Put each sentence in its own line.
+This makes reviews and suggestions much easier, since GitHub's review system is based on lines.
+It also helps identifying long sentences at a glance.
 
-- Use the [admonition syntax](#admonitions) for callouts and examples.
+### Callouts and examples
+
+Use the [admonition syntax](#admonitions) for callouts and examples.
 
-- Provide at least one example per function, and make examples self-contained.
-This is easier to understand for beginners.
-It also helps with testing that it actually works especially once we introduce automation.
-Example code should be such that it can be passed to `pkgs.callPackage`.
-Instead of something like:
+### Provide self-contained examples
+
+Provide at least one example per function, and make examples self-contained.
+This is easier to understand for beginners.
+It also helps with testing that it actually works especially once we introduce automation.
+Example code should be such that it can be passed to `pkgs.callPackage`.
+Instead of something like:
 
 ```nix
 pkgs.dockerTools.buildLayeredImage {
 name = "hello";
 contents = [ pkgs.hello ];
 }
 ```
 
 Write something like:
 
 ```nix
 { dockerTools, hello }:
 dockerTools.buildLayeredImage {
 name = "hello";
 contents = [ hello ];
 }
 ```
 
-- When showing inputs/outputs of any [REPL](https://en.wikipedia.org/wiki/Read%E2%80%93eval%E2%80%93print_loop), such as a shell or the Nix REPL, use a format as you'd see in the REPL, while trying to visually separate inputs from outputs.
+### REPLs
+
+When showing inputs/outputs of any [REPL](https://en.wikipedia.org/wiki/Read%E2%80%93eval%E2%80%93print_loop), such as a shell or the Nix REPL, use a format as you'd see in the REPL, while trying to visually separate inputs from outputs.
 This means that for a shell, you should use a format like the following:
 
 ```shell
 $ nix-build -A hello '<nixpkgs>' \
 --option require-sigs false \
 --option trusted-substituters file:///tmp/hello-cache \
 --option substituters file:///tmp/hello-cache
 /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1
 ```
 
 Note how the input is preceded by `$` on the first line and indented on subsequent lines, and how the output is provided as you'd see on the shell.
 For the Nix REPL, you should use a format like the following:
 
 ```shell
 nix-repl> builtins.attrNames { a = 1; b = 2; }
 [ "a" "b" ]
 ```
 
 Note how the input is preceded by `nix-repl>` and the output is provided as you'd see on the Nix REPL.
 
-- When documenting functions or anything that has inputs/outputs and example usage, use nested headings to clearly separate inputs, outputs, and examples.
-Keep examples as the last nested heading, and link to the examples wherever applicable in the documentation.
+### Headings for inputs, outputs and examples
+
+When documenting functions or anything that has inputs/outputs and example usage, use nested headings to clearly separate inputs, outputs, and examples.
+Keep examples as the last nested heading, and link to the examples wherever applicable in the documentation.
 The purpose of this convention is to provide a familiar structure for navigating the manual, so any reader can expect to find content related to inputs in an "inputs" heading, examples in an "examples" heading, and so on.
 An example:
 
 ```
 ## buildImage
 Some explanation about the function here.
 Describe a particular scenario, and point to [](#ex-dockerTools-buildImage), which is an example demonstrating it.
 ### Inputs
 Documentation for the inputs of `buildImage`.
 Perhaps even point to [](#ex-dockerTools-buildImage) again when talking about something specifically linked to it.
 ### Passthru outputs
 Documentation for any passthru outputs of `buildImage`.
 ### Examples
 Note that this is the last nested heading in the `buildImage` section.
 :::{.example #ex-dockerTools-buildImage}
 # Using `buildImage`
 Example of how to use `buildImage` goes here.
 :::
 ```
 
-- Use [definition lists](#definition-lists) to document function arguments, and the attributes of such arguments as well as their [types](https://nixos.org/manual/nix/stable/language/values).
+### Function arguments
+
+Use [definition lists](#definition-lists) to document function arguments, and the attributes of such arguments as well as their [types](https://nixos.org/manual/nix/stable/language/values).
 For example:
 
 ```markdown
 # pkgs.coolFunction {#pkgs.coolFunction}
 `pkgs.coolFunction` *`name`* *`config`*
 Description of what `callPackage` does.
 ## Inputs {#pkgs-coolFunction-inputs}
 If something's special about `coolFunction`'s general argument handling, you can say so here.
 Otherwise, just describe the single argument or start the arguments' definition list without introduction.
 *`name`* (String)
 : The name of the resulting image.
 *`config`* (Attribute set)
 : Introduce the parameter. Maybe you have a test to make sure `{ }` is a sensible default; then you can say: these attributes are optional; `{ }` is a valid argument.
 `outputHash` (String; _optional_)
 : A brief explanation including when and when not to pass this attribute.
 : _Default:_ the output path's hash.
 ```
 
 Checklist:
 - Start with a synopsis, to show the order of positional arguments.
 - Metavariables are in emphasized code spans: ``` *`arg1`* ```. Metavariables are placeholders where users may write arbitrary expressions. This includes positional arguments.
 - Attribute names are regular code spans: ``` `attr1` ```. These identifiers can _not_ be picked freely by users, so they are _not_ metavariables.
 - _optional_ attributes have a _`Default:`_ if it's easily described as a value.
 - _optional_ attributes have a _`Default behavior:`_ if it's not easily described using a value.
 - Nix types aren't in code spans, because they are not code
 - Nix types are capitalized, to distinguish them from the camelCase Module System types, which _are_ code and behave like functions.
 
 #### Examples

View file

@ -28,7 +28,7 @@ It does so in a clean environment (using `env --ignore-environment`), and it che
 The variables that control this phase are:
 
-- `dontVersionCheck`: Disable adding this hook to the [`preDistPhases`](#var-stdenv-preDist). Useful if you do want to load the bash functions of the hook, but run them differently.
+- `dontVersionCheck`: Disable adding this hook to the [`preInstallCheckHooks`](#ssec-installCheck-phase). Useful if you do want to load the bash functions of the hook, but run them differently.
 - `versionCheckProgram`: The full path to the program that should print the `${version}` string. Defaults roughly to `${placeholder "out"}/bin/${pname}`. Using `$out` in the value of this variable won't work, as environment variables from this variable are not expanded by the hook. Hence using `placeholder` is unavoidable.
 - `versionCheckProgramArg`: The argument that needs to be passed to `versionCheckProgram`. If undefined, the hook tries first `--help` and then `--version`. Examples: `version`, `-V`, `-v`.
 - `preVersionCheck`: A hook to run before the check is done.
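
For orientation, here is how these variables typically combine in a package expression (a minimal sketch; the `example` package and its values are made up):

```nix
{ stdenv, versionCheckHook }:

stdenv.mkDerivation (finalAttrs: {
  pname = "example";
  version = "1.2.3";

  # ... src, build inputs, etc. ...

  # Run the hook as part of the install check:
  nativeInstallCheckInputs = [ versionCheckHook ];
  doInstallCheck = true;

  # Only needed when the hook's `--help`/`--version` fallback is not enough:
  versionCheckProgramArg = "--version";
})
```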

View file

@ -149,3 +149,104 @@ All new projects should use the CUDA redistributables available in [`cudaPackage
| Find libraries | `buildPhase` or `patchelf` | Missing dependency on a `lib` or `static` output | Add the missing dependency | The `lib` or `static` output typically contain the libraries |

In the scenario you are unable to run the resulting binary: this is arguably the most complicated as it could be any combination of the previous reasons. This type of failure typically occurs when a library attempts to load or open a library it depends on that it does not declare in its `DT_NEEDED` section. As a first step, ensure that dependencies are patched with [`autoAddDriverRunpath`](https://search.nixos.org/packages?channel=unstable&type=packages&query=autoAddDriverRunpath). Failing that, try running the application with [`nixGL`](https://github.com/guibou/nixGL) or a similar wrapper tool. If that works, it likely means that the application is attempting to load a library that is not in the `RPATH` or `RUNPATH` of the binary.
## Running Docker or Podman containers with CUDA support {#running-docker-or-podman-containers-with-cuda-support}
It is possible to run Docker or Podman containers with CUDA support. The recommended mechanism to perform this task is to use the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/index.html).
The NVIDIA Container Toolkit can be enabled in NixOS as follows:
```nix
{
hardware.nvidia-container-toolkit.enable = true;
}
```
This will automatically enable a service that generates a CDI specification (located at `/var/run/cdi/nvidia-container-toolkit.json`) based on the auto-detected hardware of your machine. You can check this service by running:
```ShellSession
$ systemctl status nvidia-container-toolkit-cdi-generator.service
```
::: {.note}
Depending on what settings you had already enabled in your system, you might need to restart your machine in order for the NVIDIA Container Toolkit to generate a valid CDI specification for your machine.
:::
Once a valid CDI specification has been generated for your machine at boot time, both Podman and Docker (> 25) will use this spec if you provide them with the `--device` flag:
```ShellSession
$ podman run --rm -it --device=nvidia.com/gpu=all ubuntu:latest nvidia-smi -L
GPU 0: NVIDIA GeForce RTX 4090 (UUID: <REDACTED>)
GPU 1: NVIDIA GeForce RTX 2080 SUPER (UUID: <REDACTED>)
```
```ShellSession
$ docker run --rm -it --device=nvidia.com/gpu=all ubuntu:latest nvidia-smi -L
GPU 0: NVIDIA GeForce RTX 4090 (UUID: <REDACTED>)
GPU 1: NVIDIA GeForce RTX 2080 SUPER (UUID: <REDACTED>)
```
You can check all the identifiers that have been generated for your auto-detected hardware by checking the contents of the `/var/run/cdi/nvidia-container-toolkit.json` file:
```ShellSession
$ nix run nixpkgs#jq -- -r '.devices[].name' < /var/run/cdi/nvidia-container-toolkit.json
0
1
all
```
### Specifying what devices to expose to the container {#specifying-what-devices-to-expose-to-the-container}
You can choose which devices are exposed to your containers by using the identifiers from the generated CDI specification, as follows:
```ShellSession
$ podman run --rm -it --device=nvidia.com/gpu=0 ubuntu:latest nvidia-smi -L
GPU 0: NVIDIA GeForce RTX 4090 (UUID: <REDACTED>)
```
You can repeat the `--device` argument as many times as necessary if you have multiple GPUs and want to pick which ones to expose to the container:
```ShellSession
$ podman run --rm -it --device=nvidia.com/gpu=0 --device=nvidia.com/gpu=1 ubuntu:latest nvidia-smi -L
GPU 0: NVIDIA GeForce RTX 4090 (UUID: <REDACTED>)
GPU 1: NVIDIA GeForce RTX 2080 SUPER (UUID: <REDACTED>)
```
::: {.note}
By default, the NVIDIA Container Toolkit uses the GPU index to identify specific devices. You can change how the devices to expose are identified via the `hardware.nvidia-container-toolkit.device-name-strategy` NixOS option.
:::
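
For instance, switching the identification strategy could look like this (a sketch; `"uuid"` is an assumed value, check the option's description for the values it actually accepts):

```nix
{
  hardware.nvidia-container-toolkit = {
    enable = true;
    # Assumed value; see the option documentation for supported strategies.
    device-name-strategy = "uuid";
  };
}
```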
### Using docker-compose {#using-docker-compose}
It's possible to expose GPUs to a `docker-compose` environment as well, with a `docker-compose.yaml` file like the following:
```yaml
services:
some-service:
image: ubuntu:latest
command: sleep infinity
deploy:
resources:
reservations:
devices:
- driver: cdi
device_ids:
- nvidia.com/gpu=all
```
In the same manner, you can pick specific devices that will be exposed to the container:
```yaml
services:
some-service:
image: ubuntu:latest
command: sleep infinity
deploy:
resources:
reservations:
devices:
- driver: cdi
device_ids:
- nvidia.com/gpu=0
- nvidia.com/gpu=1
```

View file

@ -100,7 +100,7 @@ $ sudo launchctl kickstart -k system/org.nixos.nix-daemon
 darwin-builder = nixpkgs.lib.nixosSystem {
 system = linuxSystem;
 modules = [
-"${nixpkgs}/nixos/modules/profiles/macos-builder.nix"
+"${nixpkgs}/nixos/modules/profiles/nix-builder-vm.nix"
 { virtualisation = {
 host.pkgs = pkgs;
 darwin-builder.workingDirectory = "/var/lib/darwin-builder";
@ -158,7 +158,7 @@ in the example below and rebuild.
 darwin-builder = nixpkgs.lib.nixosSystem {
 system = linuxSystem;
 modules = [
-"${nixpkgs}/nixos/modules/profiles/macos-builder.nix"
+"${nixpkgs}/nixos/modules/profiles/nix-builder-vm.nix"
 {
 virtualisation.host.pkgs = pkgs;
 virtualisation.darwin-builder.diskSize = 5120;
@ -185,6 +185,6 @@ nix-repl> darwin.linux-builder.nixosConfig.nix.package
 «derivation /nix/store/...-nix-2.17.0.drv»
 
 nix-repl> :p darwin.linux-builder.nixosOptions.virtualisation.memorySize.definitionsWithLocations
-[ { file = "/home/user/src/nixpkgs/nixos/modules/profiles/macos-builder.nix"; value = 3072; } ]
+[ { file = "/home/user/src/nixpkgs/nixos/modules/profiles/nix-builder-vm.nix"; value = 3072; } ]
 ```

View file

@ -79,7 +79,7 @@ Besides tests provided by upstream, that you run in the [`checkPhase`](#ssec-che
 - They access the package as consumers would, independently from the environment in which it was built
 - They can be run and debugged without rebuilding the package, which is useful if that takes a long time
-- They don't add overhead to each build, as opposed to checks added to the [`distPhase`](#ssec-distribution-phase), such as [`versionCheckHook`](#versioncheckhook).
+- They don't add overhead to each build, as opposed to checks added to the [`installCheckPhase`](#ssec-installCheck-phase), such as [`versionCheckHook`](#versioncheckhook).
 
 It is also possible to use `passthru.tests` to test the version with [`testVersion`](#tester-testVersion), but since that is a pretty trivial and recommended thing to do, we recommend using [`versionCheckHook`](#versioncheckhook) for that, which has the following advantages over `passthru.tests`:
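
For comparison, a `passthru.tests`-based version check might look like this (a sketch; the package is hypothetical, and `testers.testVersion` is the tester referenced above):

```nix
{ stdenv, testers }:

stdenv.mkDerivation (finalAttrs: {
  pname = "example";
  version = "1.2.3";

  # ... src, build inputs, etc. ...

  passthru.tests.version = testers.testVersion {
    package = finalAttrs.finalPackage;
  };
})
```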

View file

@ -518,8 +518,10 @@ There are a number of variables that control what phases are executed and in wha
 Specifies the phases. You can change the order in which phases are executed, or add new phases, by setting this variable. If it's not set, the default value is used, which is `$prePhases unpackPhase patchPhase $preConfigurePhases configurePhase $preBuildPhases buildPhase checkPhase $preInstallPhases installPhase fixupPhase installCheckPhase $preDistPhases distPhase $postPhases`.
 
+The elements of `phases` must not contain spaces. If `phases` is specified as a Nix Language attribute, it should be specified as lists instead of strings. The same rules apply to the `*Phases` variables.
+
 It is discouraged to set this variable, as it is easy to miss some important functionality hidden in some of the less obviously needed phases (like `fixupPhase` which patches the shebang of scripts).
-Usually, if you just want to add a few phases, it's more convenient to set one of the variables below (such as `preInstallPhases`).
+Usually, if you just want to add a few phases, it's more convenient to set one of the `*Phases` variables below.
 
 ##### `prePhases` {#var-stdenv-prePhases}
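
To illustrate the list form described in the hunk above (a minimal sketch; the package and its phase selection are made up, and overriding `phases` remains discouraged):

```nix
stdenv.mkDerivation {
  pname = "example";
  version = "1.0";
  src = ./.;

  # As a Nix attribute, `phases` is a list of phase names,
  # not a space-separated string:
  phases = [
    "unpackPhase"
    "installPhase"
  ];

  installPhase = ''
    mkdir -p $out
    cp -r . $out
  '';
}
```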

View file

@ -668,6 +668,12 @@
 githubId = 19290901;
 name = "Andrew Brooks";
 };
+agilesteel = {
+email = "agilesteel@gmail.com";
+github = "agilesteel";
+githubId = 1141462;
+name = "Vladyslav Pekker";
+};
 aherrmann = {
 email = "andreash87@gmx.ch";
 github = "aherrmann";
@ -954,6 +960,12 @@
 githubId = 49609151;
 name = "Popa Ioan Alexandru";
 };
+alexandru0-dev = {
+email = "alexandru.italia32+nixpkgs@gmail.com";
+github = "alexandru0-dev";
+githubId = 45104896;
+name = "Alexandru Nechita";
+};
 alexarice = {
 email = "alexrice999@hotmail.co.uk";
 github = "alexarice";
@ -3804,6 +3816,13 @@
 githubId = 98980;
 name = "Chmouel Boudjnah";
 };
+chn = {
+name = "Haonan Chen";
+email = "chn@chn.moe";
+matrix = "@chn:chn.moe";
+github = "CHN-beta";
+githubId = 35858462;
+};
 choochootrain = {
 email = "hurshal@imap.cc";
 github = "choochootrain";
@ -5829,6 +5848,12 @@
 githubId = 6872940;
 name = "Dennis Værum";
 };
+dvcorreia = {
+email = "dv_correia@hotmail.com";
+name = "Diogo Correia";
+github = "dvcorreia";
+githubId = 20357938;
+};
 dvn0 = {
 email = "git@dvn.me";
 github = "dvn0";
@ -7663,6 +7688,12 @@
 githubId = 111183546;
 keys = [ { fingerprint = "58CE D4BE 6B10 149E DA80 A990 2F48 6356 A4CB 30F3"; } ];
 };
+genga898 = {
+email = "genga898@gmail.com";
+github = "genga898";
+githubId = 84174227;
+name = "Emmanuel Genga";
+};
 genofire = {
 name = "genofire";
 email = "geno+dev@fireorbit.de";
@ -8239,6 +8270,13 @@
 github = "hacker1024";
 githubId = 20849728;
 };
+hadilq = {
+name = "Hadi Lashkari Ghouchani";
+email = "hadilq.dev@gmail.com";
+github = "hadilq";
+githubId = 5190539;
+keys = [ { fingerprint = "AD3D 53CB A68A FEC0 8065 BCBB 416A D9E8 E372 C075"; } ];
+};
 hagl = {
 email = "harald@glie.be";
 github = "hagl";
@ -8363,6 +8401,13 @@
 githubId = 33969028;
 name = "Sebastian Hasler";
 };
+hasnep = {
+name = "Hannes";
+email = "h@nnes.dev";
+matrix = "@hasnep:matrix.org";
+github = "Hasnep";
+githubId = 25184102;
+};
 hausken = {
 name = "Hausken";
 email = "hauskens-git@disp.lease>";
@ -11685,6 +11730,13 @@
 githubId = 168301;
 name = "Victor Engmark";
 };
+l33tname = {
+name = "l33tname";
+email = "hi@l33t.name";
+github = "Fliiiix";
+githubId = 1682954;
+};
 l3af = {
 email = "L3afMeAlon3@gmail.com";
 matrix = "@L3afMe:matrix.org";
@ -13723,6 +13775,13 @@
 github = "mi-ael";
 githubId = 12199265;
 };
+miampf = {
+email = "miampf@proton.me";
+github = "miampf";
+githubId = 111570799;
+name = "Mia Motte Mallon";
+keys = [ { fingerprint = "7008 92AA 6F32 8CAC 8740 0070 EF03 9364 B5B6 886C"; } ];
+};
 miangraham = {
 github = "miangraham";
 githubId = 704580;
@ -18371,6 +18430,18 @@
 githubId = 1217934;
 name = "José Romildo Malaquias";
 };
+romner-set = {
+email = "admin@cynosure.red";
+github = "romner-set";
+githubId = 41077433;
+name = "romner-set";
+keys = [
+{
+# uploaded to https://keys.openpgp.org
+fingerprint = "4B75 244B 0279 9598 FF3B C21F 95FC 58F1 8CFD FAB0";
+}
+];
+};
 ronanmacf = {
 email = "macfhlar@tcd.ie";
 github = "RonanMacF";
@ -22595,6 +22666,13 @@
 githubId = 144771550;
 name = "Luca Uricariu";
 };
+voronind = {
+email = "hi@voronind.com";
+name = "Dmitry Voronin";
+github = "voronind-com";
+githubId = 22127600;
+keys = [ { fingerprint = "3241 FDAD 82A7 E22D 4279 F405 913F 3267 9278 2E1C"; } ];
+};
 votava = {
 email = "votava@gmail.com";
 github = "janvotava";

View file

@ -156,7 +156,7 @@ files_before=$(grep -c 'src = ' "$SRCS")
 echo "writing output file $SRCS ..."
 cat >"$SRCS" <<EOF
 # DO NOT EDIT! This file is generated automatically.
-# Command: $0 $@
+# Command: ./maintainers/scripts/fetch-kde-qt.sh $@
 { fetchurl, mirror }:
 {

View file

@ -72,6 +72,7 @@ OK_MISSING_BY_PACKAGE = {
 },
 "kwin": {
 "display-info", # newer versions identify as libdisplay-info
+"Libcap", # used to call setcap at build time and nothing else
 },
 "libksysguard": {
 "Libcap", # used to call setcap at build time and nothing else
@ -90,6 +91,7 @@ OK_MISSING_BY_PACKAGE = {
 },
 "powerdevil": {
 "DDCUtil", # cursed, intentionally disabled
+"Libcap", # used to call setcap at build time and nothing else
 },
 "print-manager": {
 "PackageKitQt6", # used for auto-installing drivers which does not work for obvious reasons

View file

@ -23,7 +23,8 @@ mkKdeDerivation {
 '''.strip())
 
 ROOT_TEMPLATE = jinja2.Template('''
-{callPackage}: {
+{ callPackage }:
+{
 {%- for p in packages %}
 {{ p }} = callPackage ./{{ p }} { };
 {%- endfor %}
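
For illustration, the adjusted template renders output shaped like this (the `foo` and `bar` package names are hypothetical):

```nix
{ callPackage }:
{
  foo = callPackage ./foo { };
  bar = callPackage ./bar { };
}
```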

View file

@ -12,6 +12,7 @@ import tempfile
 class CalledProcessError(Exception):
 process: asyncio.subprocess.Process
+stderr: Optional[bytes]
 
 class UpdateFailedException(Exception):
 pass
@ -19,20 +20,23 @@ class UpdateFailedException(Exception):
 def eprint(*args, **kwargs):
 print(*args, file=sys.stderr, **kwargs)
 
-async def check_subprocess(*args, **kwargs):
+async def check_subprocess_output(*args, **kwargs):
 """
-Emulate check argument of subprocess.run function.
+Emulate check and capture_output arguments of subprocess.run function.
 """
 process = await asyncio.create_subprocess_exec(*args, **kwargs)
-returncode = await process.wait()
+# We need to use communicate() instead of wait(), as the OS pipe buffers
+# can fill up and cause a deadlock.
+stdout, stderr = await process.communicate()
 
-if returncode != 0:
+if process.returncode != 0:
 error = CalledProcessError()
 error.process = process
+error.stderr = stderr
 raise error
 
-return process
+return stdout
 
 async def run_update_script(nixpkgs_root: str, merge_lock: asyncio.Lock, temp_dir: Optional[Tuple[str, str]], package: Dict, keep_going: bool):
 worktree: Optional[str] = None
@ -43,7 +47,7 @@ async def run_update_script(nixpkgs_root: str, merge_lock: asyncio.Lock, temp_di
 worktree, _branch = temp_dir
 
 # Ensure the worktree is clean before update.
-await check_subprocess('git', 'reset', '--hard', '--quiet', 'HEAD', cwd=worktree)
+await check_subprocess_output('git', 'reset', '--hard', '--quiet', 'HEAD', cwd=worktree)
 
 # Update scripts can use $(dirname $0) to get their location but we want to run
 # their clones in the git worktree, not in the main nixpkgs repo.
@ -52,7 +56,7 @@ async def run_update_script(nixpkgs_root: str, merge_lock: asyncio.Lock, temp_di
 eprint(f" - {package['name']}: UPDATING ...")
 
 try:
-update_process = await check_subprocess(
+update_info = await check_subprocess_output(
 'env',
 f"UPDATE_NIX_NAME={package['name']}",
 f"UPDATE_NIX_PNAME={package['pname']}",
@ -63,8 +67,6 @@ async def run_update_script(nixpkgs_root: str, merge_lock: asyncio.Lock, temp_di
 stderr=asyncio.subprocess.PIPE,
 cwd=worktree,
 )
-update_info = await update_process.stdout.read()
 await merge_changes(merge_lock, package, update_info, temp_dir)
 except KeyboardInterrupt as e:
 eprint('Cancelling…')
@ -74,10 +76,9 @@ async def run_update_script(nixpkgs_root: str, merge_lock: asyncio.Lock, temp_di
 eprint()
 eprint(f"--- SHOWING ERROR LOG FOR {package['name']} ----------------------")
 eprint()
-stderr = await e.process.stderr.read()
-eprint(stderr.decode('utf-8'))
+eprint(e.stderr.decode('utf-8'))
 
 with open(f"{package['pname']}.log", 'wb') as logfile:
-logfile.write(stderr)
+logfile.write(e.stderr)
 
 eprint()
 eprint(f"--- SHOWING ERROR LOG FOR {package['name']} ----------------------")
@ -101,14 +102,14 @@ async def commit_changes(name: str, merge_lock: asyncio.Lock, worktree: str, bra
 for change in changes:
 # Git can only handle a single index operation at a time
 async with merge_lock:
-await check_subprocess('git', 'add', *change['files'], cwd=worktree)
+await check_subprocess_output('git', 'add', *change['files'], cwd=worktree)
 commit_message = '{attrPath}: {oldVersion} -> {newVersion}'.format(**change)
 if 'commitMessage' in change:
 commit_message = change['commitMessage']
 elif 'commitBody' in change:
 commit_message = commit_message + '\n\n' + change['commitBody']
-await check_subprocess('git', 'commit', '--quiet', '-m', commit_message, cwd=worktree)
-await check_subprocess('git', 'cherry-pick', branch)
+await check_subprocess_output('git', 'commit', '--quiet', '-m', commit_message, cwd=worktree)
+await check_subprocess_output('git', 'cherry-pick', branch)
 
 async def check_changes(package: Dict, worktree: str, update_info: str):
 if 'commit' in package['supportedFeatures']:
@ -129,12 +130,12 @@ async def check_changes(package: Dict, worktree: str, update_info: str):
 if 'newVersion' not in changes[0]:
 attr_path = changes[0]['attrPath']
-obtain_new_version_process = await check_subprocess('nix-instantiate', '--expr', f'with import ./. {{}}; lib.getVersion {attr_path}', '--eval', '--strict', '--json', stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, cwd=worktree)
-changes[0]['newVersion'] = json.loads((await obtain_new_version_process.stdout.read()).decode('utf-8'))
+obtain_new_version_output = await check_subprocess_output('nix-instantiate', '--expr', f'with import ./. {{}}; lib.getVersion {attr_path}', '--eval', '--strict', '--json', stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, cwd=worktree)
+changes[0]['newVersion'] = json.loads(obtain_new_version_output.decode('utf-8'))
 
 if 'files' not in changes[0]:
-changed_files_process = await check_subprocess('git', 'diff', '--name-only', 'HEAD', stdout=asyncio.subprocess.PIPE, cwd=worktree)
-changed_files = (await changed_files_process.stdout.read()).splitlines()
+changed_files_output = await check_subprocess_output('git', 'diff', '--name-only', 'HEAD', stdout=asyncio.subprocess.PIPE, cwd=worktree)
+changed_files = changed_files_output.splitlines()
 changes[0]['files'] = changed_files
 
 if len(changed_files) == 0:
@ -176,8 +177,8 @@ async def start_updates(max_workers: int, keep_going: bool, commit: bool, packag
 # Do not create more workers than there are packages.
 num_workers = min(max_workers, len(packages))
 
-nixpkgs_root_process = await check_subprocess('git', 'rev-parse', '--show-toplevel', stdout=asyncio.subprocess.PIPE)
-nixpkgs_root = (await nixpkgs_root_process.stdout.read()).decode('utf-8').strip()
+nixpkgs_root_output = await check_subprocess_output('git', 'rev-parse', '--show-toplevel', stdout=asyncio.subprocess.PIPE)
+nixpkgs_root = nixpkgs_root_output.decode('utf-8').strip()
 
 # Set up temporary directories when using auto-commit.
 for i in range(num_workers):

View file

@@ -278,7 +278,11 @@ with lib.maintainers;
   };

   emacs = {
-    members = [ adisbladis ];
+    members = [
+      AndersonTorres
+      adisbladis
+      linj
+    ];
     scope = "Maintain the Emacs editor and packages.";
     shortName = "Emacs";
   };
@@ -495,7 +499,6 @@ with lib.maintainers;
       ryantm
       lassulus
       yayayayaka
-      asymmetric
     ];
     scope = "Maintain Jitsi.";
     shortName = "Jitsi";
@@ -745,6 +748,16 @@ with lib.maintainers;
     enableFeatureFreezePing = true;
   };

+  ngi = {
+    members = [
+      eljamm
+      fricklerhandwerk
+      wegank
+    ];
+    scope = "Maintain NGI-supported software.";
+    shortName = "NGI";
+  };
+
   node = {
     members = [ winter ];
     scope = "Maintain Node.js runtimes and build tooling.";

View file

@@ -85,7 +85,7 @@ In addition to numerous new and upgraded packages, this release has the followin
 - [filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-overview.html), a lightweight shipper for forwarding and centralizing log data. Available as [services.filebeat](#opt-services.filebeat.enable).
-- [FRRouting](https://frrouting.org/), a popular suite of Internet routing protocol daemons (BGP, BFD, OSPF, IS-IS, VRRP and others). Available as [services.frr](#opt-services.frr.babel.enable).
+- [FRRouting](https://frrouting.org/), a popular suite of Internet routing protocol daemons (BGP, BFD, OSPF, IS-IS, VRRP and others). Available as [services.frr](#opt-services.frr.babeld.enable).
 - [Grafana Mimir](https://grafana.com/oss/mimir/), an open source, horizontally scalable, highly available, multi-tenant, long-term storage for Prometheus. Available as [services.mimir](#opt-services.mimir.enable).

View file

@@ -112,7 +112,7 @@
 - [ddns-updater](https://github.com/qdm12/ddns-updater), a service to update DNS records periodically with WebUI for many DNS providers. Available as [services.ddns-updater](#opt-services.ddns-updater.enable).
-- [Immersed VR](https://immersed.com/), a closed-source coworking platform. Available as [programs.immersed-vr](#opt-programs.immersed-vr.enable).
+- [Immersed](https://immersed.com/), a closed-source coworking platform. Available as [programs.immersed](#opt-programs.immersed.enable).
 - [HomeBox](https://github.com/sysadminsmedia/homebox): the inventory and organization system built for the Home User. Available as [services.homebox](#opt-services.homebox.enable).
@@ -175,18 +175,25 @@
 - [Immich](https://github.com/immich-app/immich), a self-hosted photo and video backup solution. Available as [services.immich](#opt-services.immich.enable).

+- [saunafs](https://saunafs.com) Distributed POSIX file system. Available as [services.saunafs](options.html#opt-services.saunafs).
+
 - [obs-studio](https://obsproject.com/), Free and open source software for video recording and live streaming. Available as [programs.obs-studio.enable](#opt-programs.obs-studio.enable).

 - [Veilid](https://veilid.com), a headless server that enables privacy-focused data sharing and messaging on a peer-to-peer network. Available as [services.veilid](#opt-services.veilid.enable).

 - [Fedimint](https://github.com/fedimint/fedimint), a module based system for building federated applications (Federated E-Cash Mint). Available as [services.fedimintd](#opt-services.fedimintd).

+- [Zapret](https://github.com/bol-van/zapret), a DPI bypass tool. Available as [services.zapret](option.html#opt-services.zapret).
+
 ## Backward Incompatibilities {#sec-release-24.11-incompatibilities}

 - The `sound` options have been removed or renamed, as they had a lot of unintended side effects. See [below](#sec-release-24.11-migration-sound) for details.

 - The nvidia driver no longer defaults to the proprietary driver starting with version 560. You will need to manually set `hardware.nvidia.open` to select the proprietary or open driver.

+- The `(buildPythonPackage { ... }).override` attribute is now deprecated and removed in favour of `overridePythonAttrs`.
+  This change does not affect the override interface of most Python packages, as [`<pkg>.override`](https://nixos.org/manual/nixpkgs/unstable/#sec-pkg-override) provided by `callPackage` shadows such a locally-defined `override` attribute.
+
 - All Cinnamon and XApp packages have been moved to top-level (i.e., `cinnamon.nemo` is now `nemo`).

 - All GNOME packages have been moved to top-level (i.e., `gnome.nautilus` is now `nautilus`).
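
For the `hardware.nvidia.open` item above, a minimal configuration sketch; the value itself is a user choice, not a new default:

```nix
{
  # Pick the open kernel module (set to false for the proprietary one).
  # Driver >= 560 no longer chooses a default for you.
  hardware.nvidia.open = true;
}
```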
@@ -205,6 +212,12 @@
 - `grafana` has been updated to version 11.1. This version doesn't support setting `http_addr` to a hostname anymore, an IP address is expected.

+- `deno` has been updated to v2 which has breaking changes. Upstream will be abandoning v1 soon but for now you can use `deno_1` if you are yet to migrate (will be removed prior to cutting a final 24.11 release).
+
+- `gogs` has been removed. Upstream development has stalled and it has several
+  [critical vulnerabilities](https://github.com/gogs/gogs/issues/7777) that weren't addressed
+  within a year. Consider migrating to `forgejo` or `gitea`.
+
 - `knot-dns` has been updated to version 3.4.x. Check the [migration guide](https://www.knot-dns.cz/docs/latest/html/migration.html#upgrade-3-3-x-to-3-4-x) for breaking changes.

 - `services.kubernetes.kubelet.clusterDns` now accepts a list of DNS resolvers rather than a single string, bringing the module more in line with the upstream Kubelet configuration schema.
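
A sketch of the new list form of `clusterDns`; the resolver address is illustrative:

```nix
{
  # Previously a single string, now a list of resolver addresses.
  services.kubernetes.kubelet.clusterDns = [ "10.1.0.1" ];
}
```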
@ -221,6 +234,11 @@
Also be aware that if you have set additional options in `services.wstunnel.{clients,servers}.<name>.extraArgs`, Also be aware that if you have set additional options in `services.wstunnel.{clients,servers}.<name>.extraArgs`,
that those might have been removed or modified upstream. that those might have been removed or modified upstream.
- `percona-server_8_4` and `mysql84` now have password authentication via the deprecated `mysql_native_password` disabled by default. This authentication plugin can be enabled via a CLI argument again, for detailed instructions and alternative authentication methods [see upstream documentation](https://dev.mysql.com/doc/refman/8.4/en/native-pluggable-authentication.html). The config file directive `default_authentication_plugin` has been removed.
- Percona has decided not to follow the LTS/ Innovation release scheme of upstream MySQL and thus [will only create releases for MySQL LTS versions](https://www.percona.com/blog/no-mysql-9-x-innovation-releases-from-percona/). Hence, the package names `percona-server_lts`, `percona-server_innovation`, `percona-xtrabackup_lts` and `percona-xtrabackup_innovation` are deprecated.
- `percona-server` and `percona-server_lts` now point towards the new LTS release `percona-server_8_4`. The previous LTS continues to be supported and is available as `percona-server_8_0`. The same is true for the supporting `percona-xtrabackup` tooling.
- `clang-tools_<version>` packages have been moved into `llvmPackages_<version>` (i.e. `clang-tools_18` is now `llvmPackages_18.clang-tools`). - `clang-tools_<version>` packages have been moved into `llvmPackages_<version>` (i.e. `clang-tools_18` is now `llvmPackages_18.clang-tools`).
- For convenience, the top-level `clang-tools` attribute remains and is now bound to `llvmPackages.clang-tools`. - For convenience, the top-level `clang-tools` attribute remains and is now bound to `llvmPackages.clang-tools`.
- Top-level `clang_tools_<version>` attributes are now aliases; these will be removed in a future release. - Top-level `clang_tools_<version>` attributes are now aliases; these will be removed in a future release.
@@ -265,6 +283,11 @@
 - The `mautrix-signal` module was adapted to incorporate the configuration rearrangement that resulted from the update to the mautrix bridgev2 architecture. Pre-0.7.0 configurations should continue to work.
   In case you want to update your configuration make sure to check the NixOS manual.

+- The dhcpcd service (`networking.useDHCP`) has been hardened and now runs exclusively as the "dhcpcd" user.
+  Users that were relying on the root privileges in `networking.dhcpcd.runHook` will have to write specific [sudo](security.sudo.extraRules) or [polkit](security.polkit.extraConfig) rules to allow dhcpcd to perform privileged actions.
+  As part of these changes, the DHCP lease files directory has also been moved from `/var/db/dhcpcd` to `/var/lib/dhcpcd`. This migration is performed automatically, but users may have to update their backup configuration.
+
 - `singularity-tools` have the `storeDir` argument removed from its override interface and use `builtins.storeDir` instead.

 - Two build helpers in `singularity-tools`, i.e., `mkLayer` and `shellScript`, are deprecated, as they are no longer involved in image-building. Maintainers will remove them in future releases.
@@ -313,10 +336,16 @@
   Most prominently access to the webinterface and API are now protected by authentication. Retrieve the auto-created
   admin account from the `frigate.service` journal after upgrading.

+- `nodePackages.coc-python` was dropped, as [its upstream is unmaintained](https://github.com/neoclide/coc-python). The associated `vimPlugins.coc-python` was also dropped.
+  The upstream project recommends using `coc-pyright` or `coc-jedi` as replacements.
+
+- `forgejo` has been upgraded from version 7.0 to version 9.0, see the release notes for [8.0](https://codeberg.org/forgejo/forgejo/src/branch/forgejo/RELEASE-NOTES.md#8-0-0) and [9.0](https://codeberg.org/forgejo/forgejo/milestone/7235).
+
 - `services.forgejo.mailerPasswordFile` has been deprecated by the drop-in replacement `services.forgejo.secrets.mailer.PASSWD`,
   which is part of the new free-form `services.forgejo.secrets` option.
   `services.forgejo.secrets` is a small wrapper over systemd's `LoadCredential=`. It has the same structure (sections/keys) as
   `services.forgejo.settings` but takes file paths that will be read before service startup instead of some plaintext value.
+  `services.forgejo.package` now defaults to `forgejo-lts`, the Long Term Support version of Forgejo.

 - `forgejo` and `forgejo-lts` no longer support the opt-in feature [PAM (Pluggable Authentication Module)](https://forgejo.org/docs/latest/user/authentication/#pam-pluggable-authentication-module).
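
A minimal sketch of the free-form `services.forgejo.secrets` option described above; the file path is hypothetical:

```nix
{
  # The path is read at service startup via systemd's LoadCredential=,
  # instead of storing the password in the world-readable Nix store.
  services.forgejo.secrets.mailer.PASSWD = "/run/keys/forgejo-mailer-password";
}
```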
@@ -497,9 +526,6 @@
 - The `services.mxisd` module has been removed as both [mxisd](https://github.com/kamax-matrix/mxisd) and [ma1sd](https://github.com/ma1uta/ma1sd) are not maintained any longer.
   Consequently the package `pkgs.ma1sd` has also been removed.

-- `ffmpeg_5` has been removed. Please use the unversioned `ffmpeg`,
-  pin a newer version, or if necessary pin `ffmpeg_4` for compatibility.
-
 - The `rss-bridge` service drops the support to load a configuration file from `${config.services.rss-bridge.dataDir}/config.ini.php`.
   Consider using the `services.rss-bridge.config` option instead.
@@ -519,6 +545,12 @@
 - `ceph` has been upgraded to v19. See the [Ceph "squid" release notes](https://docs.ceph.com/en/latest/releases/squid/#v19-2-0-squid) for details and recommended upgrade procedure.

+- `services.frr` has been refactored to use upstream service scripts. The per-daemon configurations
+  have been removed in favour of an `integrated-vtysh-config` style config. The daemon submodules
+  now use the daemon name (e.g. `ospfd`) instead of the protocol name (`ospf`). The daemons `zebra`,
+  `mgmtd` and `staticd` are always enabled if a config is present. The `vtyListenAddress` and
+  `vtyListenPort` options have been removed; use `options` or `extraOptions` instead, respectively.
+
 - `opencv2` and `opencv3` have been removed, as they are obsolete and
   were not used by any other package. External users are encouraged to
   migrate to OpenCV 4.
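
A sketch of the renamed FRR daemon submodules; the exact option path is an assumption based on the `services.frr` note above:

```nix
{
  # Submodules are now keyed by daemon name (ospfd), not protocol name (ospf).
  services.frr.ospfd.enable = true;
}
```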
@@ -546,6 +578,16 @@
 - Minimal installer ISOs are no longer built on the small channel.
   Please obtain installer images from the full release channels.

+- The default FFmpeg version is now 7, and FFmpeg 5 has been removed.
+  Please prefer using the package variants without a version suffix,
+  or pin FFmpeg 6 or 4 if necessary for compatibility.
+  Note that we keep old versions around only as required
+  to support packages in the tree,
+  and FFmpeg 4 especially should be avoided in favour of newer versions
+  as it may be removed soon.
+
+- `openssl` now defaults to the latest version line `3.3.x`, instead of `3.0.x` before. While there should be no major code incompatibilities, newer OpenSSL versions typically strengthen the default security level. This means that you may have to explicitly allow weak ciphers, hashes and key lengths if necessary. See: [OpenSSL security level documentation](https://docs.openssl.org/3.3/man3/SSL_CTX_set_security_level/).
+
 - The `isync` package has been updated to version `1.5.0`, which introduces some breaking changes. See the [compatibility concerns](https://sourceforge.net/projects/isync/files/isync/1.5.0/) for more details.

 - Legacy package `globalprotect-openconnect` 1.x and related module
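
A sketch of the pinning strategy recommended in the FFmpeg note above:

```nix
{ pkgs, ... }:
{
  environment.systemPackages = [
    pkgs.ffmpeg # follows the default, now FFmpeg 7
    # pkgs.ffmpeg_6 # pin a major version only where compatibility requires it
  ];
}
```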
@@ -558,6 +600,14 @@
 - The `rustic` package was upgraded to `0.9.0`, which contains [breaking changes to the config file format](https://github.com/rustic-rs/rustic/releases/tag/v0.9.0).

+- `pkgs.formats.ini` and `pkgs.formats.iniWithGlobalSection` with
+  `listsAsDuplicateKeys` or `listToValue` no longer merge non-list values into
+  lists by default. Backwards-compatible behavior can be enabled with
+  `atomsCoercedToLists`.
+
+- `python3Packages.nose` has been removed, as it has been deprecated and unmaintained for almost a decade and does not work on Python 3.12.
+  Please switch to `pytest` or another test runner/framework.
+
 ## Other Notable Changes {#sec-release-24.11-notable-changes}

 <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
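
A sketch of opting back into the old coercion behaviour named in the `pkgs.formats.ini` item above:

```nix
# atomsCoercedToLists restores merging of non-list values into lists.
pkgs.formats.ini {
  listsAsDuplicateKeys = true;
  atomsCoercedToLists = true;
}
```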
@@ -577,6 +627,12 @@
   `goModules`, `modRoot`, `vendorHash`, `deleteVendor`, and `proxyVendor` are now passed as derivation attributes.
   `goModules` and `vendorHash` are no longer placed under `passthru`.

+- `buildFlags`/`buildFlagsArray` on `buildGoModule` have been deprecated. 24.11 is the last release where `buildGoModule` accepts these flags (while throwing a warning).
+  Use the [`ldflags`](https://nixos.org/manual/nixpkgs/unstable/#var-go-ldflags) and/or [`tags`](https://nixos.org/manual/nixpkgs/unstable/#var-go-tags) attributes or
+  [the environment](https://nixos.org/manual/nixpkgs/unstable/#ssec-go-environment) instead.
+
+- `buildGoPackage` has been deprecated. 24.11 is the last release with `buildGoPackage` available.
+
 - `hareHook` has been added as the language framework for Hare. From now on, it,
   not the `hare` package, should be added to `nativeBuildInputs` when building
   Hare programs.
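
A sketch of the `ldflags`/`tags` replacement for the deprecated `buildFlags`; the package attributes are hypothetical:

```nix
buildGoModule {
  pname = "example"; # hypothetical package
  version = "0.1.0";
  src = ./.;
  vendorHash = null;
  # Instead of buildFlags = [ "-ldflags=..." "-tags=..." ]:
  ldflags = [ "-s" "-w" "-X main.version=0.1.0" ];
  tags = [ "netgo" ];
}
```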
@@ -590,8 +646,6 @@
 - `nixosTests` now provide a working IPv6 setup for VLAN 1 by default.

-- `services.dhcpcd` is now started with additional systemd sandbox/hardening options for better security. When using `networking.dhcpcd.runHook` these settings are not applied.
-
 - Kanidm can now be provisioned using the new [`services.kanidm.provision`] option, but requires using a patched version available via `pkgs.kanidm.withSecretProvisioning`.

 - Kanidm previously had an incorrect systemd service type, causing dependent units with an `after` and `requires` directive to start before `kanidm*` finished startup. The module has now been updated in line with upstream recommendations.
@@ -618,6 +672,8 @@
   The derivation now installs "impl" headers selectively instead of by a wildcard.
   Use `imgui.src` if you just want to access the unpacked sources.

+- The new `boot.loader.systemd-boot.windows` option makes setting up dual-booting with Windows on a different drive easier.
+
 - Linux 4.19 has been removed because it will reach its end of life within the lifespan of 24.11

 - Unprivileged access to the kernel syslog via `dmesg` is now restricted by default. Users wanting to keep an
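
A sketch of the new dual-boot option; the entry name and EFI device handle are illustrative and machine-specific:

```nix
{
  # "10" is an arbitrary entry name; the device handle comes from your
  # firmware (see the option's documentation for how to determine it).
  boot.loader.systemd-boot.windows."10".efiDeviceHandle = "HD0b";
}
```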
@@ -657,6 +713,10 @@
 - `cargo-tauri.hook` was introduced to help users build [Tauri](https://tauri.app/) projects. It is meant to be used alongside
   `rustPlatform.buildRustPackage` and Node hooks such as `npmConfigHook`, `pnpm.configHook`, and the new `yarnConfig`

+- `power.ups` now powers off UPSs during a power outage event.
+  This saves UPS battery and ensures that host(s) get back up again when power comes back, even in the scenario when the UPS would have had enough capacity to keep power on during the whole power outage.
+  If you like the old behaviour of keeping the UPSs on (and emptying the battery) after the host(s) have shut down, and risk not getting a power cycle event to get the host(s) back up, set `power.ups.upsmon.settings.POWERDOWNFLAG = null;`.
+
 - Support for *runner registration tokens* has been [deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/380872)
   in `gitlab-runner` 15.6 and is expected to be removed in `gitlab-runner` 18.0. Configuration of existing runners
   should be changed to using *runner authentication tokens* by configuring
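
Opting out of the new `power.ups` behaviour, exactly as described in the item above:

```nix
{
  # Keep UPSs powered on (and draining) after hosts shut down.
  power.ups.upsmon.settings.POWERDOWNFLAG = null;
}
```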
@@ -670,6 +730,12 @@
 - ZFS now imports its pools in `postResumeCommands` rather than `postDeviceCommands`. If you had `postDeviceCommands` scripts that depended on ZFS pools being imported, those now need to be in `postResumeCommands`.

+- `services.automatic-timezoned.enable = true` will now set `time.timeZone = null`.
+  This is to avoid silently shadowing a user's explicitly defined timezone without recognition on the user's part.
+
+- `services.localtimed.enable = true` will now set `time.timeZone = null`.
+  This is to avoid silently shadowing a user's explicitly defined timezone without recognition on the user's part.
+
 ## Detailed migration information {#sec-release-24.11-migration}

 ### `sound` options removal {#sec-release-24.11-migration-sound}

View file

@@ -17,6 +17,7 @@ let
     filterAttrs
     flatten
     flip
+    hasPrefix
     head
     isInt
     isFloat
@@ -196,6 +197,10 @@ in rec {
     optional (attr ? ${name})
       "Systemd ${group} field `${name}' has been removed. See ${see}";

+  assertKeyIsSystemdCredential = name: group: attr:
+    optional (attr ? ${name} && !(hasPrefix "@" attr.${name}))
+      "Systemd ${group} field `${name}' is not a systemd credential";
+
   checkUnitConfig = group: checks: attrs: let
     # We're applied at the top-level type (attrsOf unitOption), so the actual
     # unit options might contain attributes from mkOverride and mkIf that we need to
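
A usage sketch of the new helper; the field and group names are illustrative, and systemd credentials are referenced with a leading `@`:

```nix
# Evaluates to [] (no error) because the value names a credential;
# a plain path such as "/run/secret" would yield the message instead.
assertKeyIsSystemdCredential "RootPasswordFile" "repart" {
  RootPasswordFile = "@root-password";
}
```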

View file

@@ -99,7 +99,16 @@ class Driver:
         with self.logger.nested("cleanup"):
             self.race_timer.cancel()
             for machine in self.machines:
-                machine.release()
+                try:
+                    machine.release()
+                except Exception as e:
+                    self.logger.error(f"Error during cleanup of {machine.name}: {e}")
+
+            for vlan in self.vlans:
+                try:
+                    vlan.stop()
+                except Exception as e:
+                    self.logger.error(f"Error during cleanup of vlan{vlan.nr}: {e}")

     def subtest(self, name: str) -> Iterator[None]:
         """Group logs under a given test name"""
View file

@@ -1234,6 +1234,9 @@ class Machine:
         self.monitor.close()
         self.serial_thread.join()

+        if self.qmp_client:
+            self.qmp_client.close()
+
     def run_callbacks(self) -> None:
         for callback in self.callbacks:
             callback()

View file

@@ -49,7 +49,7 @@ class QMPSession:
         sock.connect(str(path))
         return cls(sock)

-    def __del__(self) -> None:
+    def close(self) -> None:
         self.sock.close()

     def _wait_for_new_result(self) -> dict[str, str]:

View file

@@ -59,7 +59,7 @@ class VLan:
         self.logger.info(f"running vlan (pid {self.pid}; ctl {self.socket_dir})")

-    def __del__(self) -> None:
+    def stop(self) -> None:
         self.logger.info(f"kill vlan (pid {self.pid})")
         self.fd.close()
         self.process.terminate()

View file

@@ -114,6 +114,15 @@ in
         '';
       };

+      subscriberFiles = lib.mkOption {
+        type = lib.types.listOf lib.types.path;
+        default = [];
+        description = ''
+          Files written by resolvconf updates
+        '';
+        internal = true;
+      };
     };
   };

@@ -132,6 +141,10 @@ in
     }

     (lib.mkIf cfg.enable {
+      users.groups.resolvconf = {};
+
+      networking.resolvconf.subscriberFiles = [ "/etc/resolv.conf" ];
+
       networking.resolvconf.package = pkgs.openresolv;

       environment.systemPackages = [ cfg.package ];

@@ -143,12 +156,15 @@ in
         wants = [ "network-pre.target" ];
         wantedBy = [ "multi-user.target" ];
         restartTriggers = [ config.environment.etc."resolvconf.conf".source ];
+        serviceConfig.Type = "oneshot";
+        serviceConfig.RemainAfterExit = true;

-        serviceConfig = {
-          Type = "oneshot";
-          ExecStart = "${cfg.package}/bin/resolvconf -u";
-          RemainAfterExit = true;
-        };
+        script = ''
+          ${lib.getExe cfg.package} -u
+          files=(/run/resolvconf ${lib.escapeShellArgs cfg.subscriberFiles})
+          chgrp -R resolvconf "''${files[@]}"
+          chmod -R g=u "''${files[@]}"
+        '';
       };
     })

View file

@@ -16,7 +16,7 @@ in
   config = lib.mkIf cfg.enable {
     services.udev.packages = [
-      pkgs.steamPackages.steam
+      pkgs.steam-unwrapped
     ];

     # The uinput module needs to be loaded in order to trigger the udev rules

View file

@@ -36,7 +36,7 @@ let
     };
   };

-  power-pkg = config.boot.kernelPackages.system76-power;
+  power-pkg = pkgs.system76-power;

   powerConfig = mkIf cfg.power-daemon.enable {
     # Make system76-power usable by root from the command line.
     environment.systemPackages = [ power-pkg ];

View file

@@ -26,9 +26,9 @@ in
   config = mkIf cfg.enable {
-    # Module is upstream as of 6.10
-    boot.extraModulePackages = with config.boot.kernelPackages;
-      optional (kernelOlder "6.10") ipu6-drivers;
+    # Module is upstream as of 6.10,
+    # but still needs various out-of-tree i2c drivers and the `intel-ipu6-psys` kernel driver
+    boot.extraModulePackages = with config.boot.kernelPackages; [ ipu6-drivers ];

     hardware.firmware = with pkgs; [
       ipu6-camera-bins

View file

@@ -9,12 +9,7 @@ with lib;
   options = {
     netboot.squashfsCompression = mkOption {
-      default = with pkgs.stdenv.hostPlatform; "xz -Xdict-size 100% "
-        + lib.optionalString isx86 "-Xbcj x86"
-        # Untested but should also reduce size for these platforms
-        + lib.optionalString isAarch "-Xbcj arm"
-        + lib.optionalString (isPower && is32bit && isBigEndian) "-Xbcj powerpc"
-        + lib.optionalString (isSparc) "-Xbcj sparc";
+      default = "zstd -Xcompression-level 19";
       description = ''
         Compression settings to use for the squashfs nix store.
       '';
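
For images where size matters more than compression speed, the option can be set back to xz; the flags below mirror the removed default:

```nix
{
  # xz is slower to build and unpack, but smaller; -Xbcj x86 only
  # applies on x86 targets.
  netboot.squashfsCompression = "xz -Xdict-size 100% -Xbcj x86";
}
```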

View file

@@ -1,7 +1,8 @@
 {
-  x86_64-linux = "/nix/store/vhv7ckr0winivvwfqxd54d6pgq2hx1is-nix-2.18.8";
-  i686-linux = "/nix/store/8x7rmgi225r5kygpf17swvk3vll0c61y-nix-2.18.8";
-  aarch64-linux = "/nix/store/sbyj0rb1wd314zfxpf834d0clvxrxmv3-nix-2.18.8";
-  x86_64-darwin = "/nix/store/vsy1wl865md71qv177nchj0aj5p26pkl-nix-2.18.8";
-  aarch64-darwin = "/nix/store/54kqc2da3fjyjgzab4vaszxjmdvii6yk-nix-2.18.8";
+  x86_64-linux = "/nix/store/2nhrwv91g6ycpyxvhmvc0xs8p92wp4bk-nix-2.24.9";
+  i686-linux = "/nix/store/idaxj9ji6ggpn1h47a35mf0c8ns4ma39-nix-2.24.9";
+  aarch64-linux = "/nix/store/7b5q44l2p70bf6m6dprr8f0587ypwq1z-nix-2.24.9";
+  riscv64-linux = "/nix/store/mgw3il1qk59750g5hbf02km79rgyx00y-nix-riscv64-unknown-linux-gnu-2.24.9";
+  x86_64-darwin = "/nix/store/rp8rc0pfgham7d7spj5s9syzb138dmmd-nix-2.24.9";
+  aarch64-darwin = "/nix/store/1n95r340s7p3vdwqh7m94q0a42crahqq-nix-2.24.9";
 }

View file

@@ -296,7 +296,7 @@ in
       sickbeard = 265;
       headphones = 266;
       # couchpotato = 267; # unused, removed 2022-01-01
-      gogs = 268;
+      # gogs = 268; # unused, removed in 2024-10-12
       #pdns-recursor = 269; # dynamically allocated as of 2020-20-18
       #kresd = 270; # switched to "knot-resolver" with dynamic ID
       rpc = 271;

@@ -607,7 +607,7 @@ in
       sickbeard = 265;
       headphones = 266;
       # couchpotato = 267; # unused, removed 2022-01-01
-      gogs = 268;
+      # gogs = 268; # unused, removed in 2024-10-12
       #kresd = 270; # switched to "knot-resolver" with dynamic ID
       #rpc = 271; # unused
       #geoip = 272; # unused

View file

@@ -215,7 +215,7 @@
   ./programs/iftop.nix
   ./programs/i3lock.nix
   ./programs/iio-hyprland.nix
-  ./programs/immersed-vr.nix
+  ./programs/immersed.nix
   ./programs/iotop.nix
   ./programs/java.nix
   ./programs/joycond-cemuhook.nix

@@ -759,7 +759,6 @@
   ./services/misc/gitlab.nix
   ./services/misc/gitolite.nix
   ./services/misc/gitweb.nix
-  ./services/misc/gogs.nix
   ./services/misc/gollum.nix
   ./services/misc/gotenberg.nix
   ./services/misc/gpsd.nix

@@ -966,6 +965,7 @@
   ./services/network-filesystems/rsyncd.nix
   ./services/network-filesystems/samba-wsdd.nix
   ./services/network-filesystems/samba.nix
+  ./services/network-filesystems/saunafs.nix
   ./services/network-filesystems/tahoe.nix
   ./services/network-filesystems/u9fs.nix
   ./services/network-filesystems/webdav-server-rs.nix

@@ -980,6 +980,7 @@
   ./services/networking/aria2.nix
   ./services/networking/asterisk.nix
   ./services/networking/atftpd.nix
+  ./services/networking/atticd.nix
   ./services/networking/autossh.nix
   ./services/networking/avahi-daemon.nix
   ./services/networking/babeld.nix

@@ -1276,6 +1277,7 @@
   ./services/networking/xray.nix
   ./services/networking/xrdp.nix
   ./services/networking/yggdrasil.nix
+  ./services/networking/zapret.nix
   ./services/networking/zerobin.nix
   ./services/networking/zeronet.nix
   ./services/networking/zerotierone.nix

View file

@@ -1,256 +1,5 @@
+let
+  lib = import ../../../lib;
+in
+lib.warnIf (lib.isInOldestRelease 2411)
+  "nixos/modules/profiles/macos-builder.nix has moved to nixos/modules/profiles/nix-builder-vm.nix; please update your NixOS imports."
+  ./nix-builder-vm.nix
-{ config, lib, options, ... }:
-
-let
-  keysDirectory = "/var/keys";
-  user = "builder";
-  keyType = "ed25519";
-  cfg = config.virtualisation.darwin-builder;
-in
-{
-  imports = [
../virtualisation/qemu-vm.nix
# Avoid a dependency on stateVersion
{
disabledModules = [
../virtualisation/nixos-containers.nix
../services/x11/desktop-managers/xterm.nix
];
# swraid's default depends on stateVersion
config.boot.swraid.enable = false;
options.boot.isContainer = lib.mkOption { default = false; internal = true; };
}
];
options.virtualisation.darwin-builder = with lib; {
diskSize = mkOption {
default = 20 * 1024;
type = types.int;
example = 30720;
description = "The maximum disk space allocated to the runner in MB";
};
memorySize = mkOption {
default = 3 * 1024;
type = types.int;
example = 8192;
description = "The runner's memory in MB";
};
min-free = mkOption {
default = 1024 * 1024 * 1024;
type = types.int;
example = 1073741824;
description = ''
The threshold (in bytes) of free disk space left at which to
start garbage collection on the runner
'';
};
max-free = mkOption {
default = 3 * 1024 * 1024 * 1024;
type = types.int;
example = 3221225472;
description = ''
The threshold (in bytes) of free disk space left at which to
stop garbage collection on the runner
'';
};
workingDirectory = mkOption {
default = ".";
type = types.str;
example = "/var/lib/darwin-builder";
description = ''
The working directory to use to run the script. When running
as part of a flake will need to be set to a non read-only filesystem.
'';
};
hostPort = mkOption {
default = 31022;
type = types.int;
example = 22;
description = ''
The localhost host port to forward TCP to the guest port.
'';
};
};
config = {
# The builder is not intended to be used interactively
documentation.enable = false;
environment.etc = {
"ssh/ssh_host_ed25519_key" = {
mode = "0600";
source = ./keys/ssh_host_ed25519_key;
};
"ssh/ssh_host_ed25519_key.pub" = {
mode = "0644";
source = ./keys/ssh_host_ed25519_key.pub;
};
};
# DNS fails for QEMU user networking (SLiRP) on macOS. See:
#
# https://github.com/utmapp/UTM/issues/2353
#
# This works around that by using a public DNS server other than the DNS
# server that QEMU provides (normally 10.0.2.3)
networking.nameservers = [ "8.8.8.8" ];
# The linux builder is a lightweight VM for remote building; not evaluation.
nix.channel.enable = false;
# Deployment is by image.
# TODO system.switch.enable = false;?
system.disableInstallerTools = true;
nix.settings = {
auto-optimise-store = true;
min-free = cfg.min-free;
max-free = cfg.max-free;
trusted-users = [ user ];
};
services = {
getty.autologinUser = user;
openssh = {
enable = true;
authorizedKeysFiles = [ "${keysDirectory}/%u_${keyType}.pub" ];
};
};
system.build.macos-builder-installer =
let
privateKey = "/etc/nix/${user}_${keyType}";
publicKey = "${privateKey}.pub";
# This installCredentials script is written so that it's as easy as
# possible for a user to audit before confirming the `sudo`
installCredentials = hostPkgs.writeShellScript "install-credentials" ''
set -euo pipefail
KEYS="''${1}"
INSTALL=${hostPkgs.coreutils}/bin/install
"''${INSTALL}" -g nixbld -m 600 "''${KEYS}/${user}_${keyType}" ${privateKey}
"''${INSTALL}" -g nixbld -m 644 "''${KEYS}/${user}_${keyType}.pub" ${publicKey}
'';
hostPkgs = config.virtualisation.host.pkgs;
script = hostPkgs.writeShellScriptBin "create-builder" (
''
set -euo pipefail
'' +
# When running as non-interactively as part of a DarwinConfiguration the working directory
# must be set to a writeable directory.
(if cfg.workingDirectory != "." then ''
${hostPkgs.coreutils}/bin/mkdir --parent "${cfg.workingDirectory}"
cd "${cfg.workingDirectory}"
'' else "") + ''
KEYS="''${KEYS:-./keys}"
${hostPkgs.coreutils}/bin/mkdir --parent "''${KEYS}"
PRIVATE_KEY="''${KEYS}/${user}_${keyType}"
PUBLIC_KEY="''${PRIVATE_KEY}.pub"
if [ ! -e "''${PRIVATE_KEY}" ] || [ ! -e "''${PUBLIC_KEY}" ]; then
${hostPkgs.coreutils}/bin/rm --force -- "''${PRIVATE_KEY}" "''${PUBLIC_KEY}"
${hostPkgs.openssh}/bin/ssh-keygen -q -f "''${PRIVATE_KEY}" -t ${keyType} -N "" -C 'builder@localhost'
fi
if ! ${hostPkgs.diffutils}/bin/cmp "''${PUBLIC_KEY}" ${publicKey}; then
(set -x; sudo --reset-timestamp ${installCredentials} "''${KEYS}")
fi
KEYS="$(${hostPkgs.nix}/bin/nix-store --add "$KEYS")" ${lib.getExe config.system.build.vm}
'');
in
script.overrideAttrs (old: {
pos = __curPos; # sets meta.position to point here; see script binding above for package definition
meta = (old.meta or { }) // {
platforms = lib.platforms.darwin;
};
passthru = (old.passthru or { }) // {
# Let users in the repl inspect the config
nixosConfig = config;
nixosOptions = options;
};
});
system = {
# To prevent gratuitous rebuilds on each change to Nixpkgs
nixos.revision = null;
# to be updated by module maintainers, see nixpkgs#325610
stateVersion = "24.05";
};
users.users."${user}" = {
isNormalUser = true;
};
security.polkit.enable = true;
security.polkit.extraConfig = ''
polkit.addRule(function(action, subject) {
if (action.id === "org.freedesktop.login1.power-off" && subject.user === "${user}") {
return "yes";
} else {
return "no";
}
})
'';
virtualisation = {
diskSize = cfg.diskSize;
memorySize = cfg.memorySize;
forwardPorts = [
{ from = "host"; guest.port = 22; host.port = cfg.hostPort; }
];
# Disable graphics for the builder since users will likely want to run it
# non-interactively in the background.
graphics = false;
sharedDirectories.keys = {
source = "\"$KEYS\"";
target = keysDirectory;
};
# If we don't enable this option then the host will fail to delegate builds
# to the guest, because:
#
# - The host will lock the path to build
# - The host will delegate the build to the guest
# - The guest will attempt to lock the same path and fail because
# the lockfile on the host is visible on the guest
#
# Snapshotting the host's /nix/store as an image isolates the guest VM's
# /nix/store from the host's /nix/store, preventing this problem.
useNixStoreImage = true;
# Obviously the /nix/store needs to be writable on the guest in order for it
# to perform builds.
writableStore = true;
# This ensures that anything built on the guest isn't lost when the guest is
# restarted.
writableStoreUseTmpfs = false;
# Pass certificates from host to the guest otherwise when custom CA certificates
# are required we can't use the cached builder.
useHostCerts = true;
};
};
}

View file

@@ -0,0 +1,284 @@
/*
This profile uses NixOS to create a remote builder VM to build Linux packages,
which can be used to build packages for Linux on other operating systems;
primarily macOS.
It contains both the relevant guest settings as well as an installer script
that manages it as a QEMU virtual machine on the host.
*/
{
config,
lib,
options,
...
}:
let
keysDirectory = "/var/keys";
user = "builder";
keyType = "ed25519";
cfg = config.virtualisation.darwin-builder;
in
{
imports = [
../virtualisation/qemu-vm.nix
# Avoid a dependency on stateVersion
{
disabledModules = [
../virtualisation/nixos-containers.nix
../services/x11/desktop-managers/xterm.nix
];
# swraid's default depends on stateVersion
config.boot.swraid.enable = false;
options.boot.isContainer = lib.mkOption {
default = false;
internal = true;
};
}
];
options.virtualisation.darwin-builder = with lib; {
diskSize = mkOption {
default = 20 * 1024;
type = types.int;
example = 30720;
description = "The maximum disk space allocated to the runner in MB";
};
memorySize = mkOption {
default = 3 * 1024;
type = types.int;
example = 8192;
description = "The runner's memory in MB";
};
min-free = mkOption {
default = 1024 * 1024 * 1024;
type = types.int;
example = 1073741824;
description = ''
The threshold (in bytes) of free disk space left at which to
start garbage collection on the runner
'';
};
max-free = mkOption {
default = 3 * 1024 * 1024 * 1024;
type = types.int;
example = 3221225472;
description = ''
The threshold (in bytes) of free disk space left at which to
stop garbage collection on the runner
'';
};
workingDirectory = mkOption {
default = ".";
type = types.str;
example = "/var/lib/darwin-builder";
description = ''
The working directory to use to run the script. When running
as part of a flake will need to be set to a non read-only filesystem.
'';
};
hostPort = mkOption {
default = 31022;
type = types.int;
example = 22;
description = ''
The localhost host port to forward TCP to the guest port.
'';
};
};
config = {
# The builder is not intended to be used interactively
documentation.enable = false;
environment.etc = {
"ssh/ssh_host_ed25519_key" = {
mode = "0600";
source = ./keys/ssh_host_ed25519_key;
};
"ssh/ssh_host_ed25519_key.pub" = {
mode = "0644";
source = ./keys/ssh_host_ed25519_key.pub;
};
};
# DNS fails for QEMU user networking (SLiRP) on macOS. See:
#
# https://github.com/utmapp/UTM/issues/2353
#
# This works around that by using a public DNS server other than the DNS
# server that QEMU provides (normally 10.0.2.3)
networking.nameservers = [ "8.8.8.8" ];
# The linux builder is a lightweight VM for remote building; not evaluation.
nix.channel.enable = false;
# Deployment is by image.
# TODO system.switch.enable = false;?
system.disableInstallerTools = true;
nix.settings = {
auto-optimise-store = true;
min-free = cfg.min-free;
max-free = cfg.max-free;
trusted-users = [ user ];
};
services = {
getty.autologinUser = user;
openssh = {
enable = true;
authorizedKeysFiles = [ "${keysDirectory}/%u_${keyType}.pub" ];
};
};
system.build.macos-builder-installer =
let
privateKey = "/etc/nix/${user}_${keyType}";
publicKey = "${privateKey}.pub";
# This installCredentials script is written so that it's as easy as
# possible for a user to audit before confirming the `sudo`
installCredentials = hostPkgs.writeShellScript "install-credentials" ''
set -euo pipefail
KEYS="''${1}"
INSTALL=${hostPkgs.coreutils}/bin/install
"''${INSTALL}" -g nixbld -m 600 "''${KEYS}/${user}_${keyType}" ${privateKey}
"''${INSTALL}" -g nixbld -m 644 "''${KEYS}/${user}_${keyType}.pub" ${publicKey}
'';
hostPkgs = config.virtualisation.host.pkgs;
script = hostPkgs.writeShellScriptBin "create-builder" (
''
set -euo pipefail
''
+
# When running non-interactively as part of a DarwinConfiguration the working directory
# must be set to a writeable directory.
(
if cfg.workingDirectory != "." then
''
${hostPkgs.coreutils}/bin/mkdir --parent "${cfg.workingDirectory}"
cd "${cfg.workingDirectory}"
''
else
""
)
+ ''
KEYS="''${KEYS:-./keys}"
${hostPkgs.coreutils}/bin/mkdir --parent "''${KEYS}"
PRIVATE_KEY="''${KEYS}/${user}_${keyType}"
PUBLIC_KEY="''${PRIVATE_KEY}.pub"
if [ ! -e "''${PRIVATE_KEY}" ] || [ ! -e "''${PUBLIC_KEY}" ]; then
${hostPkgs.coreutils}/bin/rm --force -- "''${PRIVATE_KEY}" "''${PUBLIC_KEY}"
${hostPkgs.openssh}/bin/ssh-keygen -q -f "''${PRIVATE_KEY}" -t ${keyType} -N "" -C 'builder@localhost'
fi
if ! ${hostPkgs.diffutils}/bin/cmp "''${PUBLIC_KEY}" ${publicKey}; then
(set -x; sudo --reset-timestamp ${installCredentials} "''${KEYS}")
fi
KEYS="$(${hostPkgs.nix}/bin/nix-store --add "$KEYS")" ${lib.getExe config.system.build.vm}
''
);
in
script.overrideAttrs (old: {
pos = __curPos; # sets meta.position to point here; see script binding above for package definition
meta = (old.meta or { }) // {
platforms = lib.platforms.darwin;
};
passthru = (old.passthru or { }) // {
# Let users in the repl inspect the config
nixosConfig = config;
nixosOptions = options;
};
});
system = {
# To prevent gratuitous rebuilds on each change to Nixpkgs
nixos.revision = null;
# to be updated by module maintainers, see nixpkgs#325610
stateVersion = "24.05";
};
users.users."${user}" = {
isNormalUser = true;
};
security.polkit.enable = true;
security.polkit.extraConfig = ''
polkit.addRule(function(action, subject) {
if (action.id === "org.freedesktop.login1.power-off" && subject.user === "${user}") {
return "yes";
} else {
return "no";
}
})
'';
virtualisation = {
diskSize = cfg.diskSize;
memorySize = cfg.memorySize;
forwardPorts = [
{
from = "host";
guest.port = 22;
host.port = cfg.hostPort;
}
];
# Disable graphics for the builder since users will likely want to run it
# non-interactively in the background.
graphics = false;
sharedDirectories.keys = {
source = "\"$KEYS\"";
target = keysDirectory;
};
# If we don't enable this option then the host will fail to delegate builds
# to the guest, because:
#
# - The host will lock the path to build
# - The host will delegate the build to the guest
# - The guest will attempt to lock the same path and fail because
# the lockfile on the host is visible on the guest
#
# Snapshotting the host's /nix/store as an image isolates the guest VM's
# /nix/store from the host's /nix/store, preventing this problem.
useNixStoreImage = true;
# Obviously the /nix/store needs to be writable on the guest in order for it
# to perform builds.
writableStore = true;
# This ensures that anything built on the guest isn't lost when the guest is
# restarted.
writableStoreUseTmpfs = false;
# Pass certificates from host to the guest otherwise when custom CA certificates
# are required we can't use the cached builder.
useHostCerts = true;
};
};
}

View file

@@ -2,7 +2,7 @@
 let
   cfg = config.programs.gamemode;

-  settingsFormat = pkgs.formats.ini { };
+  settingsFormat = pkgs.formats.ini { listsAsDuplicateKeys = true; };
   configFile = settingsFormat.generate "gamemode.ini" cfg.settings;
 in
 {
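
A sketch of what `listsAsDuplicateKeys` enables here: list values in `programs.gamemode.settings` now render as repeated INI keys. The section follows gamemode's `[custom]` convention; the commands are illustrative:

```nix
{
  # Renders as two `start=` lines in the [custom] section of gamemode.ini.
  programs.gamemode.settings.custom.start = [
    "notify-send 'GameMode started'"
    "cpupower frequency-set -g performance"
  ];
}
```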

View file

@@ -1,14 +1,20 @@
-{ config, lib, pkgs, ... }:
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}:

 let
   cfg = config.programs.gpu-screen-recorder;

   package = cfg.package.override {
     inherit (config.security) wrapperDir;
   };
-in {
+in
+{
   options = {
     programs.gpu-screen-recorder = {
-      package = lib.mkPackageOption pkgs "gpu-screen-recorder" {};
+      package = lib.mkPackageOption pkgs "gpu-screen-recorder" { };

       enable = lib.mkOption {
         type = lib.types.bool;

@@ -28,12 +34,6 @@ in {
       capabilities = "cap_sys_admin+ep";
       source = "${package}/bin/gsr-kms-server";
     };
-
-    security.wrappers."gpu-screen-recorder" = {
-      owner = "root";
-      group = "root";
-      capabilities = "cap_sys_nice+ep";
-      source = "${package}/bin/gpu-screen-recorder";
-    };
   };

   meta.maintainers = with lib.maintainers; [ timschumi ];

View file

@@ -1,34 +0,0 @@
{
config,
lib,
pkgs,
...
}:
let
cfg = config.programs.immersed-vr;
in
{
options = {
programs.immersed-vr = {
enable = lib.mkEnableOption "immersed-vr";
package = lib.mkPackageOption pkgs "immersed-vr" {};
};
};
config = lib.mkIf cfg.enable {
boot = {
kernelModules = [ "v4l2loopback" "snd-aloop" ];
extraModulePackages = [ config.boot.kernelPackages.v4l2loopback ];
extraModprobeConfig = ''
options v4l2loopback exclusive_caps=1 card_label="v4l2loopback Virtual Camera"
'';
};
environment.systemPackages = [ cfg.package ];
};
meta.maintainers = pkgs.immersed-vr.meta.maintainers;
}

View file

@@ -0,0 +1,49 @@
{
config,
lib,
pkgs,
...
}:
let
cfg = config.programs.immersed;
in
{
imports = [
(lib.mkRenamedOptionModule
[
"programs"
"immersed-vr"
]
[
"programs"
"immersed"
]
)
];
options = {
programs.immersed = {
enable = lib.mkEnableOption "immersed";
package = lib.mkPackageOption pkgs "immersed" { };
};
};
config = lib.mkIf cfg.enable {
boot = {
kernelModules = [
"v4l2loopback"
"snd-aloop"
];
extraModulePackages = [ config.boot.kernelPackages.v4l2loopback ];
extraModprobeConfig = ''
options v4l2loopback exclusive_caps=1 card_label="v4l2loopback Virtual Camera"
'';
};
environment.systemPackages = [ cfg.package ];
};
meta.maintainers = pkgs.immersed.meta.maintainers;
}
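
Existing configurations keep working through the `mkRenamedOptionModule` alias above; new ones should use the new option path:

```nix
{
  programs.immersed.enable = true; # formerly programs.immersed-vr.enable
}
```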

View file

@@ -31,7 +31,7 @@ in {
         default = pkgs.steam;
         defaultText = lib.literalExpression "pkgs.steam";
         example = lib.literalExpression ''
-          pkgs.steam-small.override {
+          pkgs.steam.override {
             extraEnv = {
               MANGOHUD = true;
               OBS_VKCAPTURE = true;

View file

@@ -287,6 +287,18 @@ let
         '';
       };

+      rssh = lib.mkOption {
+        default = false;
+        type = lib.types.bool;
+        description = ''
+          If set, the calling user's SSH agent is used to authenticate
+          against the configured keys. This module works in a manner
+          similar to pam_ssh_agent_auth, but supports a wider range
+          of SSH key types, including those protected by security
+          keys (FIDO2).
+        '';
+      };
+
       duoSecurity = {
         enable = lib.mkOption {
           default = false;
@@ -673,6 +685,7 @@ let
           { name = "ssh_agent_auth"; enable = config.security.pam.sshAgentAuth.enable && cfg.sshAgentAuth; control = "sufficient"; modulePath = "${pkgs.pam_ssh_agent_auth}/libexec/pam_ssh_agent_auth.so"; settings = {
             file = lib.concatStringsSep ":" config.security.pam.sshAgentAuth.authorizedKeysFiles;
           }; }
+          (let inherit (config.security.pam) rssh; in { name = "rssh"; enable = rssh.enable && cfg.rssh; control = "sufficient"; modulePath = "${pkgs.pam_rssh}/lib/libpam_rssh.so"; inherit (rssh) settings; })
           (let p11 = config.security.pam.p11; in { name = "p11"; enable = cfg.p11Auth; control = p11.control; modulePath = "${pkgs.pam_p11}/lib/security/pam_p11.so"; args = [
             "${pkgs.opensc}/lib/opensc-pkcs11.so"
           ]; })
@@ -950,8 +963,9 @@ let
value.source = pkgs.writeText "${name}.pam" service.text;
};
-optionalSudoConfigForSSHAgentAuth = lib.optionalString config.security.pam.sshAgentAuth.enable ''
-# Keep SSH_AUTH_SOCK so that pam_ssh_agent_auth.so can do its magic.
+optionalSudoConfigForSSHAgentAuth = lib.optionalString
+(config.security.pam.sshAgentAuth.enable || config.security.pam.rssh.enable) ''
+# Keep SSH_AUTH_SOCK so that pam_ssh_agent_auth.so and libpam_rssh.so can do their magic.
Defaults env_keep+=SSH_AUTH_SOCK
'';
@@ -1068,6 +1082,55 @@ in
};
};
security.pam.rssh = {
enable = lib.mkEnableOption "authenticating using a signature performed by the ssh-agent";
settings = lib.mkOption {
type = lib.types.submodule {
freeformType = moduleSettingsType;
options = {
auth_key_file = lib.mkOption {
type = with lib.types; nullOr nonEmptyStr;
description = ''
Path to file with trusted public keys in OpenSSH's `authorized_keys` format. The following
variables are expanded to the respective PAM items:
- `service`: `PAM_SERVICE`, the service name,
- `user`: `PAM_USER`, the username of the entity under whose identity service will be given,
- `tty`: `PAM_TTY`, the terminal name,
- `rhost`: `PAM_RHOST`, the requesting hostname, and
- `ruser`: `PAM_RUSER`, the requesting entity.
These PAM items are explained in {manpage}`pam_get_item(3)`.
Variables may be specified as `$var`, `''${var}` or `''${var:defaultValue}`.
::: {.note}
Specifying user-writeable files here results in an insecure configuration: a malicious process
can then edit such an `authorized_keys` file and bypass the ssh-agent-based authentication.
This option is ignored if {option}`security.pam.rssh.settings.authorized_keys_command` is set.
If both this option and {option}`security.pam.rssh.settings.authorized_keys_command` are unset,
the keys will be read from `''${HOME}/.ssh/authorized_keys`, which should be considered
insecure.
'';
default = "/etc/ssh/authorized_keys.d/$ruser";
};
};
};
default = { };
description = ''
Options to pass to the pam_rssh module. Refer to
<https://github.com/z4yx/pam_rssh/blob/main/README.md#optional-arguments>
for supported values.
${moduleSettingsDescription}
'';
};
};
security.pam.enableOTPW = lib.mkEnableOption "the OTPW (one-time password) PAM module";
security.pam.dp9ik = {
@@ -1512,16 +1575,30 @@ in
Did you forget to set `services.openssh.enable` ?
'';
}
{
assertion = with config.security.pam.rssh;
enable -> (settings.auth_key_file or null != null || settings.authorized_keys_command or null != null);
message = ''
security.pam.rssh.enable requires either security.pam.rssh.settings.auth_key_file or
security.pam.rssh.settings.authorized_keys_command to be set.
'';
}
];
warnings = lib.optional
-(with lib; with config.security.pam.sshAgentAuth;
+(with config.security.pam.sshAgentAuth;
enable && lib.any (s: lib.hasPrefix "%h" s || lib.hasPrefix "~" s) authorizedKeysFiles)
''config.security.pam.sshAgentAuth.authorizedKeysFiles contains files in the user's home directory.
Specifying user-writeable files there results in an insecure configuration:
a malicious process can then edit such an authorized_keys file and bypass the ssh-agent-based authentication.
See https://github.com/NixOS/nixpkgs/issues/31611
'' ++ lib.optional
(with config.security.pam.rssh;
enable && settings.auth_key_file or null != null && settings.authorized_keys_command or null != null) ''
security.pam.rssh.settings.auth_key_file will be ignored as
security.pam.rssh.settings.authorized_keys_command has been specified.
Explicitly set the former to null to silence this warning.
'';
environment.systemPackages = environment.systemPackages =
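
Taken together, the additions wire pam_rssh into individual PAM services the same way sshAgentAuth works. A minimal sketch using only the options introduced above:

    security.pam.rssh.enable = true;
    # Trust keys from a root-owned location; $ruser expands to the requesting user:
    security.pam.rssh.settings.auth_key_file = "/etc/ssh/authorized_keys.d/$ruser";
    # Opt individual services in, analogous to the per-service sshAgentAuth flag:
    security.pam.services.sudo.rssh = true;

With that, `sudo` succeeds when the caller's forwarded ssh-agent holds a key matching the configured file, and the generated sudoers fragment keeps SSH_AUTH_SOCK for both pam_ssh_agent_auth and libpam_rssh.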

View file

@@ -186,7 +186,8 @@ in {
http.docRoot = lib.mkOption {
type = with lib.types; nullOr path;
-default = null;
+default = pkgs.snapweb;
+defaultText = lib.literalExpression "pkgs.snapweb";
description = ''
Path to serve from the HTTP servers root.
'';
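
Since the default changed from null to pkgs.snapweb, configurations that relied on serving nothing must now say so explicitly. Assuming this option lives in the snapserver module (`services.snapserver`, inferred from where snapweb is used, not shown in the hunk):

    services.snapserver.http.docRoot = null;  # restore the old no-docroot behaviour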

View file

@@ -19,7 +19,9 @@ with lib;
])
);
-config.systemd.services = flip mapAttrs' config.services.github-runners (name: cfg:
+config.systemd.services =
+let enabledRunners = filterAttrs (_: cfg: cfg.enable) config.services.github-runners;
+in (flip mapAttrs' enabledRunners (name: cfg:
let
svcName = "github-runner-${name}";
systemdDir = "github-runner/${name}";
@@ -296,5 +298,5 @@ with lib;
cfg.serviceOverrides
];
}
-);
+));
}
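
With `enabledRunners` filtered first, a runner declared with `enable = false` now produces no systemd unit at all instead of a permanently stopped one. A sketch (`url` and `tokenFile` are the module's usual registration options, shown for illustration):

    services.github-runners.ci = {
      enable = false;  # no github-runner-ci.service is generated anymore
      url = "https://github.com/example/repo";
      tokenFile = "/run/secrets/github-runner-token";
    };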

View file

@@ -1,7 +1,4 @@
{ config, lib, pkgs, ... }:
-with lib;
let
cfg = config.services.mysql;
@@ -9,7 +6,7 @@ let
isMariaDB = lib.getName cfg.package == lib.getName pkgs.mariadb;
isOracle = lib.getName cfg.package == lib.getName pkgs.mysql80;
# Oracle MySQL has supported "notify" service type since 8.0
-hasNotify = isMariaDB || (isOracle && versionAtLeast cfg.package.version "8.0");
+hasNotify = isMariaDB || (isOracle && lib.versionAtLeast cfg.package.version "8.0");
mysqldOptions =
"--user=${cfg.user} --datadir=${cfg.dataDir} --basedir=${cfg.package}";
@@ -21,11 +18,11 @@ in
{
imports = [
-(mkRemovedOptionModule [ "services" "mysql" "pidDir" ] "Don't wait for pidfiles, describe dependencies through systemd.")
-(mkRemovedOptionModule [ "services" "mysql" "rootPassword" ] "Use socket authentication or set the password outside of the nix store.")
-(mkRemovedOptionModule [ "services" "mysql" "extraOptions" ] "Use services.mysql.settings.mysqld instead.")
-(mkRemovedOptionModule [ "services" "mysql" "bind" ] "Use services.mysql.settings.mysqld.bind-address instead.")
-(mkRemovedOptionModule [ "services" "mysql" "port" ] "Use services.mysql.settings.mysqld.port instead.")
+(lib.mkRemovedOptionModule [ "services" "mysql" "pidDir" ] "Don't wait for pidfiles, describe dependencies through systemd.")
+(lib.mkRemovedOptionModule [ "services" "mysql" "rootPassword" ] "Use socket authentication or set the password outside of the nix store.")
+(lib.mkRemovedOptionModule [ "services" "mysql" "extraOptions" ] "Use services.mysql.settings.mysqld instead.")
+(lib.mkRemovedOptionModule [ "services" "mysql" "bind" ] "Use services.mysql.settings.mysqld.bind-address instead.")
+(lib.mkRemovedOptionModule [ "services" "mysql" "port" ] "Use services.mysql.settings.mysqld.port instead.")
];
###### interface
@@ -34,18 +31,18 @@ in
services.mysql = {
-enable = mkEnableOption "MySQL server";
+enable = lib.mkEnableOption "MySQL server";
-package = mkOption {
-type = types.package;
-example = literalExpression "pkgs.mariadb";
+package = lib.mkOption {
+type = lib.types.package;
+example = lib.literalExpression "pkgs.mariadb";
description = ''
Which MySQL derivation to use. MariaDB packages are supported too.
'';
};
-user = mkOption {
-type = types.str;
+user = lib.mkOption {
+type = lib.types.str;
default = "mysql";
description = ''
User account under which MySQL runs.
@@ -58,8 +55,8 @@ in
'';
};
-group = mkOption {
-type = types.str;
+group = lib.mkOption {
+type = lib.types.str;
default = "mysql";
description = ''
Group account under which MySQL runs.
@@ -72,8 +69,8 @@ in
'';
};
-dataDir = mkOption {
-type = types.path;
+dataDir = lib.mkOption {
+type = lib.types.path;
example = "/var/lib/mysql";
description = ''
The data directory for MySQL.
@@ -85,8 +82,8 @@ in
'';
};
-configFile = mkOption {
-type = types.path;
+configFile = lib.mkOption {
+type = lib.types.path;
default = configFile;
defaultText = ''
A configuration file automatically generated by NixOS.
@@ -95,7 +92,7 @@ in
Override the configuration file used by MySQL. By default,
NixOS generates one automatically from {option}`services.mysql.settings`.
'';
-example = literalExpression ''
+example = lib.literalExpression ''
pkgs.writeText "my.cnf" '''
[mysqld]
datadir = /var/lib/mysql
@@ -107,7 +104,7 @@ in
'';
};
-settings = mkOption {
+settings = lib.mkOption {
type = format.type;
default = {};
description = ''
@@ -123,7 +120,7 @@ in
`1`, or `0`. See the provided example below.
:::
'';
-example = literalExpression ''
+example = lib.literalExpression ''
{
mysqld = {
key_buffer_size = "6G";
@@ -139,17 +136,17 @@ in
'';
};
-initialDatabases = mkOption {
-type = types.listOf (types.submodule {
+initialDatabases = lib.mkOption {
+type = lib.types.listOf (lib.types.submodule {
options = {
-name = mkOption {
-type = types.str;
+name = lib.mkOption {
+type = lib.types.str;
description = ''
The name of the database to create.
'';
};
-schema = mkOption {
-type = types.nullOr types.path;
+schema = lib.mkOption {
+type = lib.types.nullOr lib.types.path;
default = null;
description = ''
The initial schema of the database; if null (the default),
@@ -163,7 +160,7 @@ in
List of database names and their initial schemas that should be used to create databases on the first startup
of MySQL. The schema attribute is optional: If not specified, an empty database is created.
'';
-example = literalExpression ''
+example = lib.literalExpression ''
[
{ name = "foodatabase"; schema = ./foodatabase.sql; }
{ name = "bardatabase"; }
@@ -171,14 +168,14 @@ in
'';
};
-initialScript = mkOption {
-type = types.nullOr types.path;
+initialScript = lib.mkOption {
+type = lib.types.nullOr lib.types.path;
default = null;
description = "A file containing SQL statements to be executed on the first startup. Can be used for granting certain permissions on the database.";
};
-ensureDatabases = mkOption {
-type = types.listOf types.str;
+ensureDatabases = lib.mkOption {
+type = lib.types.listOf lib.types.str;
default = [];
description = ''
Ensures that the specified databases exist.
@@ -192,17 +189,17 @@ in
];
};
-ensureUsers = mkOption {
-type = types.listOf (types.submodule {
+ensureUsers = lib.mkOption {
+type = lib.types.listOf (lib.types.submodule {
options = {
-name = mkOption {
-type = types.str;
+name = lib.mkOption {
+type = lib.types.str;
description = ''
Name of the user to ensure.
'';
};
-ensurePermissions = mkOption {
-type = types.attrsOf types.str;
+ensurePermissions = lib.mkOption {
+type = lib.types.attrsOf lib.types.str;
default = {};
description = ''
Permissions to ensure for the user, specified as attribute set.
@@ -216,7 +213,7 @@ in
[GRANT syntax](https://mariadb.com/kb/en/library/grant/).
The attributes are used as `GRANT ''${attrName} ON ''${attrValue}`.
'';
-example = literalExpression ''
+example = lib.literalExpression ''
{
"database.*" = "ALL PRIVILEGES";
"*.*" = "SELECT, LOCK TABLES";
@@ -234,7 +231,7 @@ in
option is changed. This means that users created and permissions assigned once through this option or
otherwise have to be removed manually.
'';
-example = literalExpression ''
+example = lib.literalExpression ''
[
{
name = "nextcloud";
@@ -253,40 +250,40 @@ in
};
replication = {
-role = mkOption {
-type = types.enum [ "master" "slave" "none" ];
+role = lib.mkOption {
+type = lib.types.enum [ "master" "slave" "none" ];
default = "none";
description = "Role of the MySQL server instance.";
};
-serverId = mkOption {
-type = types.int;
+serverId = lib.mkOption {
+type = lib.types.int;
default = 1;
description = "Id of the MySQL server instance. This number must be unique for each instance.";
};
-masterHost = mkOption {
-type = types.str;
+masterHost = lib.mkOption {
+type = lib.types.str;
description = "Hostname of the MySQL master server.";
};
-slaveHost = mkOption {
-type = types.str;
+slaveHost = lib.mkOption {
+type = lib.types.str;
description = "Hostname of the MySQL slave server.";
};
-masterUser = mkOption {
-type = types.str;
+masterUser = lib.mkOption {
+type = lib.types.str;
description = "Username of the MySQL replication user.";
};
-masterPassword = mkOption {
-type = types.str;
+masterPassword = lib.mkOption {
+type = lib.types.str;
description = "Password of the MySQL replication user.";
};
-masterPort = mkOption {
-type = types.port;
+masterPort = lib.mkOption {
+type = lib.types.port;
default = 3306;
description = "Port number on which the MySQL master server runs.";
};
@@ -298,30 +295,30 @@ in
###### implementation
-config = mkIf cfg.enable {
+config = lib.mkIf cfg.enable {
services.mysql.dataDir =
-mkDefault (if versionAtLeast config.system.stateVersion "17.09" then "/var/lib/mysql"
+lib.mkDefault (if lib.versionAtLeast config.system.stateVersion "17.09" then "/var/lib/mysql"
else "/var/mysql");
-services.mysql.settings.mysqld = mkMerge [
+services.mysql.settings.mysqld = lib.mkMerge [
{
datadir = cfg.dataDir;
-port = mkDefault 3306;
+port = lib.mkDefault 3306;
}
-(mkIf (cfg.replication.role == "master" || cfg.replication.role == "slave") {
+(lib.mkIf (cfg.replication.role == "master" || cfg.replication.role == "slave") {
log-bin = "mysql-bin-${toString cfg.replication.serverId}";
log-bin-index = "mysql-bin-${toString cfg.replication.serverId}.index";
relay-log = "mysql-relay-bin";
server-id = cfg.replication.serverId;
binlog-ignore-db = [ "information_schema" "performance_schema" "mysql" ];
})
-(mkIf (!isMariaDB) {
+(lib.mkIf (!isMariaDB) {
plugin-load-add = "auth_socket.so";
})
];
-users.users = optionalAttrs (cfg.user == "mysql") {
+users.users = lib.optionalAttrs (cfg.user == "mysql") {
mysql = {
description = "MySQL server user";
group = cfg.group;
@@ -329,7 +326,7 @@ in
};
};
-users.groups = optionalAttrs (cfg.group == "mysql") {
+users.groups = lib.optionalAttrs (cfg.group == "mysql") {
mysql.gid = config.ids.gids.mysql;
};
@@ -380,7 +377,7 @@ in
# The super user account to use on *first* run of MySQL server
superUser = if isMariaDB then cfg.user else "root";
in ''
-${optionalString (!hasNotify) ''
+${lib.optionalString (!hasNotify) ''
# Wait until the MySQL server is available for use
while [ ! -e /run/mysqld/mysqld.sock ]
do
@@ -397,13 +394,13 @@ in
echo "GRANT ALL PRIVILEGES ON *.* TO '${cfg.user}'@'localhost' WITH GRANT OPTION;"
) | ${cfg.package}/bin/mysql -u ${superUser} -N
-${concatMapStrings (database: ''
+${lib.concatMapStrings (database: ''
# Create initial databases
if ! test -e "${cfg.dataDir}/${database.name}"; then
echo "Creating initial database: ${database.name}"
( echo 'create database `${database.name}`;'
-${optionalString (database.schema != null) ''
+${lib.optionalString (database.schema != null) ''
echo 'use `${database.name}`;'
# TODO: this silently falls through if database.schema does not exist,
@@ -420,7 +417,7 @@ in
fi
'') cfg.initialDatabases}
-${optionalString (cfg.replication.role == "master")
+${lib.optionalString (cfg.replication.role == "master")
''
# Set up the replication master
@@ -431,7 +428,7 @@ in
) | ${cfg.package}/bin/mysql -u ${superUser} -N
''}
-${optionalString (cfg.replication.role == "slave")
+${lib.optionalString (cfg.replication.role == "slave")
''
# Set up the replication slave
@@ -441,7 +438,7 @@ in
) | ${cfg.package}/bin/mysql -u ${superUser} -N
''}
-${optionalString (cfg.initialScript != null)
+${lib.optionalString (cfg.initialScript != null)
''
# Execute initial script
# using toString to avoid copying the file to nix store if given as path instead of string,
@@ -452,25 +449,25 @@ in
rm ${cfg.dataDir}/mysql_init
fi
-${optionalString (cfg.ensureDatabases != []) ''
+${lib.optionalString (cfg.ensureDatabases != []) ''
(
-${concatMapStrings (database: ''
+${lib.concatMapStrings (database: ''
echo "CREATE DATABASE IF NOT EXISTS \`${database}\`;"
'') cfg.ensureDatabases}
) | ${cfg.package}/bin/mysql -N
''}
-${concatMapStrings (user:
+${lib.concatMapStrings (user:
''
( echo "CREATE USER IF NOT EXISTS '${user.name}'@'localhost' IDENTIFIED WITH ${if isMariaDB then "unix_socket" else "auth_socket"};"
-${concatStringsSep "\n" (mapAttrsToList (database: permission: ''
+${lib.concatStringsSep "\n" (lib.mapAttrsToList (database: permission: ''
echo "GRANT ${permission} ON ${database} TO '${user.name}'@'localhost';"
'') user.ensurePermissions)}
) | ${cfg.package}/bin/mysql -N
'') cfg.ensureUsers}
'';
-serviceConfig = mkMerge [
+serviceConfig = lib.mkMerge [
{
Type = if hasNotify then "notify" else "simple";
Restart = "on-abort";
@@ -506,7 +503,7 @@ in
# System Call Filtering
SystemCallArchitectures = "native";
}
-(mkIf (cfg.dataDir == "/var/lib/mysql") {
+(lib.mkIf (cfg.dataDir == "/var/lib/mysql") {
StateDirectory = "mysql";
StateDirectoryMode = "0700";
})

View file

@@ -72,7 +72,28 @@ in {
defaultText = literalExpression ''
if name == "" then "redis" else "redis-''${name}"
'';
-description = "The username and groupname for redis-server.";
+description = ''
User account under which this instance of redis-server runs.
::: {.note}
If left as the default value this user will automatically be
created on system activation, otherwise you are responsible for
ensuring the user exists before the redis service starts.
:::
'';
};
group = mkOption {
type = types.str;
default = config.user;
defaultText = literalExpression "config.user";
description = ''
Group account under which this instance of redis-server runs.
::: {.note}
If left as the default value this group will automatically be
created on system activation, otherwise you are responsible for
ensuring the group exists before the redis service starts.
:::
'';
};
port = mkOption {
@@ -337,7 +358,7 @@ in {
redisConfStore = redisConfig conf.settings;
in ''
touch "${redisConfVar}" "${redisConfRun}"
-chown '${conf.user}' "${redisConfVar}" "${redisConfRun}"
+chown '${conf.user}':'${conf.group}' "${redisConfVar}" "${redisConfRun}"
chmod 0600 "${redisConfVar}" "${redisConfRun}"
if [ ! -s ${redisConfVar} ]; then
echo 'include "${redisConfRun}"' > "${redisConfVar}"
@@ -353,7 +374,7 @@ in {
Type = "notify";
# User and group
User = conf.user;
-Group = conf.user;
+Group = conf.group;
# Runtime directory and mode
RuntimeDirectory = redisName name;
RuntimeDirectoryMode = "0750";
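
A sketch of the new `group` option for a named instance (the `services.redis.servers.<name>` prefix is assumed from the module these hunks belong to):

    services.redis.servers."myapp" = {
      enable = true;
      user = "myapp-redis";   # non-default: per the note above, you must create it
      group = "myapp-redis";  # new option; defaults to config.user
    };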

View file

@@ -16,7 +16,6 @@ in {
libayatana-common
ubports-click
]) ++ (with pkgs.lomiri; [
-content-hub
hfd-service
history-service
libusermetrics
@@ -24,6 +23,7 @@ in {
lomiri-calculator-app
lomiri-camera-app
lomiri-clock-app
+lomiri-content-hub
lomiri-docviewer-app
lomiri-download-manager
lomiri-filemanager-app
@@ -129,7 +129,7 @@ in {
environment.pathsToLink = [
# Configs for inter-app data exchange system
-"/share/content-hub/peers"
+"/share/lomiri-content-hub/peers"
# Configs for inter-app URL requests
"/share/lomiri-url-dispatcher/urls"
# Splash screens & other images for desktop apps launched via lomiri-app-launch
@@ -194,10 +194,6 @@ in {
};
users.groups.usermetrics = { };
-# TODO content-hub cannot pass files between applications without asking AA for permissions. And alot of the Lomiri stack is designed with AA availability in mind. This might be a requirement to be closer to upstream?
-# But content-hub currently fails to pass files between applications even with AA enabled, and we can get away without AA in many places. Let's see how this develops before requiring this for good.
-# security.apparmor.enable = true;
};
meta.maintainers = lib.teams.lomiri.members;

View file

@@ -249,10 +249,11 @@ in {
xdg.portal.enable = true;
xdg.portal.extraPortals = [
+kdePackages.kwallet
kdePackages.xdg-desktop-portal-kde
pkgs.xdg-desktop-portal-gtk
];
-xdg.portal.configPackages = mkDefault [kdePackages.xdg-desktop-portal-kde];
+xdg.portal.configPackages = mkDefault [kdePackages.plasma-workspace];
services.pipewire.enable = mkDefault true;
# Enable screen reader by default

View file

@@ -95,8 +95,8 @@ in {
package = mkOption {
type = types.package;
-default = config.boot.kernelPackages.system76-scheduler;
-defaultText = literalExpression "config.boot.kernelPackages.system76-scheduler";
+default = pkgs.system76-scheduler;
+defaultText = literalExpression "pkgs.system76-scheduler";
description = "Which System76-Scheduler package to use.";
};
@@ -252,7 +252,7 @@ in {
# No custom settings: just use stock configuration with a fix for Pipewire
"system76-scheduler/config.kdl".source = "${cfg.package}/data/config.kdl";
"system76-scheduler/process-scheduler/00-dist.kdl".source = "${cfg.package}/data/pop_os.kdl";
-"system76-scheduler/process-scheduler/01-fix-pipewire-paths.kdl".source = ../../../../pkgs/os-specific/linux/system76-scheduler/01-fix-pipewire-paths.kdl;
+"system76-scheduler/process-scheduler/01-fix-pipewire-paths.kdl".source = ../../../../pkgs/by-name/sy/system76-scheduler/01-fix-pipewire-paths.kdl;
})
(let

View file

@@ -185,7 +185,7 @@ in {
})
(lib.mkIf (cfg.enable && (cfg.user == "jupyter")) {
users.extraUsers.jupyter = {
-extraGroups = [ cfg.group ];
+inherit (cfg) group;
home = "/var/lib/jupyter";
createHome = true;
isSystemUser = true;

View file

@@ -77,50 +77,63 @@
};
-config = {
-virtualisation.docker.daemon.settings = lib.mkIf
-(config.hardware.nvidia-container-toolkit.enable &&
-(lib.versionAtLeast config.virtualisation.docker.package.version "25")) {
-features.cdi = true;
-};
-hardware.nvidia-container-toolkit.mounts = let
+config = lib.mkIf config.hardware.nvidia-container-toolkit.enable {
+virtualisation.docker = {
+daemon.settings = lib.mkIf
+(lib.versionAtLeast config.virtualisation.docker.package.version "25") {
+features.cdi = true;
+};
+rootless.daemon.settings = lib.mkIf
+(config.virtualisation.docker.rootless.enable &&
+(lib.versionAtLeast config.virtualisation.docker.package.version "25")) {
+features.cdi = true;
+};
+};
+hardware = {
+graphics.enable = lib.mkIf (!config.hardware.nvidia.datacenter.enable) true;
+nvidia-container-toolkit.mounts = let
nvidia-driver = config.hardware.nvidia.package;
in (lib.mkMerge [
[{ hostPath = pkgs.addDriverRunpath.driverLink;
containerPath = pkgs.addDriverRunpath.driverLink; }
{ hostPath = "${lib.getLib nvidia-driver}/etc";
containerPath = "${lib.getLib nvidia-driver}/etc"; }
{ hostPath = "${lib.getLib nvidia-driver}/share";
containerPath = "${lib.getLib nvidia-driver}/share"; }
{ hostPath = "${lib.getLib pkgs.glibc}/lib";
containerPath = "${lib.getLib pkgs.glibc}/lib"; }
{ hostPath = "${lib.getLib pkgs.glibc}/lib64";
containerPath = "${lib.getLib pkgs.glibc}/lib64"; }]
(lib.mkIf config.hardware.nvidia-container-toolkit.mount-nvidia-executables
[{ hostPath = lib.getExe' nvidia-driver "nvidia-cuda-mps-control";
containerPath = "/usr/bin/nvidia-cuda-mps-control"; }
{ hostPath = lib.getExe' nvidia-driver "nvidia-cuda-mps-server";
containerPath = "/usr/bin/nvidia-cuda-mps-server"; }
{ hostPath = lib.getExe' nvidia-driver "nvidia-debugdump";
containerPath = "/usr/bin/nvidia-debugdump"; }
{ hostPath = lib.getExe' nvidia-driver "nvidia-powerd";
containerPath = "/usr/bin/nvidia-powerd"; }
{ hostPath = lib.getExe' nvidia-driver "nvidia-smi";
containerPath = "/usr/bin/nvidia-smi"; }])
# nvidia-docker 1.0 uses /usr/local/nvidia/lib{,64}
# e.g.
# - https://gitlab.com/nvidia/container-images/cuda/-/blob/e3ff10eab3a1424fe394899df0e0f8ca5a410f0f/dist/12.3.1/ubi9/base/Dockerfile#L44
# - https://github.com/NVIDIA/nvidia-docker/blob/01d2c9436620d7dde4672e414698afe6da4a282f/src/nvidia/volumes.go#L104-L173
(lib.mkIf config.hardware.nvidia-container-toolkit.mount-nvidia-docker-1-directories
[{ hostPath = "${lib.getLib nvidia-driver}/lib";
containerPath = "/usr/local/nvidia/lib"; }
{ hostPath = "${lib.getLib nvidia-driver}/lib";
containerPath = "/usr/local/nvidia/lib64"; }])
]);
+};
+services.xserver.videoDrivers = lib.mkIf
+(!config.hardware.nvidia.datacenter.enable) [ "nvidia" ];
-systemd.services.nvidia-container-toolkit-cdi-generator = lib.mkIf config.hardware.nvidia-container-toolkit.enable {
+systemd.services.nvidia-container-toolkit-cdi-generator = {
description = "Container Device Interface (CDI) for Nvidia generator";
wantedBy = [ "multi-user.target" ];
after = [ "systemd-udev-settle.service" ];

View file

@@ -82,6 +82,7 @@ in {
) {
webadmin = lib.mkDefault "file://${cfg.package.webadmin}/webadmin.zip";
};
+webadmin.path = "/var/cache/stalwart-mail";
};
# This service stores a potentially large amount of data.
@@ -117,6 +118,7 @@ in {
StandardOutput = "journal";
StandardError = "journal";
+CacheDirectory = "stalwart-mail";
StateDirectory = "stalwart-mail";
# Bind standard privileged ports
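
The two additions work as a pair: `CacheDirectory` makes systemd create /var/cache/stalwart-mail, and `webadmin.path` points the unpacked web admin bundle at it instead of the state directory. Expressed as the resulting option settings (illustrative, mirroring the hunks above; the exact settings attribute path is an assumption):

    services.stalwart-mail.settings.webadmin.path = "/var/cache/stalwart-mail";
    systemd.services.stalwart-mail.serviceConfig.CacheDirectory = "stalwart-mail";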

View file

@@ -1,271 +0,0 @@
{ config, lib, options, pkgs, ... }:
let
cfg = config.services.gogs;
opt = options.services.gogs;
configFile = pkgs.writeText "app.ini" ''
BRAND_NAME = ${cfg.appName}
RUN_USER = ${cfg.user}
RUN_MODE = prod
[database]
TYPE = ${cfg.database.type}
HOST = ${cfg.database.host}:${toString cfg.database.port}
NAME = ${cfg.database.name}
USER = ${cfg.database.user}
PASSWORD = #dbpass#
PATH = ${cfg.database.path}
[repository]
ROOT = ${cfg.repositoryRoot}
[server]
DOMAIN = ${cfg.domain}
HTTP_ADDR = ${cfg.httpAddress}
HTTP_PORT = ${toString cfg.httpPort}
EXTERNAL_URL = ${cfg.rootUrl}
[session]
COOKIE_NAME = session
COOKIE_SECURE = ${lib.boolToString cfg.cookieSecure}
[security]
SECRET_KEY = #secretkey#
INSTALL_LOCK = true
[log]
ROOT_PATH = ${cfg.stateDir}/log
${cfg.extraConfig}
'';
in
{
options = {
services.gogs = {
enable = lib.mkOption {
default = false;
type = lib.types.bool;
description = "Enable Go Git Service.";
};
useWizard = lib.mkOption {
default = false;
type = lib.types.bool;
description = "Do not generate a configuration and use Gogs' installation wizard instead. The first registered user will be administrator.";
};
stateDir = lib.mkOption {
default = "/var/lib/gogs";
type = lib.types.str;
description = "Gogs data directory.";
};
user = lib.mkOption {
type = lib.types.str;
default = "gogs";
description = "User account under which Gogs runs.";
};
group = lib.mkOption {
type = lib.types.str;
default = "gogs";
description = "Group account under which Gogs runs.";
};
database = {
type = lib.mkOption {
type = lib.types.enum [ "sqlite3" "mysql" "postgres" ];
example = "mysql";
default = "sqlite3";
description = "Database engine to use.";
};
host = lib.mkOption {
type = lib.types.str;
default = "127.0.0.1";
description = "Database host address.";
};
port = lib.mkOption {
type = lib.types.port;
default = 3306;
description = "Database host port.";
};
name = lib.mkOption {
type = lib.types.str;
default = "gogs";
description = "Database name.";
};
user = lib.mkOption {
type = lib.types.str;
default = "gogs";
description = "Database user.";
};
password = lib.mkOption {
type = lib.types.str;
default = "";
description = ''
The password corresponding to {option}`database.user`.
Warning: this is stored in cleartext in the Nix store!
Use {option}`database.passwordFile` instead.
'';
};
passwordFile = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
example = "/run/keys/gogs-dbpassword";
description = ''
A file containing the password corresponding to
{option}`database.user`.
'';
};
path = lib.mkOption {
type = lib.types.str;
default = "${cfg.stateDir}/data/gogs.db";
defaultText = lib.literalExpression ''"''${config.${opt.stateDir}}/data/gogs.db"'';
description = "Path to the sqlite3 database file.";
};
};
appName = lib.mkOption {
type = lib.types.str;
default = "Gogs: Go Git Service";
description = "Application name.";
};
repositoryRoot = lib.mkOption {
type = lib.types.str;
default = "${cfg.stateDir}/repositories";
defaultText = lib.literalExpression ''"''${config.${opt.stateDir}}/repositories"'';
description = "Path to the git repositories.";
};
domain = lib.mkOption {
type = lib.types.str;
default = "localhost";
description = "Domain name of your server.";
};
rootUrl = lib.mkOption {
type = lib.types.str;
default = "http://localhost:3000/";
description = "Full public URL of Gogs server.";
};
httpAddress = lib.mkOption {
type = lib.types.str;
default = "0.0.0.0";
description = "HTTP listen address.";
};
httpPort = lib.mkOption {
type = lib.types.port;
default = 3000;
description = "HTTP listen port.";
};
cookieSecure = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
Marks session cookies as "secure" as a hint for browsers to only send
them via HTTPS. This option is recommended if Gogs is being served over HTTPS.
'';
};
extraConfig = lib.mkOption {
type = lib.types.str;
default = "";
description = "Configuration lines appended to the generated Gogs configuration file.";
};
};
};
config = lib.mkIf cfg.enable {
systemd.services.gogs = {
description = "Gogs (Go Git Service)";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
path = [ pkgs.gogs ];
preStart = let
runConfig = "${cfg.stateDir}/custom/conf/app.ini";
secretKey = "${cfg.stateDir}/custom/conf/secret_key";
in ''
mkdir -p ${cfg.stateDir}
# copy custom configuration and generate a random secret key if needed
${lib.optionalString (cfg.useWizard == false) ''
mkdir -p ${cfg.stateDir}/custom/conf
cp -f ${configFile} ${runConfig}
if [ ! -e ${secretKey} ]; then
head -c 16 /dev/urandom | base64 > ${secretKey}
fi
KEY=$(head -n1 ${secretKey})
DBPASS=$(head -n1 ${cfg.database.passwordFile})
sed -e "s,#secretkey#,$KEY,g" \
-e "s,#dbpass#,$DBPASS,g" \
-i ${runConfig}
''}
mkdir -p ${cfg.repositoryRoot}
# update all hooks' binary paths
HOOKS=$(find ${cfg.repositoryRoot} -mindepth 4 -maxdepth 4 -type f -wholename "*git/hooks/*")
if [ "$HOOKS" ]
then
sed -ri 's,/nix/store/[a-z0-9.-]+/bin/gogs,${pkgs.gogs}/bin/gogs,g' $HOOKS
sed -ri 's,/nix/store/[a-z0-9.-]+/bin/env,${pkgs.coreutils}/bin/env,g' $HOOKS
sed -ri 's,/nix/store/[a-z0-9.-]+/bin/bash,${pkgs.bash}/bin/bash,g' $HOOKS
sed -ri 's,/nix/store/[a-z0-9.-]+/bin/perl,${pkgs.perl}/bin/perl,g' $HOOKS
fi
'';
serviceConfig = {
Type = "simple";
User = cfg.user;
Group = cfg.group;
WorkingDirectory = cfg.stateDir;
ExecStart = "${pkgs.gogs}/bin/gogs web";
Restart = "always";
UMask = "0027";
};
environment = {
USER = cfg.user;
HOME = cfg.stateDir;
GOGS_WORK_DIR = cfg.stateDir;
};
};
users = lib.mkIf (cfg.user == "gogs") {
users.gogs = {
description = "Go Git Service";
uid = config.ids.uids.gogs;
group = "gogs";
home = cfg.stateDir;
createHome = true;
shell = pkgs.bash;
};
groups.gogs.gid = config.ids.gids.gogs;
};
warnings = lib.optional (cfg.database.password != "")
''config.services.gogs.database.password will be stored as plaintext
in the Nix store. Use database.passwordFile instead.'';
# Create database passwordFile default when password is configured.
services.gogs.database.passwordFile =
(lib.mkDefault (toString (pkgs.writeTextFile {
name = "gogs-database-password";
text = cfg.database.password;
})));
};
}

View file

@@ -436,6 +436,30 @@ in
TimeoutSec = "300";
WorkingDirectory = "${cfg.package}/share/redmine";
ExecStart="${bundle} exec rails server -u webrick -e production -b ${toString cfg.address} -p ${toString cfg.port} -P '${cfg.stateDir}/redmine.pid'";
AmbientCapabilities = "";
CapabilityBoundingSet = "";
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = true;
PrivateTmp = true;
ProcSubset = "pid";
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "noaccess";
ProtectSystem = "full";
RemoveIPC = true;
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" ];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
UMask = 027;
};
}; };
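
These are additive systemd sandboxing options; none change Redmine's behaviour unless a plugin needs more than they allow. If one does, the unit can be relaxed from the user's configuration with a normal override; a hedged sketch:

    # Example: a plugin that also needs IPv6 sockets
    systemd.services.redmine.serviceConfig.RestrictAddressFamilies =
      lib.mkForce [ "AF_UNIX" "AF_INET" "AF_INET6" ];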

View file

@@ -309,8 +309,10 @@ let
defaultText = lib.literalMD ''
{
MINSUPPLIES = 1;
-RUN_AS_USER = "root";
+MONITOR = <generated from config.power.ups.upsmon.monitor>
NOTIFYCMD = "''${pkgs.nut}/bin/upssched";
+POWERDOWNFLAG = "/run/killpower";
+RUN_AS_USER = "root";
SHUTDOWNCMD = "''${pkgs.systemd}/bin/shutdown now";
}
'';
@@ -330,11 +332,12 @@ let
config = {
enable = lib.mkDefault (lib.elem cfg.mode [ "standalone" "netserver" "netclient" ]);
settings = {
-RUN_AS_USER = "root"; # TODO: replace 'root' by another username.
MINSUPPLIES = lib.mkDefault 1;
-NOTIFYCMD = lib.mkDefault "${pkgs.nut}/bin/upssched";
-SHUTDOWNCMD = lib.mkDefault "${pkgs.systemd}/bin/shutdown now";
MONITOR = lib.flip lib.mapAttrsToList cfg.upsmon.monitor (name: monitor: with monitor; [ system powerValue user "\"@upsmon_password_${name}@\"" type ]);
+NOTIFYCMD = lib.mkDefault "${pkgs.nut}/bin/upssched";
+POWERDOWNFLAG = lib.mkDefault "/run/killpower";
+RUN_AS_USER = "root"; # TODO: replace 'root' by another username.
+SHUTDOWNCMD = lib.mkDefault "${pkgs.systemd}/bin/shutdown now";
};
};
};
@@ -574,6 +577,24 @@ in
];
};
systemd.services.ups-killpower = lib.mkIf (cfg.upsmon.settings.POWERDOWNFLAG != null) {
enable = cfg.upsd.enable;
description = "UPS Kill Power";
wantedBy = [ "shutdown.target" ];
after = [ "shutdown.target" ];
before = [ "final.target" ];
unitConfig = {
ConditionPathExists = cfg.upsmon.settings.POWERDOWNFLAG;
DefaultDependencies = "no";
};
environment = envVars;
serviceConfig = {
Type = "oneshot";
ExecStart = "${pkgs.nut}/bin/upsdrvctl shutdown";
Slice = "system-ups.slice";
};
};
environment.etc = {
"nut/nut.conf".source = pkgs.writeText "nut.conf"
''
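
Because the generated `ups-killpower` unit is gated on `POWERDOWNFLAG != null` (and on `upsd` running locally), the new late-shutdown behaviour can be opted out of per host. A hedged sketch, assuming the freeform settings accept null for this key:

    power.ups = {
      enable = true;
      mode = "standalone";
      # Skip the UPS kill-power step that ups-killpower performs at shutdown:
      upsmon.settings.POWERDOWNFLAG = null;
    };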

View file

@@ -0,0 +1,287 @@
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.saunafs;
settingsFormat =
let
listSep = " ";
allowedTypes = with lib.types; [
bool
int
float
str
];
valueToString =
val:
if lib.isList val then
lib.concatStringsSep listSep (map (x: valueToString x) val)
else if lib.isBool val then
(if val then "1" else "0")
else
toString val;
in
{
type =
let
valueType =
lib.types.oneOf (
[
(lib.types.listOf valueType)
]
++ allowedTypes
)
// {
description = "Flat key-value file";
};
in
lib.types.attrsOf valueType;
generate =
name: value:
pkgs.writeText name (
lib.concatStringsSep "\n" (lib.mapAttrsToList (key: val: "${key} = ${valueToString val}") value)
);
};
initTool = pkgs.writeShellScriptBin "sfsmaster-init" ''
if [ ! -e ${cfg.master.settings.DATA_PATH}/metadata.sfs ]; then
cp --update=none ${pkgs.saunafs}/var/lib/saunafs/metadata.sfs.empty ${cfg.master.settings.DATA_PATH}/metadata.sfs
chmod +w ${cfg.master.settings.DATA_PATH}/metadata.sfs
fi
'';
# master config file
masterCfg = settingsFormat.generate "sfsmaster.cfg" cfg.master.settings;
# metalogger config file
metaloggerCfg = settingsFormat.generate "sfsmetalogger.cfg" cfg.metalogger.settings;
# chunkserver config file
chunkserverCfg = settingsFormat.generate "sfschunkserver.cfg" cfg.chunkserver.settings;
# generic template for all daemons
systemdService = name: extraConfig: configFile: {
wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" ];
after = [
"network.target"
"network-online.target"
];
serviceConfig = {
Type = "forking";
ExecStart = "${pkgs.saunafs}/bin/sfs${name} -c ${configFile} start";
ExecStop = "${pkgs.saunafs}/bin/sfs${name} -c ${configFile} stop";
ExecReload = "${pkgs.saunafs}/bin/sfs${name} -c ${configFile} reload";
} // extraConfig;
};
in
{
###### interface
options = {
services.saunafs = {
masterHost = lib.mkOption {
type = lib.types.str;
default = null;
description = "IP or hostname name of master host.";
};
sfsUser = lib.mkOption {
type = lib.types.str;
default = "saunafs";
description = "Run daemons as user.";
};
client.enable = lib.mkEnableOption "Saunafs client";
master = {
enable = lib.mkOption {
type = lib.types.bool;
description = ''
Enable Saunafs master daemon.
You need to run `sfsmaster-init` on a freshly installed master server to
initialize the `DATA_PATH` directory.
'';
default = false;
};
exports = lib.mkOption {
type = with lib.types; listOf str;
default = null;
description = "Paths to exports file (see {manpage}`sfsexports.cfg(5)`).";
example = lib.literalExpression ''
[ "* / rw,alldirs,admin,maproot=0:0" ];
'';
};
openFirewall = lib.mkOption {
type = lib.types.bool;
description = "Whether to automatically open the necessary ports in the firewall.";
default = false;
};
settings = lib.mkOption {
type = lib.types.submodule {
freeformType = settingsFormat.type;
options.DATA_PATH = lib.mkOption {
type = lib.types.str;
default = "/var/lib/saunafs/master";
description = "Data storage directory.";
};
};
description = "Contents of config file ({manpage}`sfsmaster.cfg(5)`).";
};
};
metalogger = {
enable = lib.mkEnableOption "Saunafs metalogger daemon";
settings = lib.mkOption {
type = lib.types.submodule {
freeformType = settingsFormat.type;
options.DATA_PATH = lib.mkOption {
type = lib.types.str;
default = "/var/lib/saunafs/metalogger";
description = "Data storage directory";
};
};
description = "Contents of metalogger config file (see {manpage}`sfsmetalogger.cfg(5)`).";
};
};
chunkserver = {
enable = lib.mkEnableOption "Saunafs chunkserver daemon";
openFirewall = lib.mkOption {
type = lib.types.bool;
description = "Whether to automatically open the necessary ports in the firewall.";
default = false;
};
hdds = lib.mkOption {
type = with lib.types; listOf str;
default = null;
example = lib.literalExpression ''
[ "/mnt/hdd1" ];
'';
description = ''
Mount points to be used by chunkserver for storage (see {manpage}`sfshdd.cfg(5)`).
Note that these mount points must be writable by the configured saunafs user.
'';
};
settings = lib.mkOption {
type = lib.types.submodule {
freeformType = settingsFormat.type;
options.DATA_PATH = lib.mkOption {
type = lib.types.str;
default = "/var/lib/saunafs/chunkserver";
description = "Directory for chunck meta data";
};
};
description = "Contents of chunkserver config file (see {manpage}`sfschunkserver.cfg(5)`).";
};
};
};
};
###### implementation
config =
lib.mkIf (cfg.client.enable || cfg.master.enable || cfg.metalogger.enable || cfg.chunkserver.enable)
{
warnings = [
(lib.mkIf (cfg.sfsUser == "root") "Running saunafs services as root is not recommended.")
];
# Service settings
services.saunafs = {
master.settings = lib.mkIf cfg.master.enable {
WORKING_USER = cfg.sfsUser;
EXPORTS_FILENAME = toString (
pkgs.writeText "sfsexports.cfg" (lib.concatStringsSep "\n" cfg.master.exports)
);
};
metalogger.settings = lib.mkIf cfg.metalogger.enable {
WORKING_USER = cfg.sfsUser;
MASTER_HOST = cfg.masterHost;
};
chunkserver.settings = lib.mkIf cfg.chunkserver.enable {
WORKING_USER = cfg.sfsUser;
MASTER_HOST = cfg.masterHost;
HDD_CONF_FILENAME = toString (
pkgs.writeText "sfshdd.cfg" (lib.concatStringsSep "\n" cfg.chunkserver.hdds)
);
};
};
# Create system user account for daemons
users =
lib.mkIf
(cfg.sfsUser != "root" && (cfg.master.enable || cfg.metalogger.enable || cfg.chunkserver.enable))
{
users."${cfg.sfsUser}" = {
isSystemUser = true;
description = "saunafs daemon user";
group = "saunafs";
};
groups."${cfg.sfsUser}" = { };
};
environment.systemPackages =
(lib.optional cfg.client.enable pkgs.saunafs) ++ (lib.optional cfg.master.enable initTool);
networking.firewall.allowedTCPPorts =
(lib.optionals cfg.master.openFirewall [
9419
9420
9421
])
++ (lib.optional cfg.chunkserver.openFirewall 9422);
# Ensure storage directories exist
systemd.tmpfiles.rules =
lib.optional cfg.master.enable "d ${cfg.master.settings.DATA_PATH} 0700 ${cfg.sfsUser} ${cfg.sfsUser} -"
++ lib.optional cfg.metalogger.enable "d ${cfg.metalogger.settings.DATA_PATH} 0700 ${cfg.sfsUser} ${cfg.sfsUser} -"
++ lib.optional cfg.chunkserver.enable "d ${cfg.chunkserver.settings.DATA_PATH} 0700 ${cfg.sfsUser} ${cfg.sfsUser} -";
# Service definitions
systemd.services.sfs-master = lib.mkIf cfg.master.enable (
systemdService "master" {
TimeoutStartSec = 1800;
TimeoutStopSec = 1800;
Restart = "no";
} masterCfg
);
systemd.services.sfs-metalogger = lib.mkIf cfg.metalogger.enable (
systemdService "metalogger" { Restart = "on-abort"; } metaloggerCfg
);
systemd.services.sfs-chunkserver = lib.mkIf cfg.chunkserver.enable (
systemdService "chunkserver" { Restart = "on-abort"; } chunkserverCfg
);
};
}
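
A minimal sketch combining the options this module declares, for one machine acting as both master and chunkserver (address and mount point are examples):

    services.saunafs = {
      masterHost = "192.0.2.10";
      master = {
        enable = true;
        openFirewall = true;
        exports = [ "* / rw,alldirs,maproot=0:0" ];
      };
      chunkserver = {
        enable = true;
        openFirewall = true;
        hdds = [ "/mnt/hdd1" ];
      };
    };

As the `master.enable` description notes, run `sfsmaster-init` once on a fresh master to populate `DATA_PATH` before the first start.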

View file

@@ -0,0 +1,234 @@
{
lib,
pkgs,
config,
...
}:
let
inherit (lib) types;
cfg = config.services.atticd;
format = pkgs.formats.toml { };
checkedConfigFile =
pkgs.runCommand "checked-attic-server.toml"
{
configFile = format.generate "server.toml" cfg.settings;
}
''
export ATTIC_SERVER_TOKEN_RS256_SECRET_BASE64="$(${lib.getExe pkgs.openssl} genrsa -traditional 4096 | ${pkgs.coreutils}/bin/base64 -w0)"
export ATTIC_SERVER_DATABASE_URL="sqlite://:memory:"
${lib.getExe cfg.package} --mode check-config -f $configFile
cat <$configFile >$out
'';
atticadmShim = pkgs.writeShellScript "atticadm" ''
if [ -n "$ATTICADM_PWD" ]; then
cd "$ATTICADM_PWD"
if [ "$?" != "0" ]; then
>&2 echo "Warning: Failed to change directory to $ATTICADM_PWD"
fi
fi
exec ${cfg.package}/bin/atticadm -f ${checkedConfigFile} "$@"
'';
atticadmWrapper = pkgs.writeShellScriptBin "atticd-atticadm" ''
exec systemd-run \
--quiet \
--pipe \
--pty \
--same-dir \
--wait \
--collect \
--service-type=exec \
--property=EnvironmentFile=${cfg.environmentFile} \
--property=DynamicUser=yes \
--property=User=${cfg.user} \
--property=Environment=ATTICADM_PWD=$(pwd) \
--working-directory / \
-- \
${atticadmShim} "$@"
'';
hasLocalPostgresDB =
let
url = cfg.settings.database.url or "";
localStrings = [
"localhost"
"127.0.0.1"
"/run/postgresql"
];
hasLocalStrings = lib.any (lib.flip lib.hasInfix url) localStrings;
in
config.services.postgresql.enable && lib.hasPrefix "postgresql://" url && hasLocalStrings;
in
{
options = {
services.atticd = {
enable = lib.mkEnableOption "the atticd, the Nix Binary Cache server";
package = lib.mkPackageOption pkgs "attic-server" { };
environmentFile = lib.mkOption {
description = ''
Path to an EnvironmentFile containing required environment
variables:
- ATTIC_SERVER_TOKEN_RS256_SECRET_BASE64: The base64-encoded RSA PEM PKCS1 of the
RS256 JWT secret. Generate it with `openssl genrsa -traditional 4096 | base64 -w0`.
'';
type = types.nullOr types.path;
default = null;
};
user = lib.mkOption {
description = ''
The user under which attic runs.
'';
type = types.str;
default = "atticd";
};
group = lib.mkOption {
description = ''
The group under which attic runs.
'';
type = types.str;
default = "atticd";
};
settings = lib.mkOption {
description = ''
Structured configurations of atticd.
See https://github.com/zhaofengli/attic/blob/main/server/src/config-template.toml
'';
type = format.type;
default = { };
};
mode = lib.mkOption {
description = ''
Mode in which to run the server.
'monolithic' runs all components, and is suitable for single-node deployments.
'api-server' runs only the API server, and is suitable for clustering.
'garbage-collector' only runs the garbage collector periodically.
A simple NixOS-based Attic deployment will typically have one 'monolithic' and any number of 'api-server' nodes.
There are several other supported modes that perform one-off operations, but these are the only ones that make sense to run via the NixOS module.
'';
type = lib.types.enum [
"monolithic"
"api-server"
"garbage-collector"
];
default = "monolithic";
};
};
};
config = lib.mkIf cfg.enable {
assertions = [
{
assertion = cfg.environmentFile != null;
message = ''
<option>services.atticd.environmentFile</option> is not set.
Run `openssl genrsa -traditional 4096 | base64 -w0` and create a file with the following contents:
ATTIC_SERVER_TOKEN_RS256_SECRET_BASE64="output from command"
Then, set `services.atticd.environmentFile` to the quoted absolute path of the file.
'';
}
];
services.atticd.settings = {
chunking = lib.mkDefault {
nar-size-threshold = 65536;
min-size = 16384; # 16 KiB
avg-size = 65536; # 64 KiB
max-size = 262144; # 256 KiB
};
database.url = lib.mkDefault "sqlite:///var/lib/atticd/server.db?mode=rwc";
# "storage" is internally tagged
# if the user sets something the whole thing must be replaced
storage = lib.mkDefault {
type = "local";
path = "/var/lib/atticd/storage";
};
};
systemd.services.atticd = {
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" ] ++ lib.optionals hasLocalPostgresDB [ "postgresql.service" ];
requires = lib.optionals hasLocalPostgresDB [ "postgresql.service" ];
wants = [ "network-online.target" ];
serviceConfig = {
ExecStart = "${lib.getExe cfg.package} -f ${checkedConfigFile} --mode ${cfg.mode}";
EnvironmentFile = cfg.environmentFile;
StateDirectory = "atticd"; # for usage with local storage and sqlite
DynamicUser = true;
User = cfg.user;
Group = cfg.group;
Restart = "on-failure";
RestartSec = 10;
CapabilityBoundingSet = [ "" ];
DeviceAllow = "";
DevicePolicy = "closed";
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = true;
PrivateTmp = true;
PrivateUsers = true;
ProcSubset = "pid";
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProtectSystem = "strict";
ReadWritePaths =
let
path = cfg.settings.storage.path;
isDefaultStateDirectory = path == "/var/lib/atticd" || lib.hasPrefix "/var/lib/atticd/" path;
in
lib.optionals (cfg.settings.storage.type or "" == "local" && !isDefaultStateDirectory) [ path ];
RemoveIPC = true;
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
"AF_UNIX"
];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
SystemCallFilter = [
"@system-service"
"~@resources"
"~@privileged"
];
UMask = "0077";
};
};
environment.systemPackages = [
atticadmWrapper
];
};
}
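
A hedged minimal deployment using only options declared above (the secret path is an example; `listen` is assumed to be a valid attic-server setting per its config template):

    services.atticd = {
      enable = true;
      # File containing ATTIC_SERVER_TOKEN_RS256_SECRET_BASE64=...
      environmentFile = "/run/secrets/atticd-env";
      mode = "monolithic";
      settings.listen = "[::]:8080";
    };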

View file

@@ -10,7 +10,7 @@ let
enableDHCP = config.networking.dhcpcd.enable &&
(config.networking.useDHCP || lib.any (i: i.useDHCP == true) interfaces);
-enableNTPService = (config.services.ntp.enable || config.services.ntpd-rs.enable || config.services.openntpd.enable || config.services.chrony.enable);
+useResolvConf = config.networking.resolvconf.enable;
# Don't start dhcpcd on explicitly configured interfaces or on
# interfaces that are part of a bridge, bond or sit device.
@@ -88,23 +88,6 @@ let
${cfg.extraConfig}
'';
-exitHook = pkgs.writeText "dhcpcd.exit-hook" ''
-${lib.optionalString enableNTPService ''
-if [ "$reason" = BOUND -o "$reason" = REBOOT ]; then
-# Restart ntpd. We need to restart it to make sure that it will actually do something:
-# if ntpd cannot resolve the server hostnames in its config file, then it will never do
-# anything ever again ("couldn't resolve ..., giving up on it"), so we silently lose
-# time synchronisation. This also applies to openntpd.
-${lib.optionalString config.services.ntp.enable "/run/current-system/systemd/bin/systemctl try-reload-or-restart ntpd.service || true"}
-${lib.optionalString config.services.ntpd-rs.enable "/run/current-system/systemd/bin/systemctl try-reload-or-restart ntpd-rs.service || true"}
-${lib.optionalString config.services.openntpd.enable "/run/current-system/systemd/bin/systemctl try-reload-or-restart openntpd.service || true"}
-${lib.optionalString config.services.chrony.enable "/run/current-system/systemd/bin/systemctl try-reload-or-restart chronyd.service || true"}
-fi
-''}
-${cfg.runHook}
-'';
in
{
@ -181,6 +164,19 @@ in
description = '' description = ''
Shell code that will be run after all other hooks. See Shell code that will be run after all other hooks. See
`man dhcpcd-run-hooks` for details on what is possible. `man dhcpcd-run-hooks` for details on what is possible.
::: {.note}
To use sudo or similar tools in your script you may have to set:
systemd.services.dhcpcd.serviceConfig.NoNewPrivileges = false;
In addition, as most of the filesystem is inaccessible to dhcpcd
by default, you may want to define some exceptions, e.g.
systemd.services.dhcpcd.serviceConfig.ReadOnlyPaths = [
"/run/user/1000/bus" # to send desktop notifications
];
:::
''; '';
}; };
@ -206,22 +202,6 @@ in
config = lib.mkIf enableDHCP { config = lib.mkIf enableDHCP {
assertions = [ {
# dhcpcd doesn't start properly with malloc ∉ [ jemalloc libc mimalloc scudo ]
# see https://github.com/NixOS/nixpkgs/issues/151696
assertion =
dhcpcd.enablePrivSep
-> lib.elem config.environment.memoryAllocator.provider [ "jemalloc" "libc" "mimalloc" "scudo" ];
message = ''
dhcpcd with privilege separation is incompatible with chosen system malloc.
Currently `graphene-hardened` allocator is known to be broken.
To disable dhcpcd's privilege separation, overlay Nixpkgs and override dhcpcd
to set `enablePrivSep = false`.
'';
} ];
environment.etc."dhcpcd.conf".source = dhcpcdConf;
systemd.services.dhcpcd = let systemd.services.dhcpcd = let
cfgN = config.networking; cfgN = config.networking;
hasDefaultGatewaySet = (cfgN.defaultGateway != null && cfgN.defaultGateway.address != "") hasDefaultGatewaySet = (cfgN.defaultGateway != null && cfgN.defaultGateway.address != "")
@ -230,10 +210,11 @@ in
{ description = "DHCP Client"; { description = "DHCP Client";
wantedBy = [ "multi-user.target" ] ++ lib.optional (!hasDefaultGatewaySet) "network-online.target"; wantedBy = [ "multi-user.target" ] ++ lib.optional (!hasDefaultGatewaySet) "network-online.target";
wants = [ "network.target" ]; wants = [ "network.target" "resolvconf.service" ];
after = [ "resolvconf.service" ];
before = [ "network-online.target" ]; before = [ "network-online.target" ];
restartTriggers = lib.optional (enableNTPService || cfg.runHook != "") [ exitHook ]; restartTriggers = [ cfg.runHook ];
# Stopping dhcpcd during a reconfiguration is undesirable # Stopping dhcpcd during a reconfiguration is undesirable
# because it brings down the network interfaces configured by # because it brings down the network interfaces configured by
@ -247,46 +228,64 @@ in
serviceConfig = serviceConfig =
{ Type = "forking"; { Type = "forking";
PIDFile = "/run/dhcpcd/pid"; PIDFile = "/run/dhcpcd/pid";
SupplementaryGroups = lib.optional useResolvConf "resolvconf";
User = "dhcpcd";
Group = "dhcpcd";
StateDirectory = "dhcpcd";
RuntimeDirectory = "dhcpcd"; RuntimeDirectory = "dhcpcd";
ExecStartPre = "+${pkgs.writeShellScript "migrate-dhcpcd" ''
# migrate from old database directory
if test -f /var/db/dhcpcd/duid; then
echo 'migrating DHCP leases from /var/db/dhcpcd to /var/lib/dhcpcd ...'
mv /var/db/dhcpcd/* -t /var/lib/dhcpcd
chown dhcpcd:dhcpcd /var/lib/dhcpcd/*
rmdir /var/db/dhcpcd || true
echo done
fi
''}";
ExecStart = "@${dhcpcd}/sbin/dhcpcd dhcpcd --quiet ${lib.optionalString cfg.persistent "--persistent"} --config ${dhcpcdConf}"; ExecStart = "@${dhcpcd}/sbin/dhcpcd dhcpcd --quiet ${lib.optionalString cfg.persistent "--persistent"} --config ${dhcpcdConf}";
ExecReload = "${dhcpcd}/sbin/dhcpcd --rebind"; ExecReload = "${dhcpcd}/sbin/dhcpcd --rebind";
Restart = "always"; Restart = "always";
} // lib.optionalAttrs (cfg.runHook == "") { AmbientCapabilities = [ "CAP_NET_ADMIN" "CAP_NET_RAW" "CAP_NET_BIND_SERVICE" ];
# Proc filesystem ReadWritePaths = [ "/proc/sys/net/ipv6" ]
ProcSubset = "all"; ++ lib.optionals useResolvConf ([ "/run/resolvconf" ] ++ config.networking.resolvconf.subscriberFiles);
ProtectProc = "invisible"; DeviceAllow = "";
# Access write directories
UMask = "0027";
# Capabilities
CapabilityBoundingSet = [ "CAP_NET_ADMIN" "CAP_NET_BIND_SERVICE" "CAP_NET_RAW" "CAP_SETGID" "CAP_SETUID" "CAP_SYS_CHROOT" ];
# Security
NoNewPrivileges = true;
# Sandboxing
ProtectSystem = true;
ProtectHome = true;
PrivateTmp = true;
PrivateDevices = true;
PrivateUsers = false;
ProtectHostname = true;
ProtectClock = true;
ProtectKernelTunables = false;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" "AF_NETLINK" "AF_PACKET" ];
RestrictNamespaces = true;
LockPersonality = true; LockPersonality = true;
MemoryDenyWriteExecute = true; MemoryDenyWriteExecute = true;
NoNewPrivileges = lib.mkDefault true; # may be disabled for sudo in runHook
PrivateDevices = true;
PrivateMounts = true;
PrivateTmp = true;
PrivateUsers = false;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = "tmpfs"; # allow exceptions to be added to ReadOnlyPaths, etc.
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProtectSystem = "strict";
RemoveIPC = true;
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" "AF_NETLINK" "AF_PACKET" ];
RestrictNamespaces = true;
RestrictRealtime = true; RestrictRealtime = true;
RestrictSUIDSGID = true; RestrictSUIDSGID = true;
RemoveIPC = true; SystemCallFilter = [
PrivateMounts = true; "@system-service"
# System Call Filtering "~@aio" "~@chown" "~@keyring" "~@memlock"
];
SystemCallArchitectures = "native"; SystemCallArchitectures = "native";
SystemCallFilter = [ "~@cpu-emulation @debug @keyring @mount @obsolete @privileged @resources" "chroot" "gettid" "setgroups" "setuid" ]; UMask = "0027";
}; };
}; };
# Note: the service could run with `DynamicUser`, however that makes
# impossible (for no good reason, see systemd issue #20495) to disable
# `NoNewPrivileges` or `ProtectHome`, which users may want to in order
# to run certain scripts in `networking.dhcpcd.runHook`.
users.users.dhcpcd = { users.users.dhcpcd = {
isSystemUser = true; isSystemUser = true;
group = "dhcpcd"; group = "dhcpcd";
@ -295,9 +294,7 @@ in
environment.systemPackages = [ dhcpcd ]; environment.systemPackages = [ dhcpcd ];
environment.etc."dhcpcd.exit-hook" = lib.mkIf (enableNTPService || cfg.runHook != "") { environment.etc."dhcpcd.exit-hook".text = cfg.runHook;
source = exitHook;
};
powerManagement.resumeCommands = lib.mkIf config.systemd.services.dhcpcd.enable powerManagement.resumeCommands = lib.mkIf config.systemd.services.dhcpcd.enable
'' ''
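
Following the new note in the `runHook` description, a minimal sketch of relaxing the tightened sandbox only as far as a hook needs; the script body and the `/run/user/1000/bus` path are illustrative, taken from the note itself:

    networking.dhcpcd.runHook = ''
      if [ "$reason" = BOUND ]; then
        logger "dhcpcd bound $interface to $new_ip_address"
      fi
    '';
    systemd.services.dhcpcd.serviceConfig = {
      NoNewPrivileges = false; # only needed if the hook calls sudo or similar
      ReadOnlyPaths = [ "/run/user/1000/bus" ]; # e.g. to send desktop notifications
    };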

View file

@@ -133,6 +133,11 @@
       dnsmasq_conf=/etc/dnsmasq-conf.conf
       dnsmasq_resolv=/etc/dnsmasq-resolv.conf
     '';
+    subscriberFiles = [
+      "/etc/dnsmasq-conf.conf"
+      "/etc/dnsmasq-resolv.conf"
+    ];
   };

   systemd.services.dnsmasq = {

View file

@@ -63,7 +63,7 @@ let
       };
       url = mkOption {
         type = types.str;
-        example = "fedimint://p2p.myfedimint.com";
+        example = "fedimint://p2p.myfedimint.com:8173";
         description = ''
           Public address for p2p connections from peers
         '';
@@ -159,6 +159,12 @@ let
           example = "api.myfedimint.com";
           description = "Public domain of the API address of the reverse proxy/tls terminator.";
         };
+        path = mkOption {
+          type = types.str;
+          example = "/";
+          default = "/ws/";
+          description = "Path to host the API on and forward to the daemon's api port";
+        };
         config = mkOption {
           type = types.submodule (
             recursiveUpdate (import ../web-servers/nginx/vhost-options.nix {
@@ -286,8 +292,7 @@ in
           # overriden by default value from vhost-options.nix
           enableACME = mkOverride 99 true;
           forceSSL = mkOverride 99 true;
-          # Currently Fedimint API only support JsonRPC on `/ws/` endpoint, so no need to handle `/`
-          locations."/ws/" = {
+          locations.${cfg.nginx.path} = {
             proxyPass = "http://127.0.0.1:${toString cfg.api.port}/";
             proxyWebsockets = true;
             extraConfig = ''
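
A hedged sketch of the new `nginx.path` option, serving the API on `/` instead of the default `/ws/`; the per-federation `services.fedimintd."<name>"` namespace is assumed from the rest of this module and the instance name is a placeholder:

    services.fedimintd."fedimint".nginx.path = "/";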

View file

@@ -1,10 +1,55 @@
 { config, lib, pkgs, ... }:
 let
   cfg = config.services.frr;
-  services = [
-    "static"
+  daemons = [
+    "bgpd"
+    "ospfd"
+    "ospf6d"
+    "ripd"
+    "ripngd"
+    "isisd"
+    "pimd"
+    "pim6d"
+    "ldpd"
+    "nhrpd"
+    "eigrpd"
+    "babeld"
+    "sharpd"
+    "pbrd"
+    "bfdd"
+    "fabricd"
+    "vrrpd"
+    "pathd"
+  ];
+  daemonDefaultOptions = {
+    zebra = "-A 127.0.0.1 -s 90000000";
+    mgmtd = "-A 127.0.0.1";
+    bgpd = "-A 127.0.0.1";
+    ospfd = "-A 127.0.0.1";
+    ospf6d = "-A ::1";
+    ripd = "-A 127.0.0.1";
+    ripngd = "-A ::1";
+    isisd = "-A 127.0.0.1";
+    pimd = "-A 127.0.0.1";
+    pim6d = "-A ::1";
+    ldpd = "-A 127.0.0.1";
+    nhrpd = "-A 127.0.0.1";
+    eigrpd = "-A 127.0.0.1";
+    babeld = "-A 127.0.0.1";
+    sharpd = "-A 127.0.0.1";
+    pbrd = "-A 127.0.0.1";
+    staticd = "-A 127.0.0.1";
+    bfdd = "-A 127.0.0.1";
+    fabricd = "-A 127.0.0.1";
+    vrrpd = "-A 127.0.0.1";
+    pathd = "-A 127.0.0.1";
+  };
+  renamedServices = [
     "bgp"
     "ospf"
     "ospf6"
@@ -22,210 +67,194 @@ let
     "fabric"
   ];
-  allServices = services ++ [ "zebra" "mgmt" ];
+  obsoleteServices = renamedServices ++ [ "static" "mgmt" "zebra" ];
+  allDaemons = builtins.attrNames daemonDefaultOptions;

   isEnabled = service: cfg.${service}.enable;

-  daemonName = service: if service == "zebra" then service else "${service}d";
+  daemonLine = d: "${d}=${if isEnabled d then "yes" else "no"}";

-  configFile = service:
-    let
-      scfg = cfg.${service};
-    in
-    if scfg.configFile != null then scfg.configFile
-    else pkgs.writeText "${daemonName service}.conf"
-      ''
-        ! FRR ${daemonName service} configuration
-        !
-        hostname ${config.networking.hostName}
-        log syslog
-        service password-encryption
-        !
-        ${scfg.config}
-        !
-        end
-      '';
+  configFile =
+    if cfg.configFile != null then
+      cfg.configFile
+    else
+      pkgs.writeText "frr.conf" ''
+        ! FRR configuration
+        !
+        hostname ${config.networking.hostName}
+        log syslog
+        service password-encryption
+        service integrated-vtysh-config
+        !
+        ${cfg.config}
+        !
+        end
+      '';

-  serviceOptions = service:
+  serviceOptions =
+    service:
     {
-      enable = lib.mkEnableOption "the FRR ${lib.toUpper service} routing protocol";
-      configFile = lib.mkOption {
-        type = lib.types.nullOr lib.types.path;
-        default = null;
-        example = "/etc/frr/${daemonName service}.conf";
+      options = lib.mkOption {
+        type = lib.types.listOf lib.types.str;
+        default = [ daemonDefaultOptions.${service} ];
         description = ''
-          Configuration file to use for FRR ${daemonName service}.
-          By default the NixOS generated files are used.
+          Options for the FRR ${service} daemon.
         '';
       };
-      config = lib.mkOption {
-        type = lib.types.lines;
-        default = "";
-        example =
-          let
-            examples = {
-              rip = ''
-                router rip
-                  network 10.0.0.0/8
-              '';
-              ospf = ''
-                router ospf
-                  network 10.0.0.0/8 area 0
-              '';
-              bgp = ''
-                router bgp 65001
-                  neighbor 10.0.0.1 remote-as 65001
-              '';
-            };
-          in
-          examples.${service} or "";
-        description = ''
-          ${daemonName service} configuration statements.
-        '';
-      };
-      vtyListenAddress = lib.mkOption {
-        type = lib.types.str;
-        default = "localhost";
-        description = ''
-          Address to bind to for the VTY interface.
-        '';
-      };
-      vtyListenPort = lib.mkOption {
-        type = lib.types.nullOr lib.types.int;
-        default = null;
-        description = ''
-          TCP Port to bind to for the VTY interface.
-        '';
-      };
       extraOptions = lib.mkOption {
         type = lib.types.listOf lib.types.str;
-        default = [];
+        default = [ ];
         description = ''
-          Extra options for the daemon.
+          Extra options to be appended to the FRR ${service} daemon options.
         '';
       };
-    };
+    }
+    // (if (builtins.elem service daemons) then { enable = lib.mkEnableOption "FRR ${service}"; } else { });
 in
 {
   ###### interface

-  imports = [
-    {
-      options.services.frr = {
-        zebra = (serviceOptions "zebra") // {
-          enable = lib.mkOption {
-            type = lib.types.bool;
-            default = lib.any isEnabled services;
-            description = ''
-              Whether to enable the Zebra routing manager.
-
-              The Zebra routing manager is automatically enabled
-              if any routing protocols are configured.
-            '';
-          };
-        };
-        mgmt = (serviceOptions "mgmt") // {
-          enable = lib.mkOption {
-            type = lib.types.bool;
-            default = isEnabled "static";
-            defaultText = lib.literalExpression "config.services.frr.static.enable";
-            description = ''
-              Whether to enable the Configuration management daemon.
-
-              The Configuration management daemon is automatically
-              enabled if needed, at the moment this is when staticd
-              is enabled.
-            '';
-          };
-        };
-      };
-    }
-    { options.services.frr = (lib.genAttrs services serviceOptions); }
-  ];
+  imports =
+    [
+      {
+        options.services.frr = {
+          configFile = lib.mkOption {
+            type = lib.types.nullOr lib.types.path;
+            default = null;
+            example = "/etc/frr/frr.conf";
+            description = ''
+              Configuration file to use for FRR.
+              By default the NixOS generated files are used.
+            '';
+          };
+          config = lib.mkOption {
+            type = lib.types.lines;
+            default = "";
+            example = ''
+              router rip
+                network 10.0.0.0/8
+
+              router ospf
+                network 10.0.0.0/8 area 0
+
+              router bgp 65001
+                neighbor 10.0.0.1 remote-as 65001
+            '';
+            description = ''
+              FRR configuration statements.
+            '';
+          };
+          openFilesLimit = lib.mkOption {
+            type = lib.types.ints.unsigned;
+            default = 1024;
+            description = ''
+              This is the maximum number of FD's that will be available. Use a
+              reasonable value for your setup if you are expecting a large number
+              of peers in say BGP.
+            '';
+          };
+        };
+      }
+      { options.services.frr = (lib.genAttrs allDaemons serviceOptions); }
+      (lib.mkRemovedOptionModule [ "services" "frr" "zebra" "enable" ] "FRR zebra is always enabled")
+    ]
+    ++ (map (d: lib.mkRenamedOptionModule [ "services" "frr" d "enable" ] [ "services" "frr" "${d}d" "enable" ]) renamedServices)
+    ++ (map (d: lib.mkRenamedOptionModule [ "services" "frr" d "extraOptions" ] [ "services" "frr" "${d}d" "extraOptions" ]) (renamedServices ++ [ "static" "mgmt" ]))
+    ++ (map (d: lib.mkRemovedOptionModule [ "services" "frr" d "enable" ] "FRR ${d}d is always enabled") [ "static" "mgmt" ])
+    ++ (map (d: lib.mkRemovedOptionModule [ "services" "frr" d "config" ] "FRR switched to integrated-vtysh-config, please use services.frr.config") obsoleteServices)
+    ++ (map (d: lib.mkRemovedOptionModule [ "services" "frr" d "configFile" ] "FRR switched to integrated-vtysh-config, please use services.frr.config or services.frr.configFile") obsoleteServices)
+    ++ (map (d: lib.mkRemovedOptionModule [ "services" "frr" d "vtyListenAddress" ] "Please change -A option in services.frr.${d}.options instead") obsoleteServices)
+    ++ (map (d: lib.mkRemovedOptionModule [ "services" "frr" d "vtyListenPort" ] "Please use `-P «vtyListenPort»` option with services.frr.${d}.extraOptions instead, or change services.frr.${d}.options accordingly") obsoleteServices)
+    ;

   ###### implementation

-  config = lib.mkIf (lib.any isEnabled allServices) {
-
-    environment.systemPackages = [
-      pkgs.frr # for the vtysh tool
-    ];
-
-    users.users.frr = {
-      description = "FRR daemon user";
-      isSystemUser = true;
-      group = "frr";
-    };
-
-    users.groups = {
-      frr = {};
-      # Members of the frrvty group can use vtysh to inspect the FRR daemons
-      frrvty = { members = [ "frr" ]; };
-    };
-
-    environment.etc = let
-      mkEtcLink = service: {
-        name = "frr/${daemonName service}.conf";
-        value.source = configFile service;
-      };
-    in
-      (builtins.listToAttrs
-        (map mkEtcLink (lib.filter isEnabled allServices))) // {
-          "frr/vtysh.conf".text = "";
-        };
-
-    systemd.tmpfiles.rules = [
-      "d /run/frr 0750 frr frr -"
-    ];
-
-    systemd.services =
-      let
-        frrService = service:
-          let
-            scfg = cfg.${service};
-            daemon = daemonName service;
-          in
-          lib.nameValuePair daemon ({
-            wantedBy = [ "multi-user.target" ];
-            after = [ "network-pre.target" "systemd-sysctl.service" ] ++ lib.optionals (service != "zebra") [ "zebra.service" ];
-            bindsTo = lib.optionals (service != "zebra") [ "zebra.service" ];
-            wants = [ "network.target" ];
-
-            description = if service == "zebra" then "FRR Zebra routing manager"
-              else "FRR ${lib.toUpper service} routing daemon";
-
-            unitConfig.Documentation = if service == "zebra" then "man:zebra(8)"
-              else "man:${daemon}(8) man:zebra(8)";
-
-            restartTriggers = lib.mkIf (service != "mgmt") [
-              (configFile service)
-            ];
-            reloadIfChanged = (service != "mgmt");
-
-            serviceConfig = {
-              PIDFile = "frr/${daemon}.pid";
-              ExecStart = "${pkgs.frr}/libexec/frr/${daemon}"
-                + lib.optionalString (scfg.vtyListenAddress != "") " -A ${scfg.vtyListenAddress}"
-                + lib.optionalString (scfg.vtyListenPort != null) " -P ${toString scfg.vtyListenPort}"
-                + " " + (lib.concatStringsSep " " scfg.extraOptions);
-              ExecReload = lib.mkIf (service != "mgmt") "${pkgs.python3.interpreter} ${pkgs.frr}/libexec/frr/frr-reload.py --reload --daemon ${daemon} --bindir ${pkgs.frr}/bin --rundir /run/frr /etc/frr/${daemon}.conf";
-              Restart = "on-abnormal";
-            };
-          });
-      in
-      lib.listToAttrs (map frrService (lib.filter isEnabled allServices));
-  };
+  config =
+    let
+      daemonList = lib.concatStringsSep "\n" (map daemonLine daemons);
+      daemonOptionLine = d: "${d}_options=\"${lib.concatStringsSep " " (cfg.${d}.options ++ cfg.${d}.extraOptions)}\"";
+      daemonOptions = lib.concatStringsSep "\n" (map daemonOptionLine allDaemons);
+    in
+    lib.mkIf (lib.any isEnabled daemons || cfg.configFile != null || cfg.config != "") {
+
+      environment.systemPackages = [
+        pkgs.frr # for the vtysh tool
+      ];
+
+      users.users.frr = {
+        description = "FRR daemon user";
+        isSystemUser = true;
+        group = "frr";
+      };
+
+      users.groups = {
+        frr = { };
+        # Members of the frrvty group can use vtysh to inspect the FRR daemons
+        frrvty = {
+          members = [ "frr" ];
+        };
+      };
+
+      environment.etc = {
+        "frr/frr.conf".source = configFile;
+        "frr/vtysh.conf".text = ''
+          service integrated-vtysh-config
+        '';
+        "frr/daemons".text = ''
+          # This file tells the frr package which daemons to start.
+          #
+          # The watchfrr, zebra and staticd daemons are always started.
+          #
+          # This part is auto-generated from services.frr.<daemon>.enable config
+          ${daemonList}
+
+          # If this option is set the /etc/init.d/frr script automatically loads
+          # the config via "vtysh -b" when the servers are started.
+          #
+          vtysh_enable=yes
+
+          # This part is auto-generated from services.frr.<daemon>.options or
+          # services.frr.<daemon>.extraOptions
+          ${daemonOptions}
+        '';
+      };
+
+      systemd.tmpfiles.rules = [ "d /run/frr 0750 frr frr -" ];
+
+      systemd.services.frr = {
+        description = "FRRouting";
+        documentation = [ "https://frrouting.readthedocs.io/en/latest/setup.html" ];
+        wants = [ "network.target" ];
+        after = [
+          "network-pre.target"
+          "systemd-sysctl.service"
+        ];
+        before = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        startLimitIntervalSec = 180;
+        reloadIfChanged = true;
+        restartTriggers = [
+          configFile
+          daemonList
+        ];
+        serviceConfig = {
+          Nice = -5;
+          Type = "forking";
+          NotifyAccess = "all";
+          StartLimitBurst = "3";
+          TimeoutSec = 120;
+          WatchdogSec = 60;
+          RestartSec = 5;
+          Restart = "always";
+          LimitNOFILE = cfg.openFilesLimit;
+          PIDFile = "/run/frr/watchfrr.pid";
+          ExecStart = "${pkgs.frr}/libexec/frr/frrinit.sh start";
+          ExecStop = "${pkgs.frr}/libexec/frr/frrinit.sh stop";
+          ExecReload = "${pkgs.frr}/libexec/frr/frrinit.sh reload";
+        };
+      };
+    };

   meta.maintainers = with lib.maintainers; [ woffs ];
 }
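
With this rewrite, the per-daemon `config`/`configFile` options are gone in favor of one integrated configuration driven by watchfrr. A minimal sketch under the new interface, using only options introduced above; the AS number, neighbor address, and VTY port are illustrative:

    services.frr = {
      bgpd.enable = true;
      config = ''
        router bgp 65001
         neighbor 10.0.0.1 remote-as 65001
      '';
      # `options` replaces the daemon's flags wholesale; `extraOptions` appends,
      # e.g. the VTY port formerly set via vtyListenPort:
      bgpd.extraOptions = [ "-P 2605" ];
    };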

View file

@@ -20,6 +20,11 @@
   settingsFormat = pkgs.formats.yaml {};
   configFile = settingsFormat.generate "headscale.yaml" cfg.settings;
   cliConfigFile = settingsFormat.generate "headscale.yaml" cliConfig;
+
+  assertRemovedOption = option: message: {
+    assertion = !lib.hasAttrByPath option cfg;
+    message = "The option `services.headscale.${lib.options.showOption option}` was removed. " + message;
+  };
 in {
   options = {
     services.headscale = {
@@ -82,21 +87,6 @@ in {
         type = lib.types.submodule {
           freeformType = settingsFormat.type;

-          imports = with lib; [
-            (mkAliasOptionModule ["acl_policy_path"] ["policy" "path"])
-            (mkAliasOptionModule ["db_host"] ["database" "postgres" "host"])
-            (mkAliasOptionModule ["db_name"] ["database" "postgres" "name"])
-            (mkAliasOptionModule ["db_password_file"] ["database" "postgres" "password_file"])
-            (mkAliasOptionModule ["db_path"] ["database" "sqlite" "path"])
-            (mkAliasOptionModule ["db_port"] ["database" "postgres" "port"])
-            (mkAliasOptionModule ["db_type"] ["database" "type"])
-            (mkAliasOptionModule ["db_user"] ["database" "postgres" "user"])
-            (mkAliasOptionModule ["dns_config" "base_domain"] ["dns" "base_domain"])
-            (mkAliasOptionModule ["dns_config" "domains"] ["dns" "search_domains"])
-            (mkAliasOptionModule ["dns_config" "magic_dns"] ["dns" "magic_dns"])
-            (mkAliasOptionModule ["dns_config" "nameservers"] ["dns" "nameservers" "global"])
-          ];
-
           options = {
             server_url = lib.mkOption {
               type = lib.types.str;
@@ -299,7 +289,6 @@ in {
                 default = true;
                 description = ''
                   Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
-                  Only works if there is at least a nameserver defined.
                 '';
                 example = false;
               };
@@ -309,11 +298,13 @@ in {
                 default = "";
                 description = ''
                   Defines the base domain to create the hostnames for MagicDNS.
-                  {option}`baseDomain` must be a FQDNs, without the trailing dot.
-                  The FQDN of the hosts will be
-                  `hostname.namespace.base_domain` (e.g.
-                  `myhost.mynamespace.example.com`).
+                  This domain must be different from the {option}`server_url`
+                  domain.
+                  {option}`base_domain` must be a FQDN, without the trailing dot.
+                  The FQDN of the hosts will be `hostname.base_domain` (e.g.
+                  `myhost.tailnet.example.com`).
                 '';
+                example = "tailnet.example.com";
               };

               nameservers = {
@@ -500,6 +491,30 @@ in {
   ];

   config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        # This is stricter than it needs to be but is exactly what upstream does:
+        # https://github.com/kradalby/headscale/blob/adc084f20f843d7963c999764fa83939668d2d2c/hscontrol/types/config.go#L799
+        assertion = with cfg.settings; dns.use_username_in_magic_dns or false || dns.base_domain == "" || !lib.hasInfix dns.base_domain server_url;
+        message = "server_url cannot contain the base_domain, this will cause the headscale server and embedded DERP to become unreachable from the Tailscale node.";
+      }
+      {
+        assertion = with cfg.settings; dns.magic_dns -> dns.base_domain != "";
+        message = "dns.base_domain must be set when using MagicDNS";
+      }
+      (assertRemovedOption ["settings" "acl_policy_path"] "Use `policy.path` instead.")
+      (assertRemovedOption ["settings" "db_host"] "Use `database.postgres.host` instead.")
+      (assertRemovedOption ["settings" "db_name"] "Use `database.postgres.name` instead.")
+      (assertRemovedOption ["settings" "db_password_file"] "Use `database.postgres.password_file` instead.")
+      (assertRemovedOption ["settings" "db_path"] "Use `database.sqlite.path` instead.")
+      (assertRemovedOption ["settings" "db_port"] "Use `database.postgres.port` instead.")
+      (assertRemovedOption ["settings" "db_type"] "Use `database.type` instead.")
+      (assertRemovedOption ["settings" "db_user"] "Use `database.postgres.user` instead.")
+      (assertRemovedOption ["settings" "dns_config"] "Use `dns` instead.")
+      (assertRemovedOption ["settings" "dns_config" "domains"] "Use `dns.search_domains` instead.")
+      (assertRemovedOption ["settings" "dns_config" "nameservers"] "Use `dns.nameservers.global` instead.")
+    ];
+
     services.headscale.settings = lib.mkMerge [
       cliConfig
       {
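
The alias modules are gone, so configurations must use the nested settings paths that the new assertions enforce. A minimal sketch of the current layout; the domains and nameserver are placeholders:

    services.headscale.settings = {
      server_url = "https://headscale.example.com";
      dns = {
        magic_dns = true;
        base_domain = "tailnet.example.com"; # must differ from the server_url domain
        nameservers.global = [ "1.1.1.1" ];  # formerly dns_config.nameservers
      };
      database.type = "sqlite";              # formerly db_type
    };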

View file

@@ -337,6 +337,7 @@ in {
           SystemCallFilter = [
             "@system-service"
             "~@privileged"
+            "@chown"
           ] ++ optionals (cfg.enableXDP) [
             "bpf"
           ];

View file

@@ -62,6 +62,7 @@ in {
       default = "";
       description = ''
         Extra lines to be added verbatim to the generated configuration file.
+        See upstream documentation <https://www.knot-resolver.cz/documentation/stable/config-overview.html> for more details.
       '';
     };
     listenPlain = lib.mkOption {
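
Since the description now points at the upstream configuration manual, a short hedged sketch of what such verbatim lines carry, assuming this option is the knot-resolver module's `services.kresd.extraConfig`; kresd is configured in Lua and the cache line is illustrative:

    services.kresd.extraConfig = ''
      -- verbatim kresd Lua configuration, appended to the generated file
      cache.size = 100 * MB
    '';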

View file

@@ -349,6 +349,7 @@ in
         RestrictRealtime = true;
         SystemCallArchitectures = "native";
         SystemCallFilter = "@system-service";
+        UMask = 027;
       };
     };

View file

@@ -5,8 +5,6 @@ with lib;
 let
   cfg = config.services.resilio;

-  resilioSync = pkgs.resilio-sync;
-
   sharedFoldersRecord = map (entry: {
     dir = entry.directory;
@@ -83,6 +81,8 @@ in
         '';
       };

+      package = mkPackageOption pkgs "resilio-sync" { };
+
       deviceName = mkOption {
         type = types.str;
         example = "Voltron";
@@ -285,7 +285,7 @@ in
         RuntimeDirectory = "rslsync";
         ExecStartPre = "${createConfig}/bin/create-resilio-config";
         ExecStart = ''
-          ${resilioSync}/bin/rslsync --nodaemon --config ${runConfigPath}
+          ${lib.getExe cfg.package} --nodaemon --config ${runConfigPath}
         '';
       };
     };

View file

@@ -58,7 +58,7 @@ in {
         install -D -d -m 750 /var/lib/shorewall
         install -D -d -m 755 /var/lock/subsys
         touch  /var/log/shorewall.log
-        chown 750 /var/log/shorewall.log
+        chmod 750 /var/log/shorewall.log
       '';
     };
     environment = {

View file

@@ -58,7 +58,7 @@ in {
         install -D -d -m 750 /var/lib/shorewall6
         install -D -d -m 755 /var/lock/subsys
         touch  /var/log/shorewall6.log
-        chown 750 /var/log/shorewall6.log
+        chmod 750 /var/log/shorewall6.log
       '';
     };
     environment = {

View file

@ -0,0 +1,159 @@
{
lib,
config,
pkgs,
...
}:
let
cfg = config.services.zapret;
whitelist = lib.optionalString (
cfg.whitelist != null
) "--hostlist ${pkgs.writeText "zapret-whitelist" (lib.concatStringsSep "\n" cfg.whitelist)}";
blacklist =
lib.optionalString (cfg.blacklist != null)
"--hostlist-exclude ${pkgs.writeText "zapret-blacklist" (lib.concatStringsSep "\n" cfg.blacklist)}";
ports = if cfg.httpSupport then "80,443" else "443";
in
{
options.services.zapret = {
enable = lib.mkEnableOption "the Zapret DPI bypass service.";
package = lib.mkPackageOption pkgs "zapret" { };
params = lib.mkOption {
default = [ ];
type = with lib.types; listOf str;
example = ''
[
"--dpi-desync=fake,disorder2"
"--dpi-desync-ttl=1"
"--dpi-desync-autottl=2"
];
'';
description = ''
Specify the bypass parameters for Zapret binary.
There are no universal parameters as they vary between different networks, so you'll have to find them yourself.
This can be done by running the `blockcheck` binary from zapret package, i.e. `nix-shell -p zapret --command blockcheck`.
It'll try different params and then tell you which params are working for your network.
'';
};
whitelist = lib.mkOption {
default = null;
type = with lib.types; nullOr (listOf str);
example = ''
[
"youtube.com"
"googlevideo.com"
"ytimg.com"
"youtu.be"
]
'';
description = ''
Specify a list of domains to bypass. All other domains will be ignored.
You can specify either whitelist or blacklist, but not both.
If neither are specified, then bypass all domains.
It is recommended to specify the whitelist. This will make sure that other resources won't be affected by this service.
'';
};
blacklist = lib.mkOption {
default = null;
type = with lib.types; nullOr (listOf str);
example = ''
[
"example.com"
]
'';
description = ''
Specify a list of domains NOT to bypass. All other domains will be bypassed.
You can specify either whitelist or blacklist, but not both.
If neither are specified, then bypass all domains.
'';
};
qnum = lib.mkOption {
default = 200;
type = lib.types.int;
description = ''
Routing queue number.
Only change this if you already use the default queue number somewhere else.
'';
};
configureFirewall = lib.mkOption {
default = true;
type = lib.types.bool;
description = ''
Whether to set up firewall routing so that system http(s) traffic is forwarded via this service.
Disable if you want to set it up manually.
'';
};
httpSupport = lib.mkOption {
default = true;
type = lib.types.bool;
description = ''
Whether to route http traffic on port 80.
Http bypass rarely works and you might want to disable it if you don't utilise http connections.
'';
};
};
config = lib.mkIf cfg.enable (
lib.mkMerge [
{
assertions = [
{
assertion = (cfg.whitelist == null) || (cfg.blacklist == null);
message = "Can't specify both whitelist and blacklist.";
}
{
assertion = (builtins.length cfg.params) != 0;
message = "You have to specify zapret parameters. See the params option's description.";
}
];
systemd.services.zapret = {
description = "DPI bypass service";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
ExecStart = "${cfg.package}/bin/nfqws --pidfile=/run/nfqws.pid ${lib.concatStringsSep " " cfg.params} ${whitelist} ${blacklist} --qnum=${toString cfg.qnum}";
Type = "simple";
PIDFile = "/run/nfqws.pid";
Restart = "always";
RuntimeMaxSec = "1h"; # This service loves to crash silently or cause network slowdowns. It also restarts instantly. In my experience restarting it hourly provided the best experience.
# hardening
DevicePolicy = "closed";
KeyringMode = "private";
PrivateTmp = true;
PrivateMounts = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectSystem = "strict";
ProtectProc = "invisible";
RemoveIPC = true;
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
};
};
}
# Route system traffic via service for specified ports.
(lib.mkIf cfg.configureFirewall {
networking.firewall.extraCommands = ''
iptables -t mangle -I POSTROUTING -p tcp -m multiport --dports ${ports} -m connbytes --connbytes-dir=original --connbytes-mode=packets --connbytes 1:6 -m mark ! --mark 0x40000000/0x40000000 -j NFQUEUE --queue-num ${toString cfg.qnum} --queue-bypass
'';
})
]
);
meta.maintainers = with lib.maintainers; [
voronind
nishimara
];
}
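
A minimal sketch of enabling the new module; the desync parameters come from the option examples above and are not known-good values, so run `blockcheck` first as the `params` description advises:

    services.zapret = {
      enable = true;
      params = [
        "--dpi-desync=fake,disorder2"
        "--dpi-desync-ttl=1"
      ];
      # A whitelist is recommended so other traffic is left untouched:
      whitelist = [ "youtube.com" "googlevideo.com" ];
    };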

View file

@@ -16,6 +16,11 @@ in
         timezone up-to-date based on the current location. It uses geoclue2 to
         determine the current location and systemd-timedated to actually set
         the timezone.
+
+        To avoid silent overriding by the service, if you have explicitly set a
+        timezone, either remove it or ensure that it is set with a lower priority
+        than the default value using `lib.mkDefault` or `lib.mkOverride`. This is
+        to make the choice deliberate. An error will be presented otherwise.
       '';
     };
     package = mkPackageOption pkgs "automatic-timezoned" { };
@@ -23,6 +28,10 @@
   };

   config = mkIf cfg.enable {
+    # This will give users an error if they have set an explicit time
+    # zone, rather than having the service silently override it.
+    time.timeZone = null;
+
     security.polkit.extraConfig = ''
       polkit.addRule(function(action, subject) {
         if (action.id == "org.freedesktop.timedate1.set-timezone"
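
Per the new note, an explicitly chosen timezone must now yield to the module's `time.timeZone = null`, otherwise evaluation fails with conflicting definitions. A minimal sketch; the zone name is a placeholder:

    services.automatic-timezoned.enable = true;
    # mkDefault gives this a lower priority than the module's null, so it only
    # applies when the service is disabled:
    time.timeZone = lib.mkDefault "Europe/Berlin";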

View file

@@ -16,6 +16,11 @@ in {
         Enable `localtimed`, a simple daemon for keeping the
         system timezone up-to-date based on the current location. It uses
         geoclue2 to determine the current location.
+
+        To avoid silent overriding by the service, if you have explicitly set a
+        timezone, either remove it or ensure that it is set with a lower priority
+        than the default value using `lib.mkDefault` or `lib.mkOverride`. This is
+        to make the choice deliberate. An error will be presented otherwise.
       '';
     };
     package = mkPackageOption pkgs "localtime" { };
@@ -24,6 +29,10 @@ in {
   };

   config = mkIf cfg.enable {
+    # This will give users an error if they have set an explicit time
+    # zone, rather than having the service silently override it.
+    time.timeZone = null;
+
     services.geoclue2.appConfig.localtimed = {
       isAllowed = true;
       isSystem = true;

View file

@@ -100,7 +100,7 @@ in
         lib.nameValuePair (toString opts.home) {
           d = {
             mode = opts.homeMode;
-            user = username;
+            user = opts.name;
             inherit (opts) group;
           };
         }

View file

@@ -55,8 +55,8 @@ in
     ffmpeg = {
       bin = mkOption {
         type = path;
-        default = lib.getExe pkgs.ffmpeg_7-headless;
-        defaultText = literalExpression "lib.getExe pkgs.ffmpeg_7-headless";
+        default = lib.getExe pkgs.ffmpeg-headless;
+        defaultText = literalExpression "lib.getExe pkgs.ffmpeg-headless";
         description = ''
           The ffmpeg package to use for transcoding.
         '';

View file

@@ -17,7 +17,10 @@ in
   config = lib.mkIf cfg.enable {
     environment.systemPackages = [ cfg.package ];

-    systemd.packages = [ cfg.package ];
+    systemd = {
+      packages = [ cfg.package ];
+      user.services.hypridle.wantedBy = [ "graphical-session.target" ];
+    };
   };

   meta.maintainers = with lib.maintainers; [ johnrtitor ];

View file

@@ -49,10 +49,10 @@ let
     in
       if isString v then toPhpString v
       # NOTE: If any value contains a , (comma) this will not get escaped
-      else if isList v && any lib.strings.isCoercibleToString v then toPhpString (concatMapStringsSep "," toString v)
+      else if isList v && strings.isConvertibleWithToString v then toPhpString (concatMapStringsSep "," toString v)
       else if isInt v then toString v
       else if isBool v then toString (if v then 1 else 0)
-      else if isHasAttr "_file" then "trim(file_get_contents(${toPhpString v._file}))"
+      else if isHasAttr "_file" then "trim(file_get_contents(${toPhpString (toString v._file)}))"
       else if isHasAttr "_raw" then v._raw
       else abort "The dokuwiki localConf value ${lib.generators.toPretty {} v} can not be encoded."
     ;
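
The `_file` branch above now coerces path values with `toString` before quoting. A hedged sketch of how a localConf value would use it, assuming the per-site freeform settings this serializer feeds; the site name and secret path are placeholders:

    services.dokuwiki.sites."wiki.example.com".settings = {
      useacl = true;
      # `_file` defers reading to PHP runtime via trim(file_get_contents(...)),
      # so the value stays out of the Nix store:
      superuser._file = "/run/secrets/dokuwiki-superuser";
    };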

View file

@@ -4,8 +4,6 @@ with lib;
 let
   cfg = config.services.freshrss;

-  poolName = "freshrss";
-
   extension-env = pkgs.buildEnv {
     name = "freshrss-extensions";
     paths = cfg.extensions;
@@ -141,8 +139,8 @@ in
     };

     pool = mkOption {
-      type = types.str;
-      default = poolName;
+      type = types.nullOr types.str;
+      default = "freshrss";
       description = ''
         Name of the php-fpm pool to use and setup. If not specified, a pool will be created
         with default values.
@@ -235,8 +233,8 @@ in
     };

     # Set up phpfpm pool
-    services.phpfpm.pools = mkIf (cfg.pool == poolName) {
-      ${poolName} = {
+    services.phpfpm.pools = mkIf (cfg.pool != null) {
+      ${cfg.pool} = {
         user = "freshrss";
         settings = {
           "listen.owner" = "nginx";
@@ -271,9 +269,9 @@ in
       let
         settingsFlags = concatStringsSep " \\\n "
          (mapAttrsToList (k: v: "${k} ${toString v}") {
-            "--default_user" = ''"${cfg.defaultUser}"'';
-            "--auth_type" = ''"${cfg.authType}"'';
-            "--base_url" = ''"${cfg.baseUrl}"'';
+            "--default-user" = ''"${cfg.defaultUser}"'';
+            "--auth-type" = ''"${cfg.authType}"'';
+            "--base-url" = ''"${cfg.baseUrl}"'';
             "--language" = ''"${cfg.language}"'';
             "--db-type" = ''"${cfg.database.type}"'';
             # The following attributes are optional depending on the type of
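
`pool` is now nullable: a string still creates a module-managed php-fpm pool (under any name), while `null` skips pool creation entirely so an externally managed pool can be reused. A short sketch of both directions:

    # Rename the module-managed pool:
    services.freshrss.pool = "freshrss-custom";

    # Or manage php-fpm yourself and opt out of the generated pool:
    # services.freshrss.pool = null;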

View file

@@ -222,6 +222,27 @@ in
         StandardOutput = "journal";
         StateDirectory = "gerrit";
         WorkingDirectory = "%S/gerrit";
+        AmbientCapabilities = "";
+        CapabilityBoundingSet = "";
+        LockPersonality = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "noaccess";
+        ProtectSystem = "full";
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        UMask = 027;
       };
     };
   };

View file

@@ -91,7 +91,7 @@ in
     };
     port = mkOption {
       type = types.port;
-      default = 3001;
+      default = 2283;
       description = "The port that immich will listen on.";
     };
     openFirewall = mkOption {
@@ -227,7 +227,6 @@ in
     services.redis.servers = mkIf cfg.redis.enable {
       immich = {
         enable = true;
-        user = cfg.user;
         port = cfg.redis.port;
         bind = mkIf (!isRedisUnixSocket) cfg.redis.host;
       };
@@ -286,6 +285,10 @@ in
       RuntimeDirectory = "immich";
       User = cfg.user;
       Group = cfg.group;
+      # ensure that immich-server has permission to connect to the redis socket.
+      SupplementaryGroups = mkIf (cfg.redis.enable && isRedisUnixSocket) [
+        config.services.redis.servers.immich.group
+      ];
     };
   };
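
The port default now tracks upstream (2283 instead of 3001). A minimal sketch for deployments whose reverse proxies still expect the old default:

    services.immich = {
      enable = true;
      port = 3001; # pin the pre-change default explicitly
    };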

View file

@@ -31,7 +31,7 @@ let
   mkPhpValue = v:
     if isString v then escapeShellArg v
     # NOTE: If any value contains a , (comma) this will not get escaped
-    else if isList v && any lib.strings.isCoercibleToString v then escapeShellArg (concatMapStringsSep "," toString v)
+    else if isList v && strings.isConvertibleWithToString v then escapeShellArg (concatMapStringsSep "," toString v)
     else if isInt v then toString v
     else if isBool v then boolToString v
     else abort "The Invoiceplane config value ${lib.generators.toPretty {} v} can not be encoded."

View file

@@ -12,9 +12,12 @@ let
     RAILS_ENV = "production";
     NODE_ENV = "production";

+    BOOTSNAP_CACHE_DIR="/var/cache/mastodon/precompile";
     LD_PRELOAD = "${pkgs.jemalloc}/lib/libjemalloc.so";

-    # mastodon-web concurrency.
+    MASTODON_USE_LIBVIPS = "true";
+
+    # Concurrency mastodon-web
     WEB_CONCURRENCY = toString cfg.webProcesses;
     MAX_THREADS = toString cfg.webThreads;
@@ -24,7 +27,7 @@ let
     DB_NAME = cfg.database.name;
     LOCAL_DOMAIN = cfg.localDomain;
     SMTP_SERVER = cfg.smtp.host;
-    SMTP_PORT = toString(cfg.smtp.port);
+    SMTP_PORT = toString cfg.smtp.port;
     SMTP_FROM_ADDRESS = cfg.smtp.fromAddress;
     PAPERCLIP_ROOT_PATH = "/var/lib/mastodon/public-system";
     PAPERCLIP_ROOT_URL = "/system";
@@ -33,12 +36,12 @@ let
     TRUSTED_PROXY_IP = cfg.trustedProxy;
   }
   // lib.optionalAttrs (cfg.redis.host != null) { REDIS_HOST = cfg.redis.host; }
-  // lib.optionalAttrs (cfg.redis.port != null) { REDIS_PORT = toString(cfg.redis.port); }
+  // lib.optionalAttrs (cfg.redis.port != null) { REDIS_PORT = toString cfg.redis.port; }
   // lib.optionalAttrs (cfg.redis.createLocally && cfg.redis.enableUnixSocket) { REDIS_URL = "unix://${config.services.redis.servers.mastodon.unixSocket}"; }
   // lib.optionalAttrs (cfg.database.host != "/run/postgresql" && cfg.database.port != null) { DB_PORT = toString cfg.database.port; }
   // lib.optionalAttrs cfg.smtp.authenticate { SMTP_LOGIN = cfg.smtp.user; }
   // lib.optionalAttrs (cfg.elasticsearch.host != null) { ES_HOST = cfg.elasticsearch.host; }
-  // lib.optionalAttrs (cfg.elasticsearch.host != null) { ES_PORT = toString(cfg.elasticsearch.port); }
+  // lib.optionalAttrs (cfg.elasticsearch.host != null) { ES_PORT = toString cfg.elasticsearch.port; }
   // lib.optionalAttrs (cfg.elasticsearch.host != null) { ES_PRESET = cfg.elasticsearch.preset; }
   // lib.optionalAttrs (cfg.elasticsearch.user != null) { ES_USER = cfg.elasticsearch.user; }
   // cfg.extraConfig;
@@ -51,6 +54,9 @@ let
     Group = cfg.group;
     # Working directory
     WorkingDirectory = cfg.package;
+    # Cache directory and mode
+    CacheDirectory = "mastodon";
+    CacheDirectoryMode = "0750";
     # State directory and mode
     StateDirectory = "mastodon";
     StateDirectoryMode = "0750";
@@ -127,7 +133,7 @@ let
       description = "Mastodon sidekiq${jobClassLabel}";
       wantedBy = [ "mastodon.target" ];
       environment = env // {
-        PORT = toString(cfg.sidekiqPort);
+        PORT = toString cfg.sidekiqPort;
         DB_POOL = threads;
       };
       serviceConfig = {
@@ -309,7 +315,7 @@ in {
          Voluntary Application Server Identification. A new keypair can
          be generated by running:

-          `nix build -f '<nixpkgs>' mastodon; cd result; bin/rake webpush:generate_keys`
+          `nix build -f '<nixpkgs>' mastodon; cd result; RAILS_ENV=production bin/rake webpush:generate_keys`

          If {option}`mastodon.vapidPrivateKeyFile`does not
          exist, it and this file will be created with a new keypair.
@@ -324,12 +330,57 @@ in {
        type = lib.types.str;
      };

+      activeRecordEncryptionDeterministicKeyFile = lib.mkOption {
+        description = ''
+          This key must be set to enable the Active Record Encryption feature within
+          Rails that Mastodon uses to encrypt and decrypt some database attributes.
+          A new Active Record keys can be generated by running:
+
+          `nix build -f '<nixpkgs>' mastodon; cd result; RAILS_ENV=production ./bin/rails db:encryption:init`
+
+          If this file does not exist, it will be created with a new Active Record
+          keys.
+        '';
+        default = "/var/lib/mastodon/secrets/active-record-encryption-deterministic-key";
+        type = lib.types.str;
+      };
+
+      activeRecordEncryptionKeyDerivationSaltFile = lib.mkOption {
+        description = ''
+          This key must be set to enable the Active Record Encryption feature within
+          Rails that Mastodon uses to encrypt and decrypt some database attributes.
+          A new Active Record keys can be generated by running:
+
+          `nix build -f '<nixpkgs>' mastodon; cd result; RAILS_ENV=production ./bin/rails db:encryption:init`
+
+          If this file does not exist, it will be created with a new Active Record
+          keys.
+        '';
+        default = "/var/lib/mastodon/secrets/active-record-encryption-key-derivation-salt";
+        type = lib.types.str;
+      };
+
+      activeRecordEncryptionPrimaryKeyFile = lib.mkOption {
+        description = ''
+          This key must be set to enable the Active Record Encryption feature within
+          Rails that Mastodon uses to encrypt and decrypt some database attributes.
+          A new Active Record keys can be generated by running:
+
+          `nix build -f '<nixpkgs>' mastodon; cd result; RAILS_ENV=production ./bin/rails db:encryption:init`
+
+          If this file does not exist, it will be created with a new Active Record
+          keys.
+        '';
+        default = "/var/lib/mastodon/secrets/active-record-encryption-primary-key";
+        type = lib.types.str;
+      };
+
       secretKeyBaseFile = lib.mkOption {
         description = ''
           Path to file containing the secret key base.
           A new secret key base can be generated by running:

-          `nix build -f '<nixpkgs>' mastodon; cd result; bin/rake secret`
+          `nix build -f '<nixpkgs>' mastodon; cd result; bin/bundle exec rails secret`

           If this file does not exist, it will be created with a new secret key base.
         '';
@@ -342,7 +393,7 @@ in {
           Path to file containing the OTP secret.
           A new OTP secret can be generated by running:

-          `nix build -f '<nixpkgs>' mastodon; cd result; bin/rake secret`
+          `nix build -f '<nixpkgs>' mastodon; cd result; bin/bundle exec rails secret`

           If this file does not exist, it will be created with a new OTP secret.
         '';
@@ -708,13 +759,28 @@ in {
       script = ''
         umask 077

+        if ! test -d /var/cache/mastodon/precompile; then
+          ${cfg.package}/bin/bundle exec bootsnap precompile --gemfile ${cfg.package}/app ${cfg.package}/lib
+        fi
+        if ! test -f ${cfg.activeRecordEncryptionDeterministicKeyFile}; then
+          mkdir -p $(dirname ${cfg.activeRecordEncryptionDeterministicKeyFile})
+          bin/rails db:encryption:init | grep --only-matching "ACTIVE_RECORD_ENCRYPTION_DETERMINISTIC_KEY=[^ ]\+" | sed 's/^ACTIVE_RECORD_ENCRYPTION_DETERMINISTIC_KEY=//' > ${cfg.activeRecordEncryptionDeterministicKeyFile}
+        fi
+        if ! test -f ${cfg.activeRecordEncryptionKeyDerivationSaltFile}; then
+          mkdir -p $(dirname ${cfg.activeRecordEncryptionKeyDerivationSaltFile})
+          bin/rails db:encryption:init | grep --only-matching "ACTIVE_RECORD_ENCRYPTION_KEY_DERIVATION_SALT=[^ ]\+" | sed 's/^ACTIVE_RECORD_ENCRYPTION_KEY_DERIVATION_SALT=//' > ${cfg.activeRecordEncryptionKeyDerivationSaltFile}
+        fi
+        if ! test -f ${cfg.activeRecordEncryptionPrimaryKeyFile}; then
+          mkdir -p $(dirname ${cfg.activeRecordEncryptionPrimaryKeyFile})
+          bin/rails db:encryption:init | grep --only-matching "ACTIVE_RECORD_ENCRYPTION_PRIMARY_KEY=[^ ]\+" | sed 's/^ACTIVE_RECORD_ENCRYPTION_PRIMARY_KEY=//' > ${cfg.activeRecordEncryptionPrimaryKeyFile}
+        fi
         if ! test -f ${cfg.secretKeyBaseFile}; then
           mkdir -p $(dirname ${cfg.secretKeyBaseFile})
-          bin/rake secret > ${cfg.secretKeyBaseFile}
+          bin/bundle exec rails secret > ${cfg.secretKeyBaseFile}
         fi
         if ! test -f ${cfg.otpSecretFile}; then
           mkdir -p $(dirname ${cfg.otpSecretFile})
-          bin/rake secret > ${cfg.otpSecretFile}
+          bin/bundle exec rails secret > ${cfg.otpSecretFile}
         fi
         if ! test -f ${cfg.vapidPrivateKeyFile}; then
           mkdir -p $(dirname ${cfg.vapidPrivateKeyFile}) $(dirname ${cfg.vapidPublicKeyFile})
@@ -724,6 +790,9 @@ in {
         fi

         cat > /var/lib/mastodon/.secrets_env <<EOF
+        ACTIVE_RECORD_ENCRYPTION_DETERMINISTIC_KEY="$(cat ${cfg.activeRecordEncryptionDeterministicKeyFile})"
+        ACTIVE_RECORD_ENCRYPTION_KEY_DERIVATION_SALT="$(cat ${cfg.activeRecordEncryptionKeyDerivationSaltFile})"
+        ACTIVE_RECORD_ENCRYPTION_PRIMARY_KEY="$(cat ${cfg.activeRecordEncryptionPrimaryKeyFile})"
         SECRET_KEY_BASE="$(cat ${cfg.secretKeyBaseFile})"
         OTP_SECRET="$(cat ${cfg.otpSecretFile})"
         VAPID_PRIVATE_KEY="$(cat ${cfg.vapidPrivateKeyFile})"
@@ -802,7 +871,7 @@ in {
       description = "Mastodon web";
       environment = env // (if cfg.enableUnixSocket
         then { SOCKET = "/run/mastodon-web/web.socket"; }
-        else { PORT = toString(cfg.webPort); }
+        else { PORT = toString cfg.webPort; }
       );
       serviceConfig = {
         ExecStart = "${cfg.package}/bin/puma -C config/puma.rb";
@@ -816,7 +885,7 @@ in {
         # System Call Filtering
         SystemCallFilter = [ ("~" + lib.concatStringsSep " " systemCallsList) "@chown" "pipe" "pipe2" ];
       } // cfgService;
-      path = with pkgs; [ ffmpeg-headless file imagemagick ];
+      path = with pkgs; [ ffmpeg-headless file ];
     };

     systemd.services.mastodon-media-auto-remove = lib.mkIf cfg.mediaAutoRemove.enable {
@@ -851,7 +920,7 @@ in {
       };

       locations."@proxy" = {
-        proxyPass = (if cfg.enableUnixSocket then "http://unix:/run/mastodon-web/web.socket" else "http://127.0.0.1:${toString(cfg.webPort)}");
+        proxyPass = (if cfg.enableUnixSocket then "http://unix:/run/mastodon-web/web.socket" else "http://127.0.0.1:${toString cfg.webPort}");
         proxyWebsockets = true;
       };
@@ -903,7 +972,7 @@ in {
         inherit (cfg) group;
       };
     })
-    (lib.attrsets.setAttrByPath [ cfg.user "packages" ] [ cfg.package pkgs.imagemagick ])
+    (lib.attrsets.setAttrByPath [ cfg.user "packages" ] [ cfg.package ])
     (lib.mkIf (cfg.redis.createLocally && cfg.redis.enableUnixSocket) {${config.services.mastodon.user}.extraGroups = [ "redis-mastodon" ];})
   ];
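
The three new Active Record encryption key files are auto-generated under /var/lib/mastodon/secrets by the preparation script above; deployments that manage secrets externally can point the options elsewhere. A hedged sketch, where the /run/secrets paths assume an external secret manager such as agenix or sops-nix, which is not part of this change:

    services.mastodon = {
      activeRecordEncryptionDeterministicKeyFile = "/run/secrets/mastodon/ar-deterministic-key";
      activeRecordEncryptionKeyDerivationSaltFile = "/run/secrets/mastodon/ar-key-derivation-salt";
      activeRecordEncryptionPrimaryKeyFile = "/run/secrets/mastodon/ar-primary-key";
    };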

Some files were not shown because too many files have changed in this diff.