Project import generated by Copybara.

GitOrigin-RevId: ae5c332cbb5827f6b1f02572496b141021de335f
This commit is contained in:
Default email 2024-01-25 23:12:00 +09:00
parent 8b4d882663
commit 23b612e36f
2830 changed files with 100580 additions and 60293 deletions

View file

@ -20,7 +20,7 @@
# Libraries # Libraries
/lib @infinisil /lib @infinisil
/lib/systems @alyssais @ericson2314 @amjoseph-nixpkgs /lib/systems @alyssais @ericson2314
/lib/generators.nix @infinisil @Profpatsch /lib/generators.nix @infinisil @Profpatsch
/lib/cli.nix @infinisil @Profpatsch /lib/cli.nix @infinisil @Profpatsch
/lib/debug.nix @infinisil @Profpatsch /lib/debug.nix @infinisil @Profpatsch
@ -41,17 +41,17 @@
/pkgs/top-level/stage.nix @Ericson2314 /pkgs/top-level/stage.nix @Ericson2314
/pkgs/top-level/splice.nix @Ericson2314 /pkgs/top-level/splice.nix @Ericson2314
/pkgs/top-level/release-cross.nix @Ericson2314 /pkgs/top-level/release-cross.nix @Ericson2314
/pkgs/stdenv/generic @Ericson2314 @amjoseph-nixpkgs /pkgs/stdenv/generic @Ericson2314
/pkgs/stdenv/generic/check-meta.nix @Ericson2314 @piegamesde /pkgs/stdenv/generic/check-meta.nix @Ericson2314 @piegamesde
/pkgs/stdenv/cross @Ericson2314 @amjoseph-nixpkgs /pkgs/stdenv/cross @Ericson2314
/pkgs/build-support/cc-wrapper @Ericson2314 @amjoseph-nixpkgs /pkgs/build-support/cc-wrapper @Ericson2314
/pkgs/build-support/bintools-wrapper @Ericson2314 /pkgs/build-support/bintools-wrapper @Ericson2314
/pkgs/build-support/setup-hooks @Ericson2314 /pkgs/build-support/setup-hooks @Ericson2314
/pkgs/build-support/setup-hooks/auto-patchelf.sh @layus /pkgs/build-support/setup-hooks/auto-patchelf.sh @layus
/pkgs/build-support/setup-hooks/auto-patchelf.py @layus /pkgs/build-support/setup-hooks/auto-patchelf.py @layus
/pkgs/pkgs-lib @infinisil /pkgs/pkgs-lib @infinisil
## Format generators/serializers ## Format generators/serializers
/pkgs/pkgs-lib/formats/libconfig @ckiee /pkgs/pkgs-lib/formats/libconfig @ckiee @h7x4
# pkgs/by-name # pkgs/by-name
/pkgs/test/nixpkgs-check-by-name @infinisil /pkgs/test/nixpkgs-check-by-name @infinisil
@ -84,7 +84,6 @@ nixos/modules/installer/tools/nix-fallback-paths.nix @raitobezarius
/nixos/README.md @infinisil /nixos/README.md @infinisil
/pkgs/README.md @infinisil /pkgs/README.md @infinisil
/maintainers/README.md @infinisil /maintainers/README.md @infinisil
/maintainers/* @piegamesde @Janik-Haag
# User-facing development documentation # User-facing development documentation
/doc/development.md @infinisil /doc/development.md @infinisil
@ -158,7 +157,7 @@ nixos/modules/installer/tools/nix-fallback-paths.nix @raitobezarius
/doc/languages-frameworks/rust.section.md @zowoq @winterqt @figsoda /doc/languages-frameworks/rust.section.md @zowoq @winterqt @figsoda
# C compilers # C compilers
/pkgs/development/compilers/gcc @amjoseph-nixpkgs /pkgs/development/compilers/gcc
/pkgs/development/compilers/llvm @RaitoBezarius /pkgs/development/compilers/llvm @RaitoBezarius
/pkgs/development/compilers/emscripten @raitobezarius /pkgs/development/compilers/emscripten @raitobezarius
/doc/languages-frameworks/emscripten.section.md @raitobezarius /doc/languages-frameworks/emscripten.section.md @raitobezarius
@ -220,6 +219,7 @@ pkgs/development/python-modules/buildcatrust/ @ajs124 @lukegb @mweinelt
/nixos/modules/services/networking/ntp @thoughtpolice /nixos/modules/services/networking/ntp @thoughtpolice
# Network # Network
/pkgs/tools/networking/octodns @Janik-Haag
/pkgs/tools/networking/kea/default.nix @mweinelt /pkgs/tools/networking/kea/default.nix @mweinelt
/pkgs/tools/networking/babeld/default.nix @mweinelt /pkgs/tools/networking/babeld/default.nix @mweinelt
/nixos/modules/services/networking/babeld.nix @mweinelt /nixos/modules/services/networking/babeld.nix @mweinelt
@ -340,9 +340,6 @@ nixos/tests/zfs.nix @raitobezarius
/pkgs/development/compilers/zig @figsoda /pkgs/development/compilers/zig @figsoda
/doc/hooks/zig.section.md @figsoda /doc/hooks/zig.section.md @figsoda
# Linux Kernel
pkgs/os-specific/linux/kernel/manual-config.nix @amjoseph-nixpkgs
# Buildbot # Buildbot
nixos/modules/services/continuous-integration/buildbot @Mic92 @zowoq nixos/modules/services/continuous-integration/buildbot @Mic92 @zowoq
nixos/tests/buildbot.nix @Mic92 @zowoq nixos/tests/buildbot.nix @Mic92 @zowoq

View file

@ -19,8 +19,8 @@ jobs:
# we don't limit this action to only NixOS repo since the checks are cheap and useful developer feedback # we don't limit this action to only NixOS repo since the checks are cheap and useful developer feedback
steps: steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: cachix/install-nix-action@7ac1ec25491415c381d9b62f0657c7a028df52a7 # v24 - uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
- uses: cachix/cachix-action@6a2e08b5ebf7a9f285ff57b1870a4262b06e0bee # v13 - uses: cachix/cachix-action@18cf96c7c98e048e10a83abd92116114cd8504be # v14
with: with:
# This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere. # This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.
name: nixpkgs-ci name: nixpkgs-ci

View file

@ -4,11 +4,19 @@
# When you make changes to this workflow, also update pkgs/test/nixpkgs-check-by-name/scripts/run-local.sh adequately # When you make changes to this workflow, also update pkgs/test/nixpkgs-check-by-name/scripts/run-local.sh adequately
name: Check pkgs/by-name name: Check pkgs/by-name
# The pre-built tool is fetched from a channel, # The tool is pinned to a pre-built version on Hydra,
# making it work predictable on all PRs. # see pkgs/test/nixpkgs-check-by-name/scripts/README.md
on: on:
# Using pull_request_target instead of pull_request avoids having to approve first time contributors # Using pull_request_target instead of pull_request avoids having to approve first time contributors
pull_request_target pull_request_target:
# This workflow depends on the base branch of the PR,
# but changing the base branch is not included in the default trigger events,
# which would be `opened`, `synchronize` or `reopened`.
# Instead it causes an `edited` event, so we need to add it explicitly here
# While `edited` is also triggered when the PR title/body is changed,
# this PR action is fairly quick, and PR's don't get edited that often,
# so it shouldn't be a problem
types: [opened, synchronize, reopened, edited]
permissions: permissions:
# We need this permission to cancel the workflow run if there's a merge conflict # We need this permission to cancel the workflow run if there's a merge conflict
@ -90,9 +98,14 @@ jobs:
base=$(mktemp -d) base=$(mktemp -d)
git worktree add "$base" "$(git rev-parse HEAD^1)" git worktree add "$base" "$(git rev-parse HEAD^1)"
echo "base=$base" >> "$GITHUB_ENV" echo "base=$base" >> "$GITHUB_ENV"
- uses: cachix/install-nix-action@7ac1ec25491415c381d9b62f0657c7a028df52a7 # v24 - uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
- name: Fetching the tool - name: Fetching the pinned tool
run: pkgs/test/nixpkgs-check-by-name/scripts/fetch-tool.sh "$GITHUB_BASE_REF" result # Update the pinned version using pkgs/test/nixpkgs-check-by-name/scripts/update-pinned-tool.sh
run: |
# Get the direct /nix/store path from the pin to avoid having to evaluate Nixpkgs
toolPath=$(jq -r '."ci-path"' pkgs/test/nixpkgs-check-by-name/scripts/pinned-tool.json)
# This asks the substituter for the path, which should be there because Hydra will have pre-built and pushed it
nix-store --realise "$toolPath" --add-root result
- name: Running nixpkgs-check-by-name - name: Running nixpkgs-check-by-name
run: | run: |
if result/bin/nixpkgs-check-by-name --base "$base" .; then if result/bin/nixpkgs-check-by-name --base "$base" .; then

View file

@ -16,7 +16,7 @@ jobs:
with: with:
# pull_request_target checks out the base branch by default # pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@7ac1ec25491415c381d9b62f0657c7a028df52a7 # v24 - uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
with: with:
# explicitly enable sandbox # explicitly enable sandbox
extra_nix_config: sandbox = true extra_nix_config: sandbox = true

View file

@ -28,7 +28,7 @@ jobs:
with: with:
# pull_request_target checks out the base branch by default # pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@7ac1ec25491415c381d9b62f0657c7a028df52a7 # v24 - uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
with: with:
# nixpkgs commit is pinned so that it doesn't break # nixpkgs commit is pinned so that it doesn't break
# editorconfig-checker 2.4.0 # editorconfig-checker 2.4.0

View file

@ -18,11 +18,11 @@ jobs:
with: with:
# pull_request_target checks out the base branch by default # pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@7ac1ec25491415c381d9b62f0657c7a028df52a7 # v24 - uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
with: with:
# explicitly enable sandbox # explicitly enable sandbox
extra_nix_config: sandbox = true extra_nix_config: sandbox = true
- uses: cachix/cachix-action@6a2e08b5ebf7a9f285ff57b1870a4262b06e0bee # v13 - uses: cachix/cachix-action@18cf96c7c98e048e10a83abd92116114cd8504be # v14
with: with:
# This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere. # This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.
name: nixpkgs-ci name: nixpkgs-ci

View file

@ -19,11 +19,11 @@ jobs:
with: with:
# pull_request_target checks out the base branch by default # pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@7ac1ec25491415c381d9b62f0657c7a028df52a7 # v24 - uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
with: with:
# explicitly enable sandbox # explicitly enable sandbox
extra_nix_config: sandbox = true extra_nix_config: sandbox = true
- uses: cachix/cachix-action@6a2e08b5ebf7a9f285ff57b1870a4262b06e0bee # v13 - uses: cachix/cachix-action@18cf96c7c98e048e10a83abd92116114cd8504be # v14
with: with:
# This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere. # This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.
name: nixpkgs-ci name: nixpkgs-ci

View file

@ -0,0 +1,42 @@
name: "Check whether nix files are parseable"
permissions: read-all
on:
# avoids approving first time contributors
pull_request_target:
branches-ignore:
- 'release-**'
jobs:
tests:
runs-on: ubuntu-latest
if: "github.repository_owner == 'NixOS' && !contains(github.event.pull_request.title, '[skip treewide]')"
steps:
- name: Get list of changed files from PR
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
gh api \
repos/NixOS/nixpkgs/pulls/${{github.event.number}}/files --paginate \
| jq --raw-output '.[] | select(.status != "removed" and (.filename | endswith(".nix"))) | .filename' \
> "$HOME/changed_files"
if [[ -s "$HOME/changed_files" ]]; then
echo "CHANGED_FILES=$HOME/changed_files" > "$GITHUB_ENV"
fi
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
if: ${{ env.CHANGED_FILES && env.CHANGED_FILES != '' }}
- uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
with:
nix_path: nixpkgs=channel:nixpkgs-unstable
- name: Parse all changed or added nix files
run: |
ret=0
while IFS= read -r file; do
out="$(nix-instantiate --parse "$file")" || { echo "$out" && ret=1; }
done < "$HOME/changed_files"
exit "$ret"
if: ${{ env.CHANGED_FILES && env.CHANGED_FILES != '' }}

View file

@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: cachix/install-nix-action@7ac1ec25491415c381d9b62f0657c7a028df52a7 # v24 - uses: cachix/install-nix-action@6004951b182f8860210c8d6f0d808ec5b1a33d28 # v25
with: with:
nix_path: nixpkgs=channel:nixpkgs-unstable nix_path: nixpkgs=channel:nixpkgs-unstable
- name: setup - name: setup

View file

@ -323,7 +323,7 @@ All the review template samples provided in this section are generic and meant a
To get more information about how to review specific parts of Nixpkgs, refer to the documents linked to in the [overview section][overview]. To get more information about how to review specific parts of Nixpkgs, refer to the documents linked to in the [overview section][overview].
If a pull request contains documentation changes that might require feedback from the documentation team, ping @NixOS/documentation-team on the pull request. If a pull request contains documentation changes that might require feedback from the documentation team, ping [@NixOS/documentation-reviewers](https://github.com/orgs/nixos/teams/documentation-reviewers) on the pull request.
If you consider having enough knowledge and experience in a topic and would like to be a long-term reviewer for related submissions, please contact the current reviewers for that topic. They will give you information about the reviewing process. The main reviewers for a topic can be hard to find as there is no list, but checking past pull requests to see who reviewed or git-blaming the code to see who committed to that topic can give some hints. If you consider having enough knowledge and experience in a topic and would like to be a long-term reviewer for related submissions, please contact the current reviewers for that topic. They will give you information about the reviewing process. The main reviewers for a topic can be hard to find as there is no list, but checking past pull requests to see who reviewed or git-blaming the code to see who committed to that topic can give some hints.
@ -378,7 +378,7 @@ The staging workflow exists to batch Hydra builds of many packages together.
It works by directing commits that cause [mass rebuilds][mass-rebuild] to a separate `staging` branch that isn't directly built by Hydra. It works by directing commits that cause [mass rebuilds][mass-rebuild] to a separate `staging` branch that isn't directly built by Hydra.
Regularly, the `staging` branch is _manually_ merged into a `staging-next` branch to be built by Hydra using the [`nixpkgs:staging-next` jobset](https://hydra.nixos.org/jobset/nixpkgs/staging-next). Regularly, the `staging` branch is _manually_ merged into a `staging-next` branch to be built by Hydra using the [`nixpkgs:staging-next` jobset](https://hydra.nixos.org/jobset/nixpkgs/staging-next).
The `staging-next` branch should then only receive direct commits in order to fix Hydra builds. The `staging-next` branch should then only receive direct commits in order to fix Hydra builds.
Once it is verified that there are no major regressions, it is merged into `master` using [a pull requests](https://github.com/NixOS/nixpkgs/pulls?q=head%3Astaging-next). Once it is verified that there are no major regressions, it is merged into `master` using [a pull request](https://github.com/NixOS/nixpkgs/pulls?q=head%3Astaging-next).
This is done manually in order to ensure it's a good use of Hydra's computing resources. This is done manually in order to ensure it's a good use of Hydra's computing resources.
By keeping the `staging-next` branch separate from `staging`, this batching does not block developers from merging changes into `staging`. By keeping the `staging-next` branch separate from `staging`, this batching does not block developers from merging changes into `staging`.

View file

@ -159,24 +159,28 @@ In an effort to keep the Nixpkgs manual in a consistent style, please follow the
In that case, please open an issue about the particular documentation convention and tag it with a "needs: documentation" label. In that case, please open an issue about the particular documentation convention and tag it with a "needs: documentation" label.
- Put each sentence in its own line. - Put each sentence in its own line.
This makes reviewing documentation much easier, since GitHub's review system is based on lines. This makes reviews and suggestions much easier, since GitHub's review system is based on lines.
It also helps identifying long sentences at a glance.
- Use the admonitions syntax for any callouts and examples (see [section above](#admonitions)). - Use the [admonition syntax](#admonitions) for callouts and examples.
- If you provide an example involving Nix code, make your example into a fully-working package (something that can be passed to `pkgs.callPackage`). - Provide at least one example per function, and make examples self-contained.
This will help others quickly test that the example works, and will also make it easier if we start automatically testing all example code to make sure it works. This is easier to understand for beginners.
For example, instead of providing something like: It also helps with testing that it actually works especially once we introduce automation.
``` Example code should be such that it can be passed to `pkgs.callPackage`.
Instead of something like:
```nix
pkgs.dockerTools.buildLayeredImage { pkgs.dockerTools.buildLayeredImage {
name = "hello"; name = "hello";
contents = [ pkgs.hello ]; contents = [ pkgs.hello ];
} }
``` ```
Provide something like: Write something like:
``` ```nix
{ dockerTools, hello }: { dockerTools, hello }:
dockerTools.buildLayeredImage { dockerTools.buildLayeredImage {
name = "hello"; name = "hello";
@ -200,6 +204,10 @@ In that case, please open an issue about the particular documentation convention
: Tag of the generated image. : Tag of the generated image.
_Default value:_ the output path's hash. _Default value:_ the output path's hash.
``` ```
## Getting help
If you need documentation-specific help or reviews, ping [@NixOS/documentation-reviewers](https://github.com/orgs/nixos/teams/documentation-reviewers) on your pull request.

View file

@ -1,33 +1,228 @@
# pkgs.dockerTools {#sec-pkgs-dockerTools} # pkgs.dockerTools {#sec-pkgs-dockerTools}
`pkgs.dockerTools` is a set of functions for creating and manipulating Docker images according to the [Docker Image Specification v1.2.0](https://github.com/moby/moby/blob/master/image/spec/v1.2.md#docker-image-specification-v120). Docker itself is not used to perform any of the operations done by these functions. `pkgs.dockerTools` is a set of functions for creating and manipulating Docker images according to the [Docker Image Specification v1.3.0](https://github.com/moby/moby/blob/46f7ab808b9504d735d600e259ca0723f76fb164/image/spec/spec.md#image-json-field-descriptions).
Docker itself is not used to perform any of the operations done by these functions.
## buildImage {#ssec-pkgs-dockerTools-buildImage} ## buildImage {#ssec-pkgs-dockerTools-buildImage}
This function is analogous to the `docker build` command, in that it can be used to build a Docker-compatible repository tarball containing a single image with one or multiple layers. As such, the result is suitable for being loaded in Docker with `docker load`. This function builds a Docker-compatible repository tarball containing a single image.
As such, the result is suitable for being loaded in Docker with `docker load` (see [](#ex-dockerTools-buildImage) for how to do this).
The parameters of `buildImage` with relative example values are described below: This function will create a single layer for all files (and dependencies) that are specified in its argument.
Only new dependencies that are not already in the existing layers will be copied.
If you prefer to create multiple layers for the files and dependencies you want to add to the image, see [](#ssec-pkgs-dockerTools-buildLayeredImage) or [](#ssec-pkgs-dockerTools-streamLayeredImage) instead.
[]{#ex-dockerTools-buildImage} This function allows a script to be run during the layer generation process, allowing custom behaviour to affect the final results of the image (see the documentation of the `runAsRoot` and `extraCommands` attributes).
[]{#ex-dockerTools-buildImage-runAsRoot}
The resulting repository tarball will list a single image as specified by the `name` and `tag` attributes.
By default, that image will use a static creation date (see documentation for the `created` attribute).
This allows `buildImage` to produce reproducible images.
:::{.tip}
When running an image built with `buildImage`, you might encounter certain errors depending on what you included in the image, especially if you did not start with any base image.
If you encounter errors similar to `getProtocolByName: does not exist (no such protocol name: tcp)`, you may need to add the contents of `pkgs.iana-etc` in the `copyToRoot` attribute.
Similarly, if you encounter errors similar to `Error_Protocol ("certificate has unknown CA",True,UnknownCa)`, you may need to add the contents of `pkgs.cacert` in the `copyToRoot` attribute.
:::
### Inputs {#ssec-pkgs-dockerTools-buildImage-inputs}
`buildImage` expects an argument with the following attributes:
`name` (String)
: The name of the generated image.
`tag` (String or Null; _optional_)
: Tag of the generated image.
If `null`, the hash of the nix derivation will be used as the tag.
_Default value:_ `null`.
`fromImage` (Path or Null; _optional_)
: The repository tarball of an image to be used as the base for the generated image.
It must be a valid Docker image, such as one exported by `docker save`, or another image built with the `dockerTools` utility functions.
This can be seen as an equivalent of `FROM fromImage` in a `Dockerfile`.
A value of `null` can be seen as an equivalent of `FROM scratch`.
If specified, the layer created by `buildImage` will be appended to the layers defined in the base image, resulting in an image with at least two layers (one or more layers from the base image, and the layer created by `buildImage`).
Otherwise, the resulting image with contain the single layer created by `buildImage`.
_Default value:_ `null`.
`fromImageName` (String or Null; _optional_)
: Used to specify the image within the repository tarball in case it contains multiple images.
A value of `null` means that `buildImage` will use the first image available in the repository.
:::{.note}
This must be used with `fromImageTag`. Using only `fromImageName` without `fromImageTag` will make `buildImage` use the first image available in the repository.
:::
_Default value:_ `null`.
`fromImageTag` (String or Null; _optional_)
: Used to specify the image within the repository tarball in case it contains multiple images.
A value of `null` means that `buildImage` will use the first image available in the repository.
:::{.note}
This must be used with `fromImageName`. Using only `fromImageTag` without `fromImageName` will make `buildImage` use the first image available in the repository
:::
_Default value:_ `null`.
`copyToRoot` (Path, List of Paths, or Null; _optional_)
: Files to add to the generated image.
Anything that coerces to a path (e.g. a derivation) can also be used.
This can be seen as an equivalent of `ADD contents/ /` in a `Dockerfile`.
_Default value:_ `null`.
`keepContentsDirlinks` (Boolean; _optional_)
: When adding files to the generated image (as specified by `copyToRoot`), this attribute controls whether to preserve symlinks to directories.
If `false`, the symlinks will be transformed into directories.
This behaves the same as `rsync -k` when `keepContentsDirlinks` is `false`, and the same as `rsync -K` when `keepContentsDirlinks` is `true`.
_Default value:_ `false`.
`runAsRoot` (String or Null; _optional_)
: A bash script that will run as root inside a VM that contains the existing layers of the base image and the new generated layer (including the files from `copyToRoot`).
The script will be run with a working directory of `/`.
This can be seen as an equivalent of `RUN ...` in a `Dockerfile`.
A value of `null` means that this step in the image generation process will be skipped.
See [](#ex-dockerTools-buildImage-runAsRoot) for how to work with this attribute.
:::{.caution}
Using this attribute requires the `kvm` device to be available, see [`system-features`](https://nixos.org/manual/nix/stable/command-ref/conf-file.html#conf-system-features).
If the `kvm` device isn't available, you should consider using [`buildLayeredImage`](#ssec-pkgs-dockerTools-buildLayeredImage) or [`streamLayeredImage`](#ssec-pkgs-dockerTools-streamLayeredImage) instead.
Those functions allow scripts to be run as root without access to the `kvm` device.
:::
:::{.note}
At the time the script in `runAsRoot` is run, the files specified directly in `copyToRoot` will be present in the VM, but their dependencies might not be there yet.
Copying their dependencies into the generated image is a step that happens after `runAsRoot` finishes running.
:::
_Default value:_ `null`.
`extraCommands` (String; _optional_)
: A bash script that will run before the layer created by `buildImage` is finalised.
The script will be run on some (opaque) working directory which will become `/` once the layer is created.
This is similar to `runAsRoot`, but the script specified in `extraCommands` is **not** run as root, and does not involve creating a VM.
It is simply run as part of building the derivation that outputs the layer created by `buildImage`.
See [](#ex-dockerTools-buildImage-extraCommands) for how to work with this attribute, and subtle differences compared to `runAsRoot`.
_Default value:_ `""`.
`config` (Attribute Set; _optional_)
: Used to specify the configuration of the containers that will be started off the generated image.
Must be an attribute set, with each attribute as listed in the [Docker Image Specification v1.3.0](https://github.com/moby/moby/blob/46f7ab808b9504d735d600e259ca0723f76fb164/image/spec/spec.md#image-json-field-descriptions).
_Default value:_ `null`.
`architecture` (String; _optional_)
: Used to specify the image architecture.
This is useful for multi-architecture builds that don't need cross compiling.
If specified, its value should follow the [OCI Image Configuration Specification](https://github.com/opencontainers/image-spec/blob/main/config.md#properties), which should still be compatible with Docker.
According to the linked specification, all possible values for `$GOARCH` in [the Go docs](https://go.dev/doc/install/source#environment) should be valid, but will commonly be one of `386`, `amd64`, `arm`, or `arm64`.
_Default value:_ the same value from `pkgs.go.GOARCH`.
`diskSize` (Number; _optional_)
: Controls the disk size (in megabytes) of the VM used to run the script specified in `runAsRoot`.
This attribute is ignored if `runAsRoot` is `null`.
_Default value:_ 1024.
`buildVMMemorySize` (Number; _optional_)
: Controls the amount of memory (in megabytes) provisioned for the VM used to run the script specified in `runAsRoot`.
This attribute is ignored if `runAsRoot` is `null`.
_Default value:_ 512.
`created` (String; _optional_)
: Specifies the time of creation of the generated image.
This should be either a date and time formatted according to [ISO-8601](https://en.wikipedia.org/wiki/ISO_8601) or `"now"`, in which case `buildImage` will use the current date.
See [](#ex-dockerTools-buildImage-creatednow) for how to use `"now"`.
:::{.caution}
Using `"now"` means that the generated image will not be reproducible anymore (because the date will always change whenever it's built).
:::
_Default value:_ `"1970-01-01T00:00:01Z"`.
`uid` (Number; _optional_)
: The uid of the user that will own the files packed in the new layer built by `buildImage`.
_Default value:_ 0.
`gid` (Number; _optional_)
: The gid of the group that will own the files packed in the new layer built by `buildImage`.
_Default value:_ 0.
`contents` **DEPRECATED**
: This attribute is deprecated, and users are encouraged to use `copyToRoot` instead.
### Passthru outputs {#ssec-pkgs-dockerTools-buildImage-passthru-outputs}
`buildImage` defines a few [`passthru`](#var-stdenv-passthru) attributes:
`buildArgs` (Attribute Set)
: The argument passed to `buildImage` itself.
This allows you to inspect all attributes specified in the argument, as described above.
`layer` (Attribute Set)
: The derivation with the layer created by `buildImage`.
This allows easier inspection of the contents added by `buildImage` in the generated image.
`imageTag` (String)
: The tag of the generated image.
This is useful if no tag was specified in the attributes of the argument to `buildImage`, because an automatic tag will be used instead.
`imageTag` allows you to retrieve the value of the tag used in this case.
### Examples {#ssec-pkgs-dockerTools-buildImage-examples}
:::{.example #ex-dockerTools-buildImage}
# Building a Docker image
The following package builds a Docker image that runs the `redis-server` executable from the `redis` package.
The Docker image will have name `redis` and tag `latest`.
```nix
{ dockerTools, buildEnv, redis }:
dockerTools.buildImage {
  name = "redis";
  tag = "latest";

  copyToRoot = buildEnv {
    name = "image-root";
    paths = [ redis ];
    pathsToLink = [ "/bin" ];
  };

  runAsRoot = ''
    mkdir -p /data
  '';

  config = {
    Cmd = [ "/bin/redis-server" ];
    WorkingDir = "/data";
    Volumes = { "/data" = { }; };
  };
}
```
The result of building this package is a `.tar.gz` file that can be loaded into Docker:

```shell
$ nix-build
(some output removed for clarity)
building '/nix/store/yw0adm4wpsw1w6j4fb5hy25b3arr9s1v-docker-image-redis.tar.gz.drv'...
Adding layer...
tar: Removing leading `/' from member names
Adding meta...
Cooking the image...
Finished.
/nix/store/p4dsg62inh9d2ksy3c7bv58xa851dasr-docker-image-redis.tar.gz

$ docker load -i /nix/store/p4dsg62inh9d2ksy3c7bv58xa851dasr-docker-image-redis.tar.gz
(some output removed for clarity)
Loaded image: redis:latest
```
:::
:::{.example #ex-dockerTools-buildImage-runAsRoot}
# Building a Docker image with `runAsRoot`

The following package builds a Docker image with the `hello` executable from the `hello` package.
It uses `runAsRoot` to create a directory and a file inside the image.

This works the same as [](#ex-dockerTools-buildImage-extraCommands), but uses `runAsRoot` instead of `extraCommands`.
```nix
{ dockerTools, buildEnv, hello }:
dockerTools.buildImage {
  name = "hello";
  tag = "latest";

  copyToRoot = buildEnv {
    name = "image-root";
    paths = [ hello ];
    pathsToLink = [ "/bin" ];
  };

  runAsRoot = ''
    mkdir -p /data
    echo "some content" > my-file
  '';

  config = {
    Cmd = [ "/bin/hello" ];
    WorkingDir = "/data";
  };
}
```
:::
:::{.example #ex-dockerTools-buildImage-extraCommands}
# Building a Docker image with `extraCommands`
The following package builds a Docker image with the `hello` executable from the `hello` package.
It uses `extraCommands` to create a directory and a file inside the image.
This works the same as [](#ex-dockerTools-buildImage-runAsRoot), but uses `extraCommands` instead of `runAsRoot`.
Note that with `extraCommands`, we can't directly reference `/` and must create files and directories as if we were already on `/`.
```nix
{ dockerTools, buildEnv, hello }:
dockerTools.buildImage {
name = "hello";
tag = "latest";
copyToRoot = buildEnv {
name = "image-root";
paths = [ hello ];
pathsToLink = [ "/bin" ];
};
extraCommands = ''
mkdir -p data
echo "some content" > my-file
'';
config = {
Cmd = [ "/bin/hello" ];
WorkingDir = "/data";
};
}
```
:::
:::{.example #ex-dockerTools-buildImage-creatednow}
# Building a Docker image with a creation date set to the current time
Note that using a value of `"now"` in the `created` attribute will break reproducibility.
```nix
{ dockerTools, buildEnv, hello }:
dockerTools.buildImage {
name = "hello";
tag = "latest";
created = "now";
copyToRoot = buildEnv {
name = "image-root";
paths = [ hello ];
    pathsToLink = [ "/bin" ];
  };

  config.Cmd = [ "/bin/hello" ];
}
```

After importing the generated repository tarball with Docker, its CLI will display a reasonable date and sort the images as expected:

```ShellSession
$ docker images
REPOSITORY   TAG      IMAGE ID       CREATED              SIZE
hello        latest   de2bf4786de6   About a minute ago   25.2MB
```

However, the produced images will not be binary reproducible.
:::
## buildLayeredImage {#ssec-pkgs-dockerTools-buildLayeredImage}

`buildLayeredImage` uses [`streamLayeredImage`](#ssec-pkgs-dockerTools-streamLayeredImage) underneath to build a compressed Docker-compatible repository tarball.
Basically, `buildLayeredImage` runs the script created by `streamLayeredImage` to save the compressed image in the Nix store.
`buildLayeredImage` supports the same options as `streamLayeredImage`, see [`streamLayeredImage`](#ssec-pkgs-dockerTools-streamLayeredImage) for details.
:::{.note}
Despite the similar name, [`buildImage`](#ssec-pkgs-dockerTools-buildImage) works completely differently from `buildLayeredImage` and `streamLayeredImage`.

Even though some of the arguments may seem related, they cannot be interchanged.
:::

You can use this function to load an image in Docker with `docker load`.
See [](#ex-dockerTools-buildLayeredImage-hello) to see how to do that.

### Examples {#ssec-pkgs-dockerTools-buildLayeredImage-examples}

:::{.example #ex-dockerTools-buildLayeredImage-hello}
# Building a layered Docker image

The following package builds a layered Docker image that runs the `hello` executable from the `hello` package.
The Docker image will have name `hello` and tag `latest`.

```nix
{ dockerTools, hello }:
dockerTools.buildLayeredImage {
  name = "hello";
  tag = "latest";
  contents = [ hello ];

  config.Cmd = [ "/bin/hello" ];
}
```
The result of building this package is a `.tar.gz` file that can be loaded into Docker:

```shell
$ nix-build
(some output removed for clarity)
building '/nix/store/bk8bnrbw10nq7p8pvcmdr0qf57y6scha-hello.tar.gz.drv'...
No 'fromImage' provided
Creating layer 1 from paths: ['/nix/store/i93s7xxblavsacpy82zdbn4kplsyq48l-libunistring-1.1']
Creating layer 2 from paths: ['/nix/store/ji01n9vinnj22nbrb86nx8a1ssgpilx8-libidn2-2.3.4']
Creating layer 3 from paths: ['/nix/store/ldrslljw4rg026nw06gyrdwl78k77vyq-xgcc-12.3.0-libgcc']
Creating layer 4 from paths: ['/nix/store/9y8pmvk8gdwwznmkzxa6pwyah52xy3nk-glibc-2.38-27']
Creating layer 5 from paths: ['/nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1']
Creating layer 6 with customisation...
Adding manifests...
Done.
/nix/store/hxcz7snvw7f8rzhbh6mv8jq39d992905-hello.tar.gz

$ docker load -i /nix/store/hxcz7snvw7f8rzhbh6mv8jq39d992905-hello.tar.gz
(some output removed for clarity)
Loaded image: hello:latest
```
:::
### Automatic inclusion of `config` references {#dockerTools-buildLayeredImage-arg-config}
The closure of `config` is automatically included in the closure of the final image.
This allows you to make very simple Docker images with very little code. This container will start up and run `hello`:
```nix
pkgs.dockerTools.buildLayeredImage {
name = "hello";
config.Cmd = [ "${pkgs.hello}/bin/hello" ];
}
```
### Adjusting `maxLayers` {#dockerTools-buildLayeredImage-arg-maxLayers}
Increasing the `maxLayers` increases the number of layers which have a chance to be shared between different images.
Modern Docker installations support up to 128 layers, but older versions support as few as 42.
If the produced image will not be extended by other Docker builds, it is safe to set `maxLayers` to `128`. However, it will be impossible to extend the image further.
The first (`maxLayers-2`) most "popular" paths will have their own individual layers, then layer \#`maxLayers-1` will contain all the remaining "unpopular" paths, and finally layer \#`maxLayers` will contain the Image configuration.
Docker's Layers are not inherently ordered, they are content-addressable and are not explicitly layered until they are composed in to an Image.
## streamLayeredImage {#ssec-pkgs-dockerTools-streamLayeredImage}

`streamLayeredImage` builds a **script** which, when run, will stream to stdout a Docker-compatible repository tarball containing a single image, using multiple layers to improve sharing between images.
This means that `streamLayeredImage` does not output an image into the Nix store, but only a script that builds the image, saving on IO and disk/cache space, particularly with large images.

You can use this function to load an image in Docker with `docker load`.
See [](#ex-dockerTools-streamLayeredImage-hello) to see how to do that.

For this function, you specify a [store path](https://nixos.org/manual/nix/stable/store/store-path) or a list of store paths to be added to the image, and the function will automatically include any dependencies of those paths in the image.
The function will attempt to create one layer per object in the Nix store that needs to be added to the image.
In case there are more objects to include than available layers, the function will put the most ["popular"](https://github.com/NixOS/nixpkgs/tree/release-23.11/pkgs/build-support/references-by-popularity) objects in their own layers, and group all remaining objects into a single layer.
An additional layer will be created with symlinks to the store paths you specified to be included in the image.
These symlinks are built with [`symlinkJoin`](#trivial-builder-symlinkJoin), so they will be included in the root of the image.
See [](#ex-dockerTools-streamLayeredImage-exploringlayers) to understand how these symlinks are laid out in the generated image.
`streamLayeredImage` allows scripts to be run when creating the additional layer with symlinks, allowing custom behaviour to affect the final results of the image (see the documentation of the `extraCommands` and `fakeRootCommands` attributes).
The resulting repository tarball will list a single image as specified by the `name` and `tag` attributes.
By default, that image will use a static creation date (see documentation for the `created` attribute).
This allows the function to produce reproducible images.
### Inputs {#ssec-pkgs-dockerTools-streamLayeredImage-inputs}
`streamLayeredImage` expects one argument with the following attributes:
`name` (String)
: The name of the generated image.
`tag` (String; _optional_)
: Tag of the generated image.
If `null`, the hash of the nix derivation will be used as the tag.
_Default value:_ `null`.
`fromImage` (Path or Null; _optional_)
: The repository tarball of an image to be used as the base for the generated image.
It must be a valid Docker image, such as one exported by `docker save`, or another image built with the `dockerTools` utility functions.
This can be seen as an equivalent of `FROM fromImage` in a `Dockerfile`.
A value of `null` can be seen as an equivalent of `FROM scratch`.
If specified, the created layers will be appended to the layers defined in the base image.
_Default value:_ `null`.
`contents` (Path or List of Paths; _optional_) []{#dockerTools-buildLayeredImage-arg-contents}
: Directories whose contents will be added to the generated image.
Things that coerce to paths (e.g. a derivation) can also be used.
This can be seen as an equivalent of `ADD contents/ /` in a `Dockerfile`.
All the contents specified by `contents` will be added as a final layer in the generated image.
They will be added as links to the actual files (e.g. links to the store paths).
The actual files will be added in previous layers.
_Default value:_ `[]`
`config` (Attribute Set; _optional_) []{#dockerTools-buildLayeredImage-arg-config}
: Used to specify the configuration of the containers that will be started off the generated image.
Must be an attribute set, with each attribute as listed in the [Docker Image Specification v1.3.0](https://github.com/moby/moby/blob/46f7ab808b9504d735d600e259ca0723f76fb164/image/spec/spec.md#image-json-field-descriptions).
If any packages are used directly in `config`, they will be automatically included in the generated image.
See [](#ex-dockerTools-streamLayeredImage-configclosure) for an example.
_Default value:_ `null`.
`architecture` (String; _optional_)
: Used to specify the image architecture.
This is useful for multi-architecture builds that don't need cross compiling.
If specified, its value should follow the [OCI Image Configuration Specification](https://github.com/opencontainers/image-spec/blob/main/config.md#properties), which should still be compatible with Docker.
According to the linked specification, all possible values for `$GOARCH` in [the Go docs](https://go.dev/doc/install/source#environment) should be valid, but will commonly be one of `386`, `amd64`, `arm`, or `arm64`.
_Default value:_ the same value from `pkgs.go.GOARCH`.
`created` (String; _optional_)
: Specifies the time of creation of the generated image.
This should be either a date and time formatted according to [ISO-8601](https://en.wikipedia.org/wiki/ISO_8601) or `"now"`, in which case the current date will be used.
:::{.caution}
Using `"now"` means that the generated image will not be reproducible anymore (because the date will always change whenever it's built).
:::
_Default value:_ `"1970-01-01T00:00:01Z"`.
`maxLayers` (Number; _optional_) []{#dockerTools-buildLayeredImage-arg-maxLayers}
: The maximum number of layers that will be used by the generated image.
If a `fromImage` was specified, the number of layers used by `fromImage` will be subtracted from `maxLayers` to ensure that the image generated will have at most `maxLayers`.
:::{.caution}
Depending on the tool/runtime where the image will be used, there might be a limit to the number of layers that an image can have.
For Docker, see [this issue on GitHub](https://github.com/docker/docs/issues/8230).
:::
_Default value:_ 100.
`extraCommands` (String; _optional_)
: A bash script that will run in the context of the layer created with the contents specified by `contents`.
At the moment this script runs, only the contents directly specified by `contents` will be available as links.
_Default value:_ `""`.
`fakeRootCommands` (String; _optional_)
: A bash script that will run in the context of the layer created with the contents specified by `contents`.
During the process to generate that layer, the script in `extraCommands` will be run first, if specified.
After that, a {manpage}`fakeroot(1)` environment will be entered.
The script specified in `fakeRootCommands` runs inside the fakeroot environment, and the layer is then generated from the view of the files inside the fakeroot environment.
This is useful to change the owners of the files in the layer (by running `chown`, for example), or performing any other privileged operations related to file manipulation (by default, all files in the layer will be owned by root, and the build environment doesn't have enough privileges to directly perform privileged operations on these files).
For more details, see the manpage for {manpage}`fakeroot(1)`.
:::{.caution}
Due to how fakeroot works, static binaries cannot perform privileged file operations in `fakeRootCommands`, unless `enableFakechroot` is set to `true`.
:::
_Default value:_ `""`.
`enableFakechroot` (Boolean; _optional_)
: By default, the script specified in `fakeRootCommands` only runs inside a fakeroot environment.
If `enableFakechroot` is `true`, a more complete chroot environment will be created using [`proot`](https://proot-me.github.io/) before running the script in `fakeRootCommands`.
Files in the Nix store will be available.
This allows scripts that perform installation in `/` to work as expected.
This can be seen as an equivalent of `RUN ...` in a `Dockerfile`.
_Default value:_ `false`
`includeStorePaths` (Boolean; _optional_)
: The files specified in `contents` are put into layers in the generated image.
If `includeStorePaths` is `false`, the actual files will not be included in the generated image, and only links to them will be added instead.
It is **not recommended** to set this to `false` unless you have other tooling to insert the store paths via other means (such as bind mounting the host store) when running containers with the generated image.
If you don't provide any extra tooling, the generated image won't run properly.
See [](#ex-dockerTools-streamLayeredImage-exploringlayers) to understand the impact of setting `includeStorePaths` to `false`.
_Default value:_ `true`
`passthru` (Attribute Set; _optional_)
: Use this to pass any attributes as [passthru](#var-stdenv-passthru) for the resulting derivation.
_Default value:_ `{}`
### Passthru outputs {#ssec-pkgs-dockerTools-streamLayeredImage-passthru-outputs}
`streamLayeredImage` also defines its own [`passthru`](#var-stdenv-passthru) attributes:
`imageTag` (String)
: The tag of the generated image.
This is useful if no tag was specified in the attributes of the argument to the function, because an automatic tag will be used instead.
`imageTag` allows you to retrieve the value of the tag used in this case.
### Examples {#ssec-pkgs-dockerTools-streamLayeredImage-examples}
:::{.example #ex-dockerTools-streamLayeredImage-hello}
# Streaming a layered Docker image
The following package builds a **script** which, when run, will stream a layered Docker image that runs the `hello` executable from the `hello` package.
The Docker image will have name `hello` and tag `latest`.
```nix
{ dockerTools, hello }:
dockerTools.streamLayeredImage {
name = "hello";
tag = "latest";
contents = [ hello ];
config.Cmd = [ "/bin/hello" ];
}
```

The result of building this package is a script.
Running this script and piping it into `docker load` gives you the same image that was built in [](#ex-dockerTools-buildLayeredImage-hello).
Note that in this case, the image is never added to the Nix store, but instead streamed directly into Docker.

```shell
$ nix-build
(output removed for clarity)
/nix/store/wsz2xl8ckxnlb769irvq6jv1280dfvxd-stream-hello

$ /nix/store/wsz2xl8ckxnlb769irvq6jv1280dfvxd-stream-hello | docker load
No 'fromImage' provided
Creating layer 1 from paths: ['/nix/store/i93s7xxblavsacpy82zdbn4kplsyq48l-libunistring-1.1']
Creating layer 2 from paths: ['/nix/store/ji01n9vinnj22nbrb86nx8a1ssgpilx8-libidn2-2.3.4']
Creating layer 3 from paths: ['/nix/store/ldrslljw4rg026nw06gyrdwl78k77vyq-xgcc-12.3.0-libgcc']
Creating layer 4 from paths: ['/nix/store/9y8pmvk8gdwwznmkzxa6pwyah52xy3nk-glibc-2.38-27']
Creating layer 5 from paths: ['/nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1']
Creating layer 6 with customisation...
Adding manifests...
Done.
(some output removed for clarity)
Loaded image: hello:latest
```
:::
:::{.example #ex-dockerTools-streamLayeredImage-exploringlayers}
# Exploring the layers in an image built with `streamLayeredImage`
Assume the following package, which builds a layered Docker image with the `hello` package.
```nix
{ dockerTools, hello }:
dockerTools.streamLayeredImage {
name = "hello";
contents = [ hello ];
}
```
The `hello` package depends on 4 other packages:
```shell
$ nix-store --query -R $(nix-build -A hello)
/nix/store/i93s7xxblavsacpy82zdbn4kplsyq48l-libunistring-1.1
/nix/store/ji01n9vinnj22nbrb86nx8a1ssgpilx8-libidn2-2.3.4
/nix/store/ldrslljw4rg026nw06gyrdwl78k77vyq-xgcc-12.3.0-libgcc
/nix/store/9y8pmvk8gdwwznmkzxa6pwyah52xy3nk-glibc-2.38-27
/nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1
```
This means that all these packages will be included in the image generated by `streamLayeredImage`.
It will put each package in its own layer, for a total of 5 layers with actual files in them.
A final layer will be created only with symlinks for the `hello` package.
The image generated will have the following directory structure (some directories were collapsed for readability):
```
├── bin
│ └── hello → /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1/bin/hello
├── nix
│ └── store
│ ├─⊕ 9y8pmvk8gdwwznmkzxa6pwyah52xy3nk-glibc-2.38-27
│ ├─⊕ i93s7xxblavsacpy82zdbn4kplsyq48l-libunistring-1.1
│ ├─⊕ ji01n9vinnj22nbrb86nx8a1ssgpilx8-libidn2-2.3.4
│ ├─⊕ ldrslljw4rg026nw06gyrdwl78k77vyq-xgcc-12.3.0-libgcc
│ └─⊕ zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1
└── share
├── info
│ └── hello.info → /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1/share/info/hello.info
├─⊕ locale
└── man
└── man1
└── hello.1.gz → /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1/share/man/man1/hello.1.gz
```
Each of the packages in `/nix/store` comes from a layer in the image.
The final layer adds the `/bin` and `/share` directories, but they only contain links to the actual files in `/nix/store`.
If our package sets `includeStorePaths` to `false`, we'll end up with only the final layer with the links, but the actual files won't exist in the image:
```nix
{ dockerTools, hello }:
dockerTools.streamLayeredImage {
  name = "hello";
  contents = [ hello ];
  includeStorePaths = false;
}
```
After building this package, the image will have the following directory structure:
```
├── bin
│ └── hello → /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1/bin/hello
└── share
├── info
│ └── hello.info → /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1/share/info/hello.info
├─⊕ locale
└── man
└── man1
└── hello.1.gz → /nix/store/zhl06z4lrfrkw5rp0hnjjfrgsclzvxpm-hello-2.12.1/share/man/man1/hello.1.gz
```
Note how the links point to paths in `/nix/store`, but they're not included in the image itself.
This is why you need extra tooling when setting `includeStorePaths` to `false`:
a container created from such image won't find any of the files it needs to run otherwise.
:::
::: {.example #ex-dockerTools-streamLayeredImage-configclosure}
# Building a layered Docker image with packages directly in `config`
The closure of `config` is automatically included in the generated image.
The following package shows a more compact way to create the same output generated in [](#ex-dockerTools-streamLayeredImage-hello).
```nix
{ dockerTools, hello, lib }:
dockerTools.streamLayeredImage {
name = "hello";
tag = "latest";
config.Cmd = [ "${lib.getExe hello}" ];
}
```
:::
## pullImage {#ssec-pkgs-dockerTools-fetchFromRegistry}

View file

@ -29,6 +29,10 @@ pkgs.mkShell {
... all the attributes of `stdenv.mkDerivation`.
## Variants {#sec-pkgs-mkShell-variants}
`pkgs.mkShellNoCC` is a variant that uses `stdenvNoCC` instead of `stdenv` as base environment. This is useful if no C compiler is needed in the shell environment.
## Building the shell {#sec-pkgs-mkShell-building}

This derivation output will contain a text file that contains a reference to

View file

@ -1,6 +1,7 @@
# Trivial build helpers {#chap-trivial-builders}

Nixpkgs provides a variety of wrapper functions that help build commonly useful derivations.
Like [`stdenv.mkDerivation`](#sec-using-stdenv), each of these build helpers creates a derivation, but the arguments passed are different (usually simpler) from those required by `stdenv.mkDerivation`.
## `runCommand` {#trivial-builder-runCommand}
@ -58,63 +59,416 @@ Variant of `runCommand` that forces the derivation to be built locally, it is no
This sets [`allowSubstitutes` to `false`](https://nixos.org/nix/manual/#adv-attr-allowSubstitutes), so only use `runCommandLocal` if you are certain the user will always have a builder for the `system` of the derivation. This should be true for most trivial use cases (e.g., just copying some files to a different location or adding symlinks) because there the `system` is usually the same as `builtins.currentSystem`. This sets [`allowSubstitutes` to `false`](https://nixos.org/nix/manual/#adv-attr-allowSubstitutes), so only use `runCommandLocal` if you are certain the user will always have a builder for the `system` of the derivation. This should be true for most trivial use cases (e.g., just copying some files to a different location or adding symlinks) because there the `system` is usually the same as `builtins.currentSystem`.
::: :::
## `writeTextFile`, `writeText`, `writeTextDir`, `writeScript`, `writeScriptBin` {#trivial-builder-writeText} ## Writing text files {#trivial-builder-text-writing}
These functions write `text` to the Nix store. This is useful for creating scripts from Nix expressions. `writeTextFile` takes an attribute set and expects two arguments, `name` and `text`. `name` corresponds to the name used in the Nix store path. `text` will be the contents of the file. You can also set `executable` to true to make this file have the executable bit set. Nixpkgs provides the following functions for producing derivations which write text files or executable scripts into the Nix store.
They are useful for creating files from Nix expressions, and are all implemented as convenience wrappers around `writeTextFile`.
Many more commands wrap `writeTextFile` including `writeText`, `writeTextDir`, `writeScript`, and `writeScriptBin`. These are convenience functions over `writeTextFile`. Each of these functions will cause a derivation to be produced.
When you coerce the result of each of these functions to a string with [string interpolation](https://nixos.org/manual/nix/stable/language/string-interpolation) or [`builtins.toString`](https://nixos.org/manual/nix/stable/language/builtins#builtins-toString), it will evaluate to the [store path](https://nixos.org/manual/nix/stable/store/store-path) of this derivation.
:::: {.note}
Some of these functions will put the resulting files within a directory inside the [derivation output](https://nixos.org/manual/nix/stable/language/derivations#attr-outputs).
If you need to refer to the resulting files somewhere else in a Nix expression, append their path to the derivation's store path.
For example, if the file destination is a directory:
```nix
my-file = writeTextFile {
name = "my-file";
text = ''
Contents of File
'';
destination = "/share/my-file";
}
```
Remember to append "/share/my-file" to the resulting store path when using it elsewhere:
```nix
writeShellScript "evaluate-my-file.sh" ''
cat ${my-file}/share/my-file
'';
```
::::
### `writeTextFile` {#trivial-builder-writeTextFile}
Write a text file to the Nix store.
`writeTextFile` takes an attribute set with the following possible attributes:
`name` (String)
: Corresponds to the name used in the Nix store path identifier.
`text` (String)
: The contents of the file.
`executable` (Bool, _optional_)
: Make this file have the executable bit set.
Default: `false`
`destination` (String, _optional_)
: A subpath under the derivation's output path into which to put the file.
Subdirectories are created automatically when the derivation is realised.
By default, the store path itself will be a file containing the text contents.
Default: `""`
`checkPhase` (String, _optional_)
: Commands to run after generating the file.
Default: `""`
`meta` (Attribute set, _optional_)
: Additional metadata for the derivation.
Default: `{}`
`allowSubstitutes` (Bool, _optional_)
: Whether to allow substituting from a binary cache.
Passed through to [`allowSubstitutes`](https://nixos.org/manual/nix/stable/language/advanced-attributes#adv-attr-allowSubstitutes) of the underlying call to `builtins.derivation`.
It defaults to `false`, as running the derivation's simple `builder` executable locally is assumed to be faster than network operations.
Set it to true if the `checkPhase` step is expensive.
Default: `false`
`preferLocalBuild` (Bool, _optional_)
: Whether to prefer building locally, even if faster [remote build machines](https://nixos.org/manual/nix/stable/command-ref/conf-file#conf-substituters) are available.
Passed through to [`preferLocalBuild`](https://nixos.org/manual/nix/stable/language/advanced-attributes#adv-attr-preferLocalBuild) of the underlying call to `builtins.derivation`.
It defaults to `true` for the same reason `allowSubstitutes` defaults to `false`.
Default: `true`
The resulting store path will include some variation of the name, and it will be a file unless `destination` is used, in which case it will be a directory.
::: {.example #ex-writeTextFile}
# Usage 1 of `writeTextFile`
Write `my-file` to `/nix/store/<store path>/some/subpath/my-cool-script`, making it executable.
Also run a check on the resulting file in a `checkPhase`, and supply values for the less-used options.
```nix
writeTextFile {
name = "my-cool-script";
text = ''
#!/bin/sh
echo "This is my cool script!"
'';
executable = true;
destination = "/some/subpath/my-cool-script";
checkPhase = ''
${pkgs.shellcheck}/bin/shellcheck $out/some/subpath/my-cool-script
'';
meta = {
license = pkgs.lib.licenses.cc0;
};
allowSubstitutes = true;
preferLocalBuild = false;
};
```
:::
::: {.example #ex2-writeTextFile}
# Usage 2 of `writeTextFile`
Write the string `Contents of File` to `/nix/store/<store path>`.
See also the [](#trivial-builder-writeText) helper function.
Here are a few examples:
```nix ```nix
# Writes my-file to /nix/store/<store path>
writeTextFile { writeTextFile {
name = "my-file"; name = "my-file";
text = '' text = ''
Contents of File Contents of File
''; '';
} }
# See also the `writeText` helper function below. ```
:::
# Writes executable my-file to /nix/store/<store path>/bin/my-file ::: {.example #ex3-writeTextFile}
# Usage 3 of `writeTextFile`
Write an executable script `my-script` to `/nix/store/<store path>/bin/my-script`.
See also the [](#trivial-builder-writeScriptBin) helper function.
```nix
writeTextFile {
name = "my-script";
text = ''
echo "hi"
'';
executable = true;
destination = "/bin/my-script";
}
```
:::
### `writeText` {#trivial-builder-writeText}
Write a text file to the Nix store.
`writeText` takes the following arguments:
`name` (String)
: The name used in the Nix store path.
`text` (String)
: The contents of the file.
The store path will include the name, and it will be a file.
::: {.example #ex-writeText}
# Usage of `writeText`
Write the string `Contents of File` to `/nix/store/<store path>`:
```nix
writeText "my-file"
''
Contents of File
'';
```
:::
This is equivalent to:
```nix
writeTextFile {
name = "my-file";
text = ''
Contents of File
'';
}
```
### `writeTextDir` {#trivial-builder-writeTextDir}
Write a text file within a subdirectory of the Nix store.
`writeTextDir` takes the following arguments:
`path` (String)
: The destination within the Nix store path under which to create the file.
`text` (String)
: The contents of the file.
The store path will be a directory.
::: {.example #ex-writeTextDir}
# Usage of `writeTextDir`
Write the string `Contents of File` to `/nix/store/<store path>/share/my-file`:
```nix
writeTextDir "share/my-file"
''
Contents of File
'';
```
:::
This is equivalent to:
```nix
writeTextFile {
name = "my-file";
text = ''
Contents of File
'';
destination = "share/my-file";
}
```
### `writeScript` {#trivial-builder-writeScript}
Write an executable script file to the Nix store.
`writeScript` takes the following arguments:
`name` (String)
: The name used in the Nix store path.
`text` (String)
: The contents of the file.
The created file is marked as executable.
The store path will include the name, and it will be a file.
::: {.example #ex-writeScript}
# Usage of `writeScript`
Write the string `Contents of File` to `/nix/store/<store path>` and make the file executable.
```nix
writeScript "my-file"
''
Contents of File
'';
```
:::
This is equivalent to:
```nix
writeTextFile { writeTextFile {
name = "my-file"; name = "my-file";
text = '' text = ''
Contents of File Contents of File
''; '';
executable = true; executable = true;
destination = "/bin/my-file";
} }
# Writes contents of file to /nix/store/<store path> ```
writeText "my-file"
''
Contents of File
'';
# Writes contents of file to /nix/store/<store path>/share/my-file
writeTextDir "share/my-file"
''
Contents of File
'';
# Writes my-file to /nix/store/<store path> and makes executable
writeScript "my-file"
''
Contents of File
'';
# Writes my-file to /nix/store/<store path>/bin/my-file and makes executable.
writeScriptBin "my-file"
''
Contents of File
'';
# Writes my-file to /nix/store/<store path> and makes executable.
writeShellScript "my-file"
''
Contents of File
'';
# Writes my-file to /nix/store/<store path>/bin/my-file and makes executable.
writeShellScriptBin "my-file"
''
Contents of File
'';
### `writeScriptBin` {#trivial-builder-writeScriptBin}
Write a script within a `bin` subdirectory of a directory in the Nix store.
This is for consistency with the convention of software packages placing executables under `bin`.
`writeScriptBin` takes the following arguments:
`name` (String)
: The name used in the Nix store path and within the file created under the store path.
`text` (String)
: The contents of the file.
The created file is marked as executable.
The file's contents will be put into `/nix/store/<store path>/bin/<name>`.
The store path will include the name, and it will be a directory.
::: {.example #ex-writeScriptBin}
# Usage of `writeScriptBin`
```nix
writeScriptBin "my-script"
''
echo "hi"
'';
```
:::
This is equivalent to:
```nix
writeTextFile {
name = "my-script";
text = ''
echo "hi"
'';
executable = true;
  destination = "/bin/my-script";
}
```
### `writeShellScript` {#trivial-builder-writeShellScript}
Write a Bash script to the store.
`writeShellScript` takes the following arguments:
`name` (String)
: The name used in the Nix store path.
`text` (String)
: The contents of the file.
The created file is marked as executable.
The store path will include the name, and it will be a file.
This function is almost exactly like [](#trivial-builder-writeScript), except that it prepends to the file a [shebang](https://en.wikipedia.org/wiki/Shebang_%28Unix%29) line that points to the version of Bash used in Nixpkgs.
<!-- this cannot be changed in practice, so there is no point pretending it's somehow generic -->
::: {.example #ex-writeShellScript}
# Usage of `writeShellScript`
```nix
writeShellScript "my-script"
''
echo "hi"
'';
```
:::
This is equivalent to:
```nix
writeTextFile {
name = "my-script";
text = ''
#! ${pkgs.runtimeShell}
echo "hi"
'';
executable = true;
}
```
### `writeShellScriptBin` {#trivial-builder-writeShellScriptBin}
Write a Bash script to a "bin" subdirectory of a directory in the Nix store.
`writeShellScriptBin` takes the following arguments:
`name` (String)
: The name used in the Nix store path and within the file generated under the store path.
`text` (String)
: The contents of the file.
The file's contents will be put into `/nix/store/<store path>/bin/<name>`.
The store path will include the name, and it will be a directory.
This function is a combination of [](#trivial-builder-writeShellScript) and [](#trivial-builder-writeScriptBin).
::: {.example #ex-writeShellScriptBin}
# Usage of `writeShellScriptBin`
```nix
writeShellScriptBin "my-script"
''
echo "hi"
'';
```
:::
This is equivalent to:
```nix
writeTextFile {
name = "my-script";
text = ''
#! ${pkgs.runtimeShell}
echo "hi"
'';
executable = true;
  destination = "/bin/my-script";
}
``` ```
## `concatTextFile`, `concatText`, `concatScript` {#trivial-builder-concatText} ## `concatTextFile`, `concatText`, `concatScript` {#trivial-builder-concatText}

View file

@ -80,6 +80,8 @@ Do _not_ use `dart run <package_name>`, as this will attempt to download depende
### Usage with nix-shell {#ssec-dart-applications-nix-shell} ### Usage with nix-shell {#ssec-dart-applications-nix-shell}
#### Using dependencies from the Nix store {#ssec-dart-applications-nix-shell-deps}
As `buildDartApplication` provides dependencies instead of `pub get`, Dart needs to be explicitly told where to find them. As `buildDartApplication` provides dependencies instead of `pub get`, Dart needs to be explicitly told where to find them.
Run the following commands in the source directory to configure Dart appropriately. Run the following commands in the source directory to configure Dart appropriately.
@ -103,6 +105,9 @@ flutter.buildFlutterApplication {
pname = "firmware-updater"; pname = "firmware-updater";
version = "unstable-2023-04-30"; version = "unstable-2023-04-30";
# To build for the Web, use the targetFlutterPlatform argument.
# targetFlutterPlatform = "web";
src = fetchFromGitHub { src = fetchFromGitHub {
owner = "canonical"; owner = "canonical";
repo = "firmware-updater"; repo = "firmware-updater";
@ -117,4 +122,15 @@ flutter.buildFlutterApplication {
### Usage with nix-shell {#ssec-dart-flutter-nix-shell} ### Usage with nix-shell {#ssec-dart-flutter-nix-shell}
See the [Dart documentation](#ssec-dart-applications-nix-shell) for nix-shell instructions. Flutter-specific `nix-shell` usage notes are included here. See the [Dart documentation](#ssec-dart-applications-nix-shell) for general `nix-shell` instructions.
#### Entering the shell {#ssec-dart-flutter-nix-shell-enter}
By default, dependencies for only the `targetFlutterPlatform` are available in the
build environment. This is useful for keeping closures small, but can be problematic
during development. It's common, for example, to build Web apps for Linux during
development to take advantage of native features such as stateful hot reload.
To enter a shell with all the usual target platforms available, use the `multiShell` attribute.
e.g. `nix-shell '<nixpkgs>' -A fluffychat-web.multiShell`.

View file

@ -86,9 +86,9 @@ One advantage is that when `pkgs.zlib` is updated, it will automatically update
postPatch = pkgs.lib.optionalString pkgs.stdenv.isDarwin '' postPatch = pkgs.lib.optionalString pkgs.stdenv.isDarwin ''
substituteInPlace configure \ substituteInPlace configure \
--replace '/usr/bin/libtool' 'ar' \ --replace-fail '/usr/bin/libtool' 'ar' \
--replace 'AR="libtool"' 'AR="ar"' \ --replace-fail 'AR="libtool"' 'AR="ar"' \
--replace 'ARFLAGS="-o"' 'ARFLAGS="-r"' --replace-fail 'ARFLAGS="-o"' 'ARFLAGS="-r"'
''; '';
}) })
``` ```

View file

@ -1,26 +1,41 @@
# Go {#sec-language-go} # Go {#sec-language-go}
## Go modules {#ssec-language-go} ## Building Go modules with `buildGoModule` {#ssec-language-go}
The function `buildGoModule` builds Go programs managed with Go modules. It builds a [Go Modules](https://github.com/golang/go/wiki/Modules) through a two phase build: The function `buildGoModule` builds Go programs managed with Go modules. It builds [Go Modules](https://github.com/golang/go/wiki/Modules) through a two phase build:
- An intermediate fetcher derivation. This derivation will be used to fetch all of the dependencies of the Go module. - An intermediate fetcher derivation called `goModules`. This derivation will be used to fetch all the dependencies of the Go module.
- A final derivation will use the output of the intermediate derivation to build the binaries and produce the final output. - A final derivation will use the output of the intermediate derivation to build the binaries and produce the final output.
### Attributes of `buildGoModule` {#buildgomodule-parameters}
The `buildGoModule` function accepts the following parameters in addition to the [attributes accepted by both Go builders](#ssec-go-common-attributes):
- `vendorHash`: is the hash of the output of the intermediate fetcher derivation (the dependencies of the Go modules).
`vendorHash` can be set to `null`.
In that case, rather than fetching the dependencies, the dependencies already vendored in the `vendor` directory of the source repo will be used.
To avoid updating this field when dependencies change, run `go mod vendor` in your source repo and set `vendorHash = null;`.
You can read more about [vendoring in the Go documentation](https://go.dev/ref/mod#vendoring).
To obtain the actual hash, set `vendorHash = lib.fakeHash;` and run the build ([more details here](#sec-source-hashes)).
- `proxyVendor`: If `true`, the intermediate fetcher downloads dependencies from the
[Go module proxy](https://go.dev/ref/mod#module-proxy) (using `go mod download`) instead of vendoring them. The resulting
[module cache](https://go.dev/ref/mod#module-cache) is then passed to the final derivation.
This is useful if your code depends on C code and `go mod tidy` does not include the needed sources to build or
if any dependency has case-insensitive conflicts which will produce platform-dependent `vendorHash` checksums.
Defaults to `false`.
- `modPostBuild`: Shell commands to run after the build of the goModules executes `go mod vendor`, and before calculating fixed output derivation's `vendorHash`.
Note that if you change this attribute, you need to update `vendorHash` attribute.
- `modRoot`: The root directory of the Go module that contains the `go.mod` file.
Defaults to `./`, which is the root of `src`.
### Example for `buildGoModule` {#ex-buildGoModule} ### Example for `buildGoModule` {#ex-buildGoModule}
In the following is an example expression using `buildGoModule`, the following arguments are of special significance to the function: The following is an example expression using `buildGoModule`:
- `vendorHash`: is the hash of the output of the intermediate fetcher derivation.
`vendorHash` can also be set to `null`.
In that case, rather than fetching the dependencies and vendoring them, the dependencies vendored in the source repo will be used.
To avoid updating this field when dependencies change, run `go mod vendor` in your source repo and set `vendorHash = null;`
To obtain the actual hash, set `vendorHash = lib.fakeHash;` and run the build ([more details here](#sec-source-hashes)).
- `proxyVendor`: Fetches (go mod download) and proxies the vendor directory. This is useful if your code depends on c code and go mod tidy does not include the needed sources to build or if any dependency has case-insensitive conflicts which will produce platform-dependent `vendorHash` checksums.
- `modPostBuild`: Shell commands to run after the build of the goModules executes `go mod vendor`, and before calculating fixed output derivation's `vendorHash`. Note that if you change this attribute, you need to update `vendorHash` attribute.
```nix ```nix
pet = buildGoModule rec { pet = buildGoModule rec {
@ -51,7 +66,7 @@ The function `buildGoPackage` builds legacy Go programs, not supporting Go modul
### Example for `buildGoPackage` {#example-for-buildgopackage} ### Example for `buildGoPackage` {#example-for-buildgopackage}
In the following is an example expression using buildGoPackage, the following arguments are of special significance to the function: In the following is an example expression using `buildGoPackage`, the following arguments are of special significance to the function:
- `goPackagePath` specifies the package's canonical Go import path. - `goPackagePath` specifies the package's canonical Go import path.
- `goDeps` is where the Go dependencies of a Go program are listed as a list of package source identified by Go import path. It could be imported as a separate `deps.nix` file for readability. The dependency data structure is described below. - `goDeps` is where the Go dependencies of a Go program are listed as a list of package source identified by Go import path. It could be imported as a separate `deps.nix` file for readability. The dependency data structure is described below.
@ -103,7 +118,7 @@ The `goDeps` attribute can be imported from a separate `nix` file that defines w
] ]
``` ```
To extract dependency information from a Go package in automated way use [go2nix](https://github.com/kamilchm/go2nix). It can produce complete derivation and `goDeps` file for Go programs. To extract dependency information from a Go package in automated way use [go2nix (deprecated)](https://github.com/kamilchm/go2nix). It can produce complete derivation and `goDeps` file for Go programs.
You may use Go packages installed into the active Nix profiles by adding the following to your ~/.bashrc: You may use Go packages installed into the active Nix profiles by adding the following to your ~/.bashrc:
@ -113,7 +128,7 @@ for p in $NIX_PROFILES; do
done done
``` ```
## Attributes used by the builders {#ssec-go-common-attributes} ## Attributes used by both builders {#ssec-go-common-attributes}
Many attributes [controlling the build phase](#variables-controlling-the-build-phase) are respected by both `buildGoModule` and `buildGoPackage`. Note that `buildGoModule` reads the following attributes also when building the `vendor/` goModules fixed output derivation as well: Many attributes [controlling the build phase](#variables-controlling-the-build-phase) are respected by both `buildGoModule` and `buildGoPackage`. Note that `buildGoModule` reads the following attributes also when building the `vendor/` goModules fixed output derivation as well:
@ -124,11 +139,18 @@ Many attributes [controlling the build phase](#variables-controlling-the-build-p
- [`postPatch`](#var-stdenv-postPatch) - [`postPatch`](#var-stdenv-postPatch)
- [`preBuild`](#var-stdenv-preBuild) - [`preBuild`](#var-stdenv-preBuild)
To control test execution of the build derivation, the following attributes are of interest:
- [`checkInputs`](#var-stdenv-checkInputs)
- [`preCheck`](#var-stdenv-preCheck)
- [`checkFlags`](#var-stdenv-checkFlags)
In addition to the above attributes, and the many more variables respected also by `stdenv.mkDerivation`, both `buildGoModule` and `buildGoPackage` respect Go-specific attributes that tweak them to behave slightly differently: In addition to the above attributes, and the many more variables respected also by `stdenv.mkDerivation`, both `buildGoModule` and `buildGoPackage` respect Go-specific attributes that tweak them to behave slightly differently:
### `ldflags` {#var-go-ldflags} ### `ldflags` {#var-go-ldflags}
Arguments to pass to the Go linker tool via the `-ldflags` argument of `go build`. The most common use case for this argument is to make the resulting executable aware of its own version. For example: A string list of flags to pass to the Go linker tool via the `-ldflags` argument of `go build`. Possible values can be retrieved by running `go tool link --help`.
The most common use case for this argument is to make the resulting executable aware of its own version by injecting the value of string variable using the `-X` flag. For example:
```nix ```nix
ldflags = [ ldflags = [
@ -139,7 +161,7 @@ Arguments to pass to the Go linker tool via the `-ldflags` argument of `go build
### `tags` {#var-go-tags} ### `tags` {#var-go-tags}
Arguments to pass to the Go via the `-tags` argument of `go build`. For example: A string list of [Go build tags (also called build constraints)](https://pkg.go.dev/cmd/go#hdr-Build_constraints) that are passed via the `-tags` argument of `go build`. These constraints control whether Go files from the source should be included in the build. For example:
```nix ```nix
tags = [ tags = [
@ -148,18 +170,101 @@ Arguments to pass to the Go via the `-tags` argument of `go build`. For example:
]; ];
``` ```
Tags can also be set conditionally:
```nix ```nix
tags = [ "production" ] ++ lib.optionals withSqlite [ "sqlite" ]; tags = [ "production" ] ++ lib.optionals withSqlite [ "sqlite" ];
``` ```
### `deleteVendor` {#var-go-deleteVendor} ### `deleteVendor` {#var-go-deleteVendor}
Removes the pre-existing vendor directory. This should only be used if the dependencies included in the vendor folder are broken or incomplete. If set to `true`, removes the pre-existing vendor directory. This should only be used if the dependencies included in the vendor folder are broken or incomplete.
### `subPackages` {#var-go-subPackages} ### `subPackages` {#var-go-subPackages}
Specified as a string or list of strings. Limits the builder from building child packages that have not been listed. If `subPackages` is not specified, all child packages will be built. Specified as a string or list of strings. Limits the builder from building child packages that have not been listed. If `subPackages` is not specified, all child packages will be built.
Many Go projects keep the main package in a `cmd` directory.
The following example could be used to only build the example-cli and example-server binaries:
```nix
subPackages = [
"cmd/example-cli"
"cmd/example-server"
];
```
### `excludedPackages` {#var-go-excludedPackages} ### `excludedPackages` {#var-go-excludedPackages}
Specified as a string or list of strings. Causes the builder to skip building child packages that match any of the provided values. If `excludedPackages` is not specified, all child packages will be built. Specified as a string or list of strings. Causes the builder to skip building child packages that match any of the provided values.
### `CGO_ENABLED` {#var-go-CGO_ENABLED}
When set to `0`, the [cgo](https://pkg.go.dev/cmd/cgo) command is disabled. As consequence, the build
program can't link against C libraries anymore, and the resulting binary is statically linked.
When building with CGO enabled, Go will likely link some packages from the Go standard library against C libraries,
even when the target code does not explicitly call into C dependencies. With `CGO_ENABLED = 0;`, Go
will always use the Go native implementation of these internal packages. For reference see
[net](https://pkg.go.dev/net#hdr-Name_Resolution) and [os/user](https://pkg.go.dev/os/user#pkg-overview) packages.
Notice that the decision whether these packages should use native Go implementation or not can also be controlled
on a per package level using build tags (`tags`). In case CGO is disabled, these tags have no additional effect.
When a Go program depends on C libraries, place those dependencies in `buildInputs`:
```nix
buildInputs = [
libvirt
libxml2
];
```
`CGO_ENABLED` defaults to `1`.
### `enableParallelBuilding` {#var-go-enableParallelBuilding}
Whether builds and tests should run in parallel.
Defaults to `true`.
### `allowGoReference` {#var-go-allowGoReference}
Whether the build result should be allowed to contain references to the Go tool chain. This might be needed for programs that are coupled with the compiler, but shouldn't be set without a good reason.
Defaults to `false`.
## Controlling the Go environment {#ssec-go-environment}
The Go build can be further tweaked by setting environment variables. In most cases, this isn't needed. Possible values can be found in the [Go documentation of accepted environment variables](https://pkg.go.dev/cmd/go#hdr-Environment_variables). Notice that some of these flags are set by the builder itself and should not be set explicitly. If in doubt, grep the implementation of the builder.
## Skipping tests {#ssec-skip-go-tests}
`buildGoModule` runs tests by default. Failing tests can be disabled using the `checkFlags` parameter.
This is done with the [`-skip` or `-run`](https://pkg.go.dev/cmd/go#hdr-Testing_flags) flags of the `go test` command.
For example, only a selection of tests could be run with:
```nix
# -run and -skip accept regular expressions
checkFlags = [
"-run=^Test(Simple|Fast)$"
];
```
If a larger amount of tests should be skipped, the following pattern can be used:
```nix
checkFlags =
let
# Skip tests that require network access
skippedTests = [
"TestNetwork"
"TestDatabase/with_mysql" # exclude only the subtest
"TestIntegration"
];
in
[ "-skip=^${builtins.concatStringsSep "$|^" skippedTests}$" ];
```
To disable tests altogether, set `doCheck = false;`.
`buildGoPackage` does not execute tests by default.

View file

@ -0,0 +1,47 @@
# Idris2 {#sec-idris2}
In addition to exposing the Idris2 compiler itself, Nixpkgs exposes an `idris2Packages.buildIdris` helper to make it a bit more ergonomic to build Idris2 executables or libraries.
The `buildIdris` function takes a package set that defines at a minimum the `src` and `projectName` of the package to be built and any `idrisLibraries` required to build it. The `src` is the same source you're familiar with but the `projectName` must be the name of the `ipkg` file for the project (omitting the `.ipkg` extension). The `idrisLibraries` is a list of other library derivations created with `buildIdris`. You can optionally specify other derivation properties as needed but sensible defaults for `configurePhase`, `buildPhase`, and `installPhase` are provided.
Importantly, `buildIdris` does not create a single derivation but rather an attribute set with two properties: `executable` and `library`. The `executable` property is a derivation and the `library` property is a function that will return a derivation for the library with or without source code included. Source code need not be included unless you are aiming to use IDE or LSP features that are able to jump to definitions within an editor.
A simple example of a fully packaged library would be the [`LSP-lib`](https://github.com/idris-community/LSP-lib) found in the `idris-community` GitHub organization.
```nix
{ fetchFromGitHub, idris2Packages }:
let lspLibPkg = idris2Packages.buildIdris {
projectName = "lsp-lib";
src = fetchFromGitHub {
owner = "idris-community";
repo = "LSP-lib";
rev = "main";
hash = "sha256-EvSyMCVyiy9jDZMkXQmtwwMoLaem1GsKVFqSGNNHHmY=";
};
idrisLibraries = [ ];
};
in lspLibPkg.library
```
The above results in a derivation with the installed library results (with source code).
A slightly more involved example of a fully packaged executable would be the [`idris2-lsp`](https://github.com/idris-community/idris2-lsp) which is an Idris2 language server that uses the `LSP-lib` found above.
```nix
{ callPackage, fetchFromGitHub, idris2Packages }:
# Assuming the previous example lives in `lsp-lib.nix`:
let lspLib = callPackage ./lsp-lib.nix { };
lspPkg = idris2Packages.buildIdris {
projectName = "idris2-lsp";
src = fetchFromGitHub {
owner = "idris-community";
repo = "idris2-lsp";
rev = "main";
hash = "sha256-vQTzEltkx7uelDtXOHc6QRWZ4cSlhhm5ziOqWA+aujk=";
};
idrisLibraries = [(idris2Packages.idris2Api { }) (lspLib { })];
};
in lspPkg.executable
```
The above uses the default value of `withSource = false` for both of the two required Idris libraries that the `idris2-lsp` executable depends on. `idris2Api` in the above derivation comes built in with `idris2Packages`. This library exposes many of the otherwise internal APIs of the Idris2 compiler.

View file

@ -21,6 +21,7 @@ go.section.md
haskell.section.md haskell.section.md
hy.section.md hy.section.md
idris.section.md idris.section.md
idris2.section.md
ios.section.md ios.section.md
java.section.md java.section.md
javascript.section.md javascript.section.md

View file

@ -354,6 +354,7 @@ mkYarnPackage rec {
- The `echo 9` steps comes from this answer: <https://stackoverflow.com/a/49139496> - The `echo 9` steps comes from this answer: <https://stackoverflow.com/a/49139496>
- Exporting the headers in `npm_config_nodedir` comes from this issue: <https://github.com/nodejs/node-gyp/issues/1191#issuecomment-301243919> - Exporting the headers in `npm_config_nodedir` comes from this issue: <https://github.com/nodejs/node-gyp/issues/1191#issuecomment-301243919>
- `offlineCache` (described [above](#javascript-yarn2nix-preparation)) must be specified to avoid [Import From Derivation](#ssec-import-from-derivation) (IFD) when used inside Nixpkgs.
## Outside Nixpkgs {#javascript-outside-nixpkgs} ## Outside Nixpkgs {#javascript-outside-nixpkgs}

View file

@ -26,6 +26,17 @@ It is important to import Qt modules directly, that is: `qtbase`, `qtdeclarative
Additionally all Qt packages must include `wrapQtAppsHook` in `nativeBuildInputs`, or you must explicitly set `dontWrapQtApps`. Additionally all Qt packages must include `wrapQtAppsHook` in `nativeBuildInputs`, or you must explicitly set `dontWrapQtApps`.
`pkgs.callPackage` does not provide injections for `qtbase` or the like.
Instead you want to either use `pkgs.libsForQt5.callPackage`, or `pkgs.qt6Packages.callPackage`, depending on the Qt version you want to use.
For example (from [here](https://github.com/NixOS/nixpkgs/blob/2f9286912cb215969ece465147badf6d07aa43fe/pkgs/top-level/all-packages.nix#L30106))
```nix
zeal-qt5 = libsForQt5.callPackage ../data/documentation/zeal { };
zeal-qt6 = qt6Packages.callPackage ../data/documentation/zeal { };
zeal = zeal-qt5;
```
## Locating runtime dependencies {#qt-runtime-dependencies} ## Locating runtime dependencies {#qt-runtime-dependencies}
Qt applications must be wrapped to find runtime dependencies. Qt applications must be wrapped to find runtime dependencies.

View file

@ -44,21 +44,22 @@ rustPlatform.buildRustPackage rec {
} }
``` ```
`buildRustPackage` requires either the `cargoSha256` or the `buildRustPackage` requires either the `cargoHash` or the `cargoSha256`
`cargoHash` attribute which is computed over all crate sources of this attribute which is computed over all crate sources of this package.
package. `cargoHash256` is used for traditional Nix SHA-256 hashes, `cargoSha256` is used for traditional Nix SHA-256 hashes. `cargoHash` should
such as the one in the example above. `cargoHash` should instead be instead be used for [SRI](https://www.w3.org/TR/SRI/) hashes and should be
used for [SRI](https://www.w3.org/TR/SRI/) hashes. For example: preferred. For example:
Exception: If the application has cargo `git` dependencies, the `cargoHash`/`cargoSha256`
approach will not work, and you will need to copy the `Cargo.lock` file of the application
to nixpkgs and continue with the next section for specifying the options of the`cargoLock`
section.
```nix ```nix
cargoHash = "sha256-l1vL2ZdtDRxSGvP0X/l3nMw8+6WF67KPutJEzUROjg8="; cargoHash = "sha256-l1vL2ZdtDRxSGvP0X/l3nMw8+6WF67KPutJEzUROjg8=";
``` ```
Exception: If the application has cargo `git` dependencies, the `cargoHash`/`cargoSha256`
approach will not work, and you will need to copy the `Cargo.lock` file of the application
to nixpkgs and continue with the next section for specifying the options of the `cargoLock`
section.
Both types of hashes are permitted when contributing to nixpkgs. The Both types of hashes are permitted when contributing to nixpkgs. The
Cargo hash is obtained by inserting a fake checksum into the Cargo hash is obtained by inserting a fake checksum into the
expression and building the package once. The correct checksum can expression and building the package once. The correct checksum can
@ -700,7 +701,7 @@ with import <nixpkgs> {};
hello = attrs: lib.optionalAttrs (lib.versionAtLeast attrs.version "1.0") { hello = attrs: lib.optionalAttrs (lib.versionAtLeast attrs.version "1.0") {
postPatch = '' postPatch = ''
substituteInPlace lib/zoneinfo.rs \ substituteInPlace lib/zoneinfo.rs \
--replace "/usr/share/zoneinfo" "${tzdata}/share/zoneinfo" --replace-fail "/usr/share/zoneinfo" "${tzdata}/share/zoneinfo"
''; '';
}; };
}; };

View file

@ -12,20 +12,6 @@ Both functions have an argument `kernelPatches` which should be a list of `{name
The kernel derivation created with `pkgs.buildLinux` exports an attribute `features` specifying whether optional functionality is or isnt enabled. This is used in NixOS to implement kernel-specific behaviour. The kernel derivation created with `pkgs.buildLinux` exports an attribute `features` specifying whether optional functionality is or isnt enabled. This is used in NixOS to implement kernel-specific behaviour.
:::{.example #ex-skip-package-from-kernel-feature}
# Skipping an external package because of a kernel feature
For instance, if the kernel has the `iwlwifi` feature (i.e., has built-in support for Intel wireless chipsets), then NixOS doesnt have to build the external `iwlwifi` package:
```nix
modulesTree = [kernel]
++ pkgs.lib.optional (!kernel.features ? iwlwifi) kernelPackages.iwlwifi
++ ...;
```
:::
If you are using a kernel packaged in Nixpkgs, you can customize it by overriding its arguments. For details on how each argument affects the generated kernel, refer to [the `pkgs.buildLinux` source code](https://github.com/NixOS/nixpkgs/blob/d77bda728d5041c1294a68fb25c79e2d161f62b9/pkgs/os-specific/linux/kernel/generic.nix). If you are using a kernel packaged in Nixpkgs, you can customize it by overriding its arguments. For details on how each argument affects the generated kernel, refer to [the `pkgs.buildLinux` source code](https://github.com/NixOS/nixpkgs/blob/d77bda728d5041c1294a68fb25c79e2d161f62b9/pkgs/os-specific/linux/kernel/generic.nix).
:::{.example #ex-overriding-kernel-derivation} :::{.example #ex-overriding-kernel-derivation}

View file

@ -8,4 +8,4 @@ HTTP has a couple of different mechanisms for caching to prevent clients from ha
Fortunately, HTTP supports an alternative (and more effective) caching mechanism: the [`ETag`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag) response header. The value of the `ETag` header specifies some identifier for the particular content that the server is sending (e.g., a hash). When a client makes a second request for the same resource, it sends that value back in an `If-None-Match` header. If the ETag value is unchanged, then the server does not need to resend the content. Fortunately, HTTP supports an alternative (and more effective) caching mechanism: the [`ETag`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag) response header. The value of the `ETag` header specifies some identifier for the particular content that the server is sending (e.g., a hash). When a client makes a second request for the same resource, it sends that value back in an `If-None-Match` header. If the ETag value is unchanged, then the server does not need to resend the content.
As of NixOS 19.09, the nginx package in Nixpkgs is patched such that when nginx serves a file out of `/nix/store`, the hash in the store path is used as the `ETag` header in the HTTP response, thus providing proper caching functionality. This happens automatically; you do not need to do modify any configuration to get this behavior. As of NixOS 19.09, the nginx package in Nixpkgs is patched such that when nginx serves a file out of `/nix/store`, the hash in the store path is used as the `ETag` header in the HTTP response, thus providing proper caching functionality. With NixOS 24.05 and later, the `ETag` additionally includes the response content length, to ensure files served with static compression do not share `ETag`s with their uncompressed version. This `ETag` functionality is enabled automatically; you do not need to do modify any configuration to get this behavior.

View file

@ -54,7 +54,7 @@ Some common issues when packaging software for Darwin:
# ... # ...
prePatch = '' prePatch = ''
substituteInPlace Makefile \ substituteInPlace Makefile \
--replace '/usr/bin/xcrun clang' clang --replace-fail '/usr/bin/xcrun clang' clang
''; '';
} }
``` ```

View file

@ -230,9 +230,9 @@ stdenv.mkDerivation rec {
postInstall = '' postInstall = ''
substituteInPlace $out/bin/solo5-virtio-mkimage \ substituteInPlace $out/bin/solo5-virtio-mkimage \
--replace "/usr/lib/syslinux" "${syslinux}/share/syslinux" \ --replace-fail "/usr/lib/syslinux" "${syslinux}/share/syslinux" \
--replace "/usr/share/syslinux" "${syslinux}/share/syslinux" \ --replace-fail "/usr/share/syslinux" "${syslinux}/share/syslinux" \
--replace "cp " "cp --no-preserve=mode " --replace-fail "cp " "cp --no-preserve=mode "
wrapProgram $out/bin/solo5-virtio-mkimage \ wrapProgram $out/bin/solo5-virtio-mkimage \
--prefix PATH : ${lib.makeBinPath [ dosfstools mtools parted syslinux ]} --prefix PATH : ${lib.makeBinPath [ dosfstools mtools parted syslinux ]}
@ -475,11 +475,11 @@ A script to be run by `maintainers/scripts/update.nix` when the package is match
```nix ```nix
passthru.updateScript = writeScript "update-zoom-us" '' passthru.updateScript = writeScript "update-zoom-us" ''
#!/usr/bin/env nix-shell #!/usr/bin/env nix-shell
#!nix-shell -i bash -p curl pcre common-updater-scripts #!nix-shell -i bash -p curl pcre2 common-updater-scripts
set -eu -o pipefail set -eu -o pipefail
version="$(curl -sI https://zoom.us/client/latest/zoom_x86_64.tar.xz | grep -Fi 'Location:' | pcregrep -o1 '/(([0-9]\.?)+)/')" version="$(curl -sI https://zoom.us/client/latest/zoom_x86_64.tar.xz | grep -Fi 'Location:' | pcre2grep -o1 '/(([0-9]\.?)+)/')"
update-source-version zoom-us "$version" update-source-version zoom-us "$version"
''; '';
``` ```
@ -1253,9 +1253,20 @@ postInstall = ''
Performs string substitution on the contents of \<infile\>, writing the result to \<outfile\>. The substitutions in \<subs\> are of the following form: Performs string substitution on the contents of \<infile\>, writing the result to \<outfile\>. The substitutions in \<subs\> are of the following form:
#### `--replace` \<s1\> \<s2\> {#fun-substitute-replace} #### `--replace-fail` \<s1\> \<s2\> {#fun-substitute-replace-fail}
Replace every occurrence of the string \<s1\> by \<s2\>. Replace every occurrence of the string \<s1\> by \<s2\>.
Will error if no change is made.
#### `--replace-warn` \<s1\> \<s2\> {#fun-substitute-replace-warn}
Replace every occurrence of the string \<s1\> by \<s2\>.
Will print a warning if no change is made.
#### `--replace-quiet` \<s1\> \<s2\> {#fun-substitute-replace-quiet}
Replace every occurrence of the string \<s1\> by \<s2\>.
Will do nothing if no change can be made.
#### `--subst-var` \<varName\> {#fun-substitute-subst-var} #### `--subst-var` \<varName\> {#fun-substitute-subst-var}
@ -1269,8 +1280,8 @@ Example:
```shell ```shell
substitute ./foo.in ./foo.out \ substitute ./foo.in ./foo.out \
--replace /usr/bin/bar $bar/bin/bar \ --replace-fail /usr/bin/bar $bar/bin/bar \
--replace "a string containing spaces" "some other text" \ --replace-fail "a string containing spaces" "some other text" \
--subst-var someVar --subst-var someVar
``` ```

View file

@ -62,7 +62,8 @@ rec {
is32bit = { cpu = { bits = 32; }; }; is32bit = { cpu = { bits = 32; }; };
is64bit = { cpu = { bits = 64; }; }; is64bit = { cpu = { bits = 64; }; };
isILP32 = map (a: { abi = { abi = a; }; }) [ "n32" "ilp32" "x32" ]; isILP32 = [ { cpu = { family = "wasm"; bits = 32; }; } ] ++
map (a: { abi = { abi = a; }; }) [ "n32" "ilp32" "x32" ];
isBigEndian = { cpu = { significantByte = significantBytes.bigEndian; }; }; isBigEndian = { cpu = { significantByte = significantBytes.bigEndian; }; };
isLittleEndian = { cpu = { significantByte = significantBytes.littleEndian; }; }; isLittleEndian = { cpu = { significantByte = significantBytes.littleEndian; }; };

View file

@ -599,6 +599,16 @@
githubId = 4732885; githubId = 4732885;
name = "Ivan Jager"; name = "Ivan Jager";
}; };
aikooo7 = {
name = "Diogo Fernandes";
email = "prozinhopro1973@gmail.com";
matrix = "@aikoo7:matrix.org";
github = "aikooo7";
githubId = 79667753;
keys = [{
fingerprint = "B0D7 2955 235F 6AB5 ACFA 1619 8C7F F5BB 1ADE F191";
}];
};
aiotter = { aiotter = {
email = "git@aiotter.com"; email = "git@aiotter.com";
github = "aiotter"; github = "aiotter";
@ -1278,6 +1288,7 @@
a-n-n-a-l-e-e = { a-n-n-a-l-e-e = {
github = "a-n-n-a-l-e-e"; github = "a-n-n-a-l-e-e";
githubId = 150648636; githubId = 150648636;
matrix = "@a-n-n-a-l-e-e:matrix.org";
name = "annalee"; name = "annalee";
}; };
anoa = { anoa = {
@ -1406,6 +1417,11 @@
fingerprint = "BF8B F725 DA30 E53E 7F11 4ED8 AAA5 0652 F047 9205"; fingerprint = "BF8B F725 DA30 E53E 7F11 4ED8 AAA5 0652 F047 9205";
}]; }];
}; };
appsforartists = {
github = "appsforartists";
githubId = 926648;
name = "Brenton Simpson";
};
apraga = { apraga = {
email = "alexis.praga@proton.me"; email = "alexis.praga@proton.me";
github = "apraga"; github = "apraga";
@ -2542,6 +2558,12 @@
githubId = 52386117; githubId = 52386117;
name = "Blusk"; name = "Blusk";
}; };
bmanuel = {
name = "Benjamin Manuel";
email = "ben@benmanuel.com";
github = "bmanuel";
githubId = 3662307;
};
bmilanov = { bmilanov = {
name = "Biser Milanov"; name = "Biser Milanov";
email = "bmilanov11+nixpkgs@gmail.com"; email = "bmilanov11+nixpkgs@gmail.com";
@ -2857,12 +2879,6 @@
githubId = 382011; githubId = 382011;
name = "c4605"; name = "c4605";
}; };
caadar = {
email = "v88m@posteo.net";
github = "caadar";
githubId = 15320726;
name = "Car Cdr";
};
caarlos0 = { caarlos0 = {
name = "Carlos A Becker"; name = "Carlos A Becker";
email = "carlos@becker.software"; email = "carlos@becker.software";
@ -3005,6 +3021,12 @@
fingerprint = "8BC7 74E4 A2EC 7507 3B61 A647 0BBB 1C8B 1C36 39EE"; fingerprint = "8BC7 74E4 A2EC 7507 3B61 A647 0BBB 1C8B 1C36 39EE";
}]; }];
}; };
carloscraveiro = {
email = "carlos.craveiro@usp.br";
github = "CarlosCraveiro";
githubId = 85318248;
name = "Carlos Henrique Craveiro Aquino Veras";
};
carlosdagos = { carlosdagos = {
email = "m@cdagostino.io"; email = "m@cdagostino.io";
github = "carlosdagos"; github = "carlosdagos";
@ -3395,8 +3417,7 @@
}; };
chuangzhu = { chuangzhu = {
name = "Chuang Zhu"; name = "Chuang Zhu";
email = "chuang@melty.land"; email = "nixos@chuang.cz";
matrix = "@chuangzhu:matrix.org";
github = "chuangzhu"; github = "chuangzhu";
githubId = 31200881; githubId = 31200881;
keys = [{ keys = [{
@ -3520,6 +3541,12 @@
githubId = 46303707; githubId = 46303707;
name = "Christian Lütke-Stetzkamp"; name = "Christian Lütke-Stetzkamp";
}; };
clr-cera = {
email = "clrcera05@gmail.com";
github = "clr-cera";
githubId = 93736542;
name = "Clr";
};
cmacrae = { cmacrae = {
email = "hi@cmacr.ae"; email = "hi@cmacr.ae";
github = "cmacrae"; github = "cmacrae";
@ -4659,12 +4686,28 @@
githubId = 13730968; githubId = 13730968;
name = "Justin Restivo"; name = "Justin Restivo";
}; };
dietmarw = {
name = "Dietmar Winkler";
email = "dietmar.winkler@dwe.no";
github = "dietmarw";
githubId = 9332;
};
diffumist = { diffumist = {
email = "git@diffumist.me"; email = "git@diffumist.me";
github = "Diffumist"; github = "Diffumist";
githubId = 32810399; githubId = 32810399;
name = "Diffumist"; name = "Diffumist";
}; };
diogotcorreia = {
name = "Diogo Correia";
email = "me@diogotc.com";
matrix = "@dtc:diogotc.com";
github = "diogotcorreia";
githubId = 7467891;
keys = [{
fingerprint = "111F 91B7 5F61 99D8 985B 4C70 12CF 31FD FF17 2B77";
}];
};
diogox = { diogox = {
name = "Diogo Xavier"; name = "Diogo Xavier";
github = "diogox"; github = "diogox";
@ -5212,6 +5255,12 @@
matrix = "@edrex:matrix.org"; matrix = "@edrex:matrix.org";
name = "Eric Drechsel"; name = "Eric Drechsel";
}; };
edswordsmith = {
email = "eduardo.espadeiro@tecnico.ulisboa.pt";
github = "EdSwordsmith";
githubId = 22300113;
name = "Eduardo Espadeiro";
};
eduarrrd = { eduarrrd = {
email = "e.bachmakov@gmail.com"; email = "e.bachmakov@gmail.com";
github = "eduarrrd"; github = "eduarrrd";
@ -6757,6 +6806,12 @@
githubId = 127353; githubId = 127353;
name = "Geoffrey Huntley"; name = "Geoffrey Huntley";
}; };
gigahawk = {
email = "Jasper Chan";
name = "jasperchan515@gmail.com";
github = "Gigahawk";
githubId = 10356230;
};
gigglesquid = { gigglesquid = {
email = "jack.connors@protonmail.com"; email = "jack.connors@protonmail.com";
github = "gigglesquid"; github = "gigglesquid";
@ -7377,6 +7432,12 @@
githubId = 1401179; githubId = 1401179;
name = "Guanpeng Xu"; name = "Guanpeng Xu";
}; };
herbetom = {
email = "nixos@tomherbers.de";
github = "herbetom";
githubId = 15121114;
name = "Tom Herbers";
};
hexa = { hexa = {
email = "hexa@darmstadt.ccc.de"; email = "hexa@darmstadt.ccc.de";
matrix = "@hexa:lossy.network"; matrix = "@hexa:lossy.network";
@ -10285,6 +10346,12 @@
githubId = 21087104; githubId = 21087104;
name = "Laurent Fainsin"; name = "Laurent Fainsin";
}; };
lavafroth = {
email = "lavafroth@protonmail.com";
github = "lavafroth";
githubId = 107522312;
name = "Himadri Bhattacharjee";
};
layus = { layus = {
email = "layus.on@gmail.com"; email = "layus.on@gmail.com";
github = "layus"; github = "layus";
@ -11323,6 +11390,12 @@
githubId = 1651325; githubId = 1651325;
name = "maralorn"; name = "maralorn";
}; };
marble = {
email = "nixpkgs@computer-in.love";
github = "cyber-murmel";
githubId = 30078229;
name = "marble";
};
marcovergueira = { marcovergueira = {
email = "vergueira.marco@gmail.com"; email = "vergueira.marco@gmail.com";
github = "marcovergueira"; github = "marcovergueira";
@ -11603,6 +11676,12 @@
githubId = 279868; githubId = 279868;
name = "Matti Kariluoma"; name = "Matti Kariluoma";
}; };
mattpolzin = {
email = "matt.polzin@gmail.com";
github = "mattpolzin";
githubId = 2075353;
name = "Matt Polzin";
};
matt-snider = { matt-snider = {
email = "matt.snider@protonmail.com"; email = "matt.snider@protonmail.com";
github = "matt-snider"; github = "matt-snider";
@ -12035,6 +12114,12 @@
github = "michaelBelsanti"; github = "michaelBelsanti";
githubId = 62124625; githubId = 62124625;
}; };
michaelBrunner = {
email = "michael.brunn3r@gmail.com";
name = "Michael Brunner";
github = "MichaelBrunn3r";
githubId = 19626539;
};
michaelCTS = { michaelCTS = {
email = "michael.vogel@cts.co"; email = "michael.vogel@cts.co";
name = "Michael Vogel"; name = "Michael Vogel";
@ -12309,6 +12394,12 @@
fingerprint = "7088 C742 1873 E0DB 97FF 17C2 245C AB70 B4C2 25E9"; fingerprint = "7088 C742 1873 E0DB 97FF 17C2 245C AB70 B4C2 25E9";
}]; }];
}; };
mistydemeo = {
email = "misty@axo.dev";
github = "mistydemeo";
githubId = 780485;
name = "Misty De Méo";
};
misuzu = { misuzu = {
email = "bakalolka@gmail.com"; email = "bakalolka@gmail.com";
github = "misuzu"; github = "misuzu";
@ -13615,6 +13706,13 @@
githubId = 13149442; githubId = 13149442;
name = "Nico Pulido-Mateo"; name = "Nico Pulido-Mateo";
}; };
nrabulinski = {
email = "1337-nix@nrab.lol";
matrix = "@niko:nrab.lol";
github = "nrabulinski";
githubId = 24574288;
name = "Nikodem Rabuliński";
};
nrdxp = { nrdxp = {
email = "tim.deh@pm.me"; email = "tim.deh@pm.me";
matrix = "@timdeh:matrix.org"; matrix = "@timdeh:matrix.org";
@ -14258,6 +14356,12 @@
githubId = 15645854; githubId = 15645854;
name = "Brad Christensen"; name = "Brad Christensen";
}; };
patwid = {
email = "patrick.widmer@tbwnet.ch";
github = "patwid";
githubId = 25278658;
name = "Patrick Widmer";
};
paulsmith = { paulsmith = {
email = "paulsmith@pobox.com"; email = "paulsmith@pobox.com";
github = "paulsmith"; github = "paulsmith";
@ -14517,6 +14621,12 @@
githubId = 5737016; githubId = 5737016;
name = "Philipp Schuster"; name = "Philipp Schuster";
}; };
phlip9 = {
email = "philiphayes9@gmail.com";
github = "phlip9";
githubId = 918989;
name = "Philip Hayes";
};
Phlogistique = { Phlogistique = {
email = "noe.rubinstein@gmail.com"; email = "noe.rubinstein@gmail.com";
github = "Phlogistique"; github = "Phlogistique";
@ -15445,6 +15555,16 @@
githubId = 1891350; githubId = 1891350;
name = "Michael Raskin"; name = "Michael Raskin";
}; };
ratcornu = {
email = "ratcornu@skaven.org";
github = "RatCornu";
githubId = 98173832;
name = "Balthazar Patiachvili";
matrix = "@ratcornu:skweel.skaven.org";
keys = [{
fingerprint = "1B91 F087 3D06 1319 D3D0 7F91 FA47 BDA2 6048 9ADA";
}];
};
ratsclub = { ratsclub = {
email = "victor@freire.dev.br"; email = "victor@freire.dev.br";
github = "ratsclub"; github = "ratsclub";
@ -17213,6 +17333,17 @@
githubId = 3789764; githubId = 3789764;
name = "skykanin"; name = "skykanin";
}; };
skyrina = {
email = "sorryu02@gmail.com";
github = "skyrina";
githubId = 116099351;
name = "Skylar";
};
slam-bert = {
github = "slam-bert";
githubId = 106779009;
name = "Slambert";
};
slbtty = { slbtty = {
email = "shenlebantongying@gmail.com"; email = "shenlebantongying@gmail.com";
github = "shenlebantongying"; github = "shenlebantongying";
@ -18395,6 +18526,15 @@
fingerprint = "D2A2 F0A1 E7A8 5E6F B711 DEE5 63A4 4817 A52E AB7B"; fingerprint = "D2A2 F0A1 E7A8 5E6F B711 DEE5 63A4 4817 A52E AB7B";
}]; }];
}; };
theaninova = {
name = "Thea Schöbl";
email = "dev@theaninova.de";
github = "Theaninova";
githubId = 19289296;
keys = [{
fingerprint = "6C9E EFC5 1AE0 0131 78DE B9C8 68FF FB1E C187 88CA";
}];
};
the-argus = { the-argus = {
email = "i.mcfarlane2002@gmail.com"; email = "i.mcfarlane2002@gmail.com";
github = "the-argus"; github = "the-argus";
@ -19194,6 +19334,11 @@
githubId = 1607770; githubId = 1607770;
name = "Ulrik Strid"; name = "Ulrik Strid";
}; };
umlx5h = {
github = "umlx5h";
githubId = 20206121;
name = "umlx5h";
};
unclamped = { unclamped = {
name = "Maru"; name = "Maru";
email = "clear6860@tutanota.com"; email = "clear6860@tutanota.com";
@ -20864,6 +21009,12 @@
githubId = 81353; githubId = 81353;
name = "Alexandre Macabies"; name = "Alexandre Macabies";
}; };
zoriya = {
email = "zoe.roux@zoriya.dev";
github = "zoriya";
githubId = 32224410;
name = "Zoe Roux";
};
zowoq = { zowoq = {
github = "zowoq"; github = "zowoq";
githubId = 59103226; githubId = 59103226;

View file

@ -1,4 +1,5 @@
#!/usr/bin/env bash #!/usr/bin/env nix-shell
#!nix-shell -i bash -p jq -I nixpkgs=../../../..
set -o pipefail -o errexit -o nounset set -o pipefail -o errexit -o nounset
@ -13,6 +14,7 @@ cleanup() {
[[ -e "$tmp/base" ]] && git worktree remove --force "$tmp/base" [[ -e "$tmp/base" ]] && git worktree remove --force "$tmp/base"
[[ -e "$tmp/merged" ]] && git worktree remove --force "$tmp/merged" [[ -e "$tmp/merged" ]] && git worktree remove --force "$tmp/merged"
[[ -e "$tmp/tool-nixpkgs" ]] && git worktree remove --force "$tmp/tool-nixpkgs"
rm -rf "$tmp" rm -rf "$tmp"
@ -61,7 +63,20 @@ trace -n "Merging base branch into the HEAD commit in $tmp/merged.. "
git -C "$tmp/merged" merge -q --no-edit "$baseSha" git -C "$tmp/merged" merge -q --no-edit "$baseSha"
trace -e "\e[34m$(git -C "$tmp/merged" rev-parse HEAD)\e[0m" trace -e "\e[34m$(git -C "$tmp/merged" rev-parse HEAD)\e[0m"
"$tmp/merged/pkgs/test/nixpkgs-check-by-name/scripts/fetch-tool.sh" "$baseBranch" "$tmp/tool" trace -n "Reading pinned nixpkgs-check-by-name revision from pinned-tool.json.. "
toolSha=$(jq -r .rev "$tmp/merged/pkgs/test/nixpkgs-check-by-name/scripts/pinned-tool.json")
trace -e "\e[34m$toolSha\e[0m"
trace -n "Creating Git worktree for the nixpkgs-check-by-name revision in $tmp/tool-nixpkgs.. "
git worktree add -q "$tmp/tool-nixpkgs" "$toolSha"
trace "Done"
trace "Building/fetching nixpkgs-check-by-name.."
nix-build -o "$tmp/tool" "$tmp/tool-nixpkgs" \
-A tests.nixpkgs-check-by-name \
--arg config '{}' \
--arg overlays '[]' \
-j 0
trace "Running nixpkgs-check-by-name.." trace "Running nixpkgs-check-by-name.."
"$tmp/tool/bin/nixpkgs-check-by-name" --base "$tmp/base" "$tmp/merged" "$tmp/tool/bin/nixpkgs-check-by-name" --base "$tmp/base" "$tmp/merged"

View file

@ -307,6 +307,20 @@ with lib.maintainers; {
shortName = "Flying Circus employees"; shortName = "Flying Circus employees";
}; };
formatter = {
members = [
piegames
infinisil
das_j
tomberek
_0x4A6F
# Not in the maintainer list
# Sereja313
];
scope = "Tentative Nix formatter team to be established in https://github.com/NixOS/rfcs/pull/166";
shortName = "Nix formatter team";
};
freedesktop = { freedesktop = {
members = [ jtojnar ]; members = [ jtojnar ];
scope = "Maintain Freedesktop.org packages for graphical desktop."; scope = "Maintain Freedesktop.org packages for graphical desktop.";

View file

@ -77,7 +77,7 @@ Linux foo 3.4.82 #1-NixOS SMP Thu Mar 20 14:44:05 UTC 2014 x86_64 GNU/Linux
There are several ways to change the configuration of the container. There are several ways to change the configuration of the container.
First, on the host, you can edit First, on the host, you can edit
`/var/lib/container/name/etc/nixos/configuration.nix`, and run `/var/lib/nixos-containers/foo/etc/nixos/configuration.nix`, and run
```ShellSession ```ShellSession
# nixos-container update foo # nixos-container update foo

View file

@ -92,6 +92,24 @@ To use your custom kernel package in your NixOS configuration, set
boot.kernelPackages = pkgs.linuxPackagesFor yourCustomKernel; boot.kernelPackages = pkgs.linuxPackagesFor yourCustomKernel;
``` ```
## Rust {#sec-linux-rust}
The Linux kernel does not have Rust language support enabled by
default. For kernel versions 6.7 or newer, experimental Rust support
can be enabled. In a NixOS configuration, set:
```nix
boot.kernelPatches = [
{
name = "Rust Support";
patch = null;
features = {
rust = true;
};
}
];
```
## Developing kernel modules {#sec-linux-config-developing-modules} ## Developing kernel modules {#sec-linux-config-developing-modules}
This section was moved to the [Nixpkgs manual](https://nixos.org/nixpkgs/manual#sec-linux-kernel-developing-modules). This section was moved to the [Nixpkgs manual](https://nixos.org/nixpkgs/manual#sec-linux-kernel-developing-modules).

View file

@ -29,6 +29,7 @@ profiles/graphical.section.md
profiles/hardened.section.md profiles/hardened.section.md
profiles/headless.section.md profiles/headless.section.md
profiles/installation-device.section.md profiles/installation-device.section.md
profiles/perlless.section.md
profiles/minimal.section.md profiles/minimal.section.md
profiles/qemu-guest.section.md profiles/qemu-guest.section.md
``` ```

View file

@ -0,0 +1,11 @@
# Perlless {#sec-perlless}
::: {.warning}
If you enable this profile, you will NOT be able to switch to a new
configuration and thus you will not be able to rebuild your system with
nixos-rebuild!
:::
Render your system completely perlless (i.e. without the perl interpreter). This
includes a mechanism so that your build fails if it contains a Nix store path
that references the string "perl".

View file

@ -38,8 +38,6 @@ The file system can be configured in NixOS via the usual [fileSystems](#opt-file
Here's a typical setup: Here's a typical setup:
```nix ```nix
{ {
system.fsPackages = [ pkgs.sshfs ];
fileSystems."/mnt/my-dir" = { fileSystems."/mnt/my-dir" = {
device = "my-user@example.com:/my-dir/"; device = "my-user@example.com:/my-dir/";
fsType = "sshfs"; fsType = "sshfs";

View file

@ -89,3 +89,18 @@ A user can be deleted using `userdel`:
The flag `-r` deletes the user's home directory. Accounts can be The flag `-r` deletes the user's home directory. Accounts can be
modified using `usermod`. Unix groups can be managed using `groupadd`, modified using `usermod`. Unix groups can be managed using `groupadd`,
`groupmod` and `groupdel`. `groupmod` and `groupdel`.
## Create users and groups with `systemd-sysusers` {#sec-systemd-sysusers}
::: {.note}
This is experimental.
:::
Instead of using a custom perl script to create users and groups, you can use
systemd-sysusers:
```nix
systemd.sysusers.enable = true;
```
The primary benefit of this is to remove a dependency on perl.

View file

@ -0,0 +1,36 @@
# `/etc` via overlay filesystem {#sec-etc-overlay}
::: {.note}
This is experimental and requires a kernel version >= 6.6 because it uses
new overlay features and relies on the new mount API.
:::
Instead of using a custom perl script to activate `/etc`, you activate it via an
overlay filesystem:
```nix
system.etc.overlay.enable = true;
```
Using an overlay has two benefits:
1. it removes a dependency on perl
2. it makes activation faster (up to a few seconds)
By default, the `/etc` overlay is mounted writable (i.e. there is a writable
upper layer). However, you can also mount `/etc` immutably (i.e. read-only) by
setting:
```nix
system.etc.overlay.mutable = false;
```
The overlay is atomically replaced during system switch. However, files that
have been modified will NOT be overwritten. This is the biggest change compared
to the perl-based system.
If you manually make changes to `/etc` on your system and then switch to a new
configuration where `system.etc.overlay.mutable = false;`, you will not be able
to see the previously made changes in `/etc` anymore. However the changes are
not completely gone, they are still in the upperdir of the previous overlay in
`/.rw-etc/upper`.

View file

@ -63,3 +63,42 @@ checks:
is **restart**ed with the others. If it is set, both the service and the is **restart**ed with the others. If it is set, both the service and the
socket are **stop**ped and the socket is **start**ed, leaving socket socket are **stop**ped and the socket is **start**ed, leaving socket
activation to start the service when it's needed. activation to start the service when it's needed.
## Sysinit reactivation {#sec-sysinit-reactivation}
[`sysinit.target`](https://www.freedesktop.org/software/systemd/man/latest/systemd.special.html#sysinit.target)
is a systemd target that encodes system initialization (i.e. early startup). A
few units that need to run very early in the bootup process are ordered to
finish before this target is reached. Probably the most notable one of these is
`systemd-tmpfiles-setup.service`. We will refer to these units as "sysinit
units".
"Normal" systemd units, by default, are ordered AFTER `sysinit.target`. In
other words, these "normal" units expect all services ordered before
`sysinit.target` to have finished without explicity declaring this dependency
relationship for each dependency. See the [systemd
bootup](https://www.freedesktop.org/software/systemd/man/latest/bootup.html)
for more details on the bootup process.
When restarting both a unit ordered before `sysinit.target` as well as one
after, this presents a problem because they would be started at the same time
as they do not explicitly declare their dependency relations.
To solve this, NixOS has an artificial `sysinit-reactivation.target` which
allows you to ensure that services ordered before `sysinit.target` are
restarted correctly. This applies both to the ordering between these sysinit
services as well as ensuring that sysinit units are restarted before "normal"
units.
To make an existing sysinit service restart correctly during system switch, you
have to declare:
```nix
systemd.services.my-sysinit = {
requiredBy = [ "sysinit-reactivation.target" ];
before = [ "sysinit-reactivation.target" ];
restartTriggers = [ config.environment.etc."my-sysinit.d".source ];
};
```
You need to configure appropriate `restartTriggers` specific to your service.

View file

@ -37,7 +37,7 @@ of actions is always the same:
- Forget about the failed state of units (`systemctl reset-failed`) - Forget about the failed state of units (`systemctl reset-failed`)
- Reload systemd (`systemctl daemon-reload`) - Reload systemd (`systemctl daemon-reload`)
- Reload systemd user instances (`systemctl --user daemon-reload`) - Reload systemd user instances (`systemctl --user daemon-reload`)
- Set up tmpfiles (`systemd-tmpfiles --create`) - Reactivate sysinit (`systemctl restart sysinit-reactivation.target`)
- Reload units (`systemctl reload`) - Reload units (`systemctl reload`)
- Restart units (`systemctl restart`) - Restart units (`systemctl restart`)
- Start units (`systemctl start`) - Start units (`systemctl start`)
@ -56,4 +56,5 @@ explained in the next sections.
unit-handling.section.md unit-handling.section.md
activation-script.section.md activation-script.section.md
non-switchable-systems.section.md non-switchable-systems.section.md
etc-overlay.section.md
``` ```

View file

@ -35,7 +35,7 @@ select the image, select the USB flash drive and click "Write".
4. Then use the `dd` utility to write the image to the USB flash drive. 4. Then use the `dd` utility to write the image to the USB flash drive.
```ShellSession ```ShellSession
sudo dd if=<path-to-image> of=/dev/sdX bs=4M conv=fsync sudo dd bs=4M conv=fsync oflag=direct status=progress if=<path-to-image> of=/dev/sdX
``` ```
## Creating bootable USB flash drive from a Terminal on macOS {#sec-booting-from-usb-macos} ## Creating bootable USB flash drive from a Terminal on macOS {#sec-booting-from-usb-macos}

View file

@ -10,7 +10,7 @@ In addition to numerous new and upgraded packages, this release has the followin
- `screen`'s module has been cleaned, and will now require you to set `programs.screen.enable` in order to populate `screenrc` and add the program to the environment. - `screen`'s module has been cleaned, and will now require you to set `programs.screen.enable` in order to populate `screenrc` and add the program to the environment.
- `linuxPackages_testing_bcachefs` is now fully deprecated by `linuxPackages_testing`, and is therefore no longer available. - `linuxPackages_testing_bcachefs` is now fully deprecated by `linuxPackages_latest`, and is therefore no longer available.
- NixOS now installs a stub ELF loader that prints an informative error message when users attempt to run binaries not made for NixOS. - NixOS now installs a stub ELF loader that prints an informative error message when users attempt to run binaries not made for NixOS.
- This can be disabled through the `environment.stub-ld.enable` option. - This can be disabled through the `environment.stub-ld.enable` option.
@ -18,6 +18,22 @@ In addition to numerous new and upgraded packages, this release has the followin
- Julia environments can now be built with arbitrary packages from the ecosystem using the `.withPackages` function. For example: `julia.withPackages ["Plots"]`. - Julia environments can now be built with arbitrary packages from the ecosystem using the `.withPackages` function. For example: `julia.withPackages ["Plots"]`.
- A new option `systemd.sysusers.enable` was added. If enabled, users and
groups are created with systemd-sysusers instead of with a custom perl script.
- A new option `system.etc.overlay.enable` was added. If enabled, `/etc` is
mounted via an overlayfs instead of being created by a custom perl script.
- It is now possible to have a completely perlless system (i.e. a system
without perl). Previously, the NixOS activation depended on two perl scripts
which can now be replaced via an opt-in mechanism. To make your system
perlless, you can use the new perlless profile:
```
{ modulesPath, ... }: {
imports = [ "${modulesPath}/profiles/perlless.nix" ];
}
```
## New Services {#sec-release-24.05-new-services} ## New Services {#sec-release-24.05-new-services}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. --> <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@ -37,12 +53,18 @@ In addition to numerous new and upgraded packages, this release has the followin
- [Anki Sync Server](https://docs.ankiweb.net/sync-server.html), the official sync server built into recent versions of Anki. Available as [services.anki-sync-server](#opt-services.anki-sync-server.enable). - [Anki Sync Server](https://docs.ankiweb.net/sync-server.html), the official sync server built into recent versions of Anki. Available as [services.anki-sync-server](#opt-services.anki-sync-server.enable).
The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been marked deprecated and will be dropped after 24.05 due to lack of maintenance of the anki-sync-server softwares. The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been marked deprecated and will be dropped after 24.05 due to lack of maintenance of the anki-sync-server softwares.
- [Suwayomi Server](https://github.com/Suwayomi/Suwayomi-Server), a free and open source manga reader server that runs extensions built for [Tachiyomi](https://tachiyomi.org). Available as [services.suwayomi-server](#opt-services.suwayomi-server.enable).
- [ping_exporter](https://github.com/czerwonk/ping_exporter), a Prometheus exporter for ICMP echo requests. Available as [services.prometheus.exporters.ping](#opt-services.prometheus.exporters.ping.enable). - [ping_exporter](https://github.com/czerwonk/ping_exporter), a Prometheus exporter for ICMP echo requests. Available as [services.prometheus.exporters.ping](#opt-services.prometheus.exporters.ping.enable).
- [TigerBeetle](https://tigerbeetle.com/), a distributed financial accounting database designed for mission critical safety and performance. Available as [services.tigerbeetle](#opt-services.tigerbeetle.enable).
- [Clevis](https://github.com/latchset/clevis), a pluggable framework for automated decryption, used to unlock encrypted devices in initrd. Available as [boot.initrd.clevis.enable](#opt-boot.initrd.clevis.enable). - [Clevis](https://github.com/latchset/clevis), a pluggable framework for automated decryption, used to unlock encrypted devices in initrd. Available as [boot.initrd.clevis.enable](#opt-boot.initrd.clevis.enable).
- [TuxClocker](https://github.com/Lurkki14/tuxclocker), a hardware control and monitoring program. Available as [programs.tuxclocker](#opt-programs.tuxclocker.enable). - [TuxClocker](https://github.com/Lurkki14/tuxclocker), a hardware control and monitoring program. Available as [programs.tuxclocker](#opt-programs.tuxclocker.enable).
- [RustDesk](https://rustdesk.com), a full-featured open source remote control alternative for self-hosting and security with minimal configuration. Alternative to TeamViewer.
## Backward Incompatibilities {#sec-release-24.05-incompatibilities} ## Backward Incompatibilities {#sec-release-24.05-incompatibilities}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. --> <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@ -63,10 +85,21 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
- Invidious has changed its default database username from `kemal` to `invidious`. Setups involving an externally provisioned database (i.e. `services.invidious.database.createLocally == false`) should adjust their configuration accordingly. The old `kemal` user will not be removed automatically even when the database is provisioned automatically.(https://github.com/NixOS/nixpkgs/pull/265857) - Invidious has changed its default database username from `kemal` to `invidious`. Setups involving an externally provisioned database (i.e. `services.invidious.database.createLocally == false`) should adjust their configuration accordingly. The old `kemal` user will not be removed automatically even when the database is provisioned automatically.(https://github.com/NixOS/nixpkgs/pull/265857)
- `inetutils` now has a lower priority to avoid shadowing the commonly used `util-linux`. If one wishes to restore the default priority, simply use `lib.setPrio 5 inetutils` or override with `meta.priority = 5`.
- `paperless`' `services.paperless.extraConfig` setting has been removed and converted to the freeform type and option named `services.paperless.settings`. - `paperless`' `services.paperless.extraConfig` setting has been removed and converted to the freeform type and option named `services.paperless.settings`.
- `mkosi` was updated to v19. Parts of the user interface have changed. Consult the - The legacy and long deprecated systemd target `network-interfaces.target` has been removed. Use `network.target` instead.
[release notes](https://github.com/systemd/mkosi/releases/tag/v19) for a list of changes.
- `services.frp.settings` now generates the frp configuration file in TOML format as [recommended by upstream](https://github.com/fatedier/frp#configuration-files), instead of the legacy INI format. This has also introduced other changes in the configuration file structure and options.
- The `settings.common` section in the configuration is no longer valid and all the options form inside it now goes directly under `settings`.
- The `_` separating words in the configuration options is removed so the options are now in camel case. For example: `server_addr` becomes `serverAddr`, `server_port` becomes `serverPort` etc.
- Proxies are now defined with a new option `settings.proxies` which takes a list of proxies.
- Consult the [upstream documentation](https://github.com/fatedier/frp#example-usage) for more details on the changes.
- `mkosi` was updated to v20. Parts of the user interface have changed. Consult the
release notes of [v19](https://github.com/systemd/mkosi/releases/tag/v19) and
[v20](https://github.com/systemd/mkosi/releases/tag/v20) for a list of changes.
- `services.nginx` will no longer advertise HTTP/3 availability automatically. This must now be manually added, preferably to each location block. - `services.nginx` will no longer advertise HTTP/3 availability automatically. This must now be manually added, preferably to each location block.
Example: Example:
@ -82,25 +115,59 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
``` ```
- The `kanata` package has been updated to v1.5.0, which includes [breaking changes](https://github.com/jtroo/kanata/releases/tag/v1.5.0). - The `kanata` package has been updated to v1.5.0, which includes [breaking changes](https://github.com/jtroo/kanata/releases/tag/v1.5.0).
- The `craftos-pc` package has been updated to v2.8, which includes [breaking changes](https://github.com/MCJack123/craftos2/releases/tag/v2.8).
- Files are now handled in binary mode; this could break programs with embedded UTF-8 characters.
- The ROM was updated to match ComputerCraft version v1.109.2.
- The bundled Lua was updated to Lua v5.2, which includes breaking changes. See the [Lua manual](https://www.lua.org/manual/5.2/manual.html#8) for more information.
- The WebSocket API [was rewritten](https://github.com/MCJack123/craftos2/issues/337), which introduced breaking changes.
- The latest available version of Nextcloud is v28 (available as `pkgs.nextcloud28`). The installation logic is as follows: - The latest available version of Nextcloud is v28 (available as `pkgs.nextcloud28`). The installation logic is as follows:
- If [`services.nextcloud.package`](#opt-services.nextcloud.package) is specified explicitly, this package will be installed (**recommended**) - If [`services.nextcloud.package`](#opt-services.nextcloud.package) is specified explicitly, this package will be installed (**recommended**)
- If [`system.stateVersion`](#opt-system.stateVersion) is >=24.05, `pkgs.nextcloud28` will be installed by default. - If [`system.stateVersion`](#opt-system.stateVersion) is >=24.05, `pkgs.nextcloud28` will be installed by default.
- If [`system.stateVersion`](#opt-system.stateVersion) is >=23.11, `pkgs.nextcloud27` will be installed by default. - If [`system.stateVersion`](#opt-system.stateVersion) is >=23.11, `pkgs.nextcloud27` will be installed by default.
- Please note that an upgrade from v26 (or older) to v28 directly is not possible. Please upgrade to `nextcloud27` (or earlier) first. Nextcloud prohibits skipping major versions while upgrading. You can upgrade by declaring [`services.nextcloud.package = pkgs.nextcloud27;`](options.html#opt-services.nextcloud.package). - Please note that an upgrade from v26 (or older) to v28 directly is not possible. Please upgrade to `nextcloud27` (or earlier) first. Nextcloud prohibits skipping major versions while upgrading. You can upgrade by declaring [`services.nextcloud.package = pkgs.nextcloud27;`](options.html#opt-services.nextcloud.package).
- The vendored third party libraries have been mostly removed from `cudaPackages.nsight_systems`, which we now only ship for `cudaPackages_11_8` and later due to outdated dependencies. Users comfortable with the vendored dependencies may use `overrideAttrs` to amend the `postPatch` phase and the `meta.broken` correspondingly. Alternatively, one could package the deprecated `boost170` locally, as required for `cudaPackages_11_4.nsight_systems`.
- The `cudaPackages` package scope has been updated to `cudaPackages_12`.
- `services.resolved.fallbackDns` can now be used to disable the upstream fallback servers entirely by setting it to an empty list. To get the previous behaviour of the upstream defaults set it to null, the new default, instead. - `services.resolved.fallbackDns` can now be used to disable the upstream fallback servers entirely by setting it to an empty list. To get the previous behaviour of the upstream defaults set it to null, the new default, instead.
- `xxd` has been moved from `vim` default output to its own output to reduce closure size. The canonical way to reference it across all platforms is `unixtools.xxd`.
- `services.avahi.nssmdns` got split into `services.avahi.nssmdns4` and `services.avahi.nssmdns6` which enable the mDNS NSS switch for IPv4 and IPv6 respectively. - `services.avahi.nssmdns` got split into `services.avahi.nssmdns4` and `services.avahi.nssmdns6` which enable the mDNS NSS switch for IPv4 and IPv6 respectively.
Since most mDNS responders only register IPv4 addresses, most users want to keep the IPv6 support disabled to avoid long timeouts. Since most mDNS responders only register IPv4 addresses, most users want to keep the IPv6 support disabled to avoid long timeouts.
- `multi-user.target` no longer depends on `network-online.target`.
This will potentially break services that assumed this was the case in the past.
This was changed for consistency with other distributions as well as improved boot times.
We have added a warning for services that are
`after = [ "network-online.target" ]` but do not depend on it (e.g. using `wants`).
- `services.archisteamfarm` no longer uses the abbreviation `asf` for its state directory (`/var/lib/asf`), user and group (both `asf`). Instead the long name `archisteamfarm` is used.
Configurations with `system.stateVersion` 23.11 or earlier default to the old state directory until the 24.11 release, and must either set the option explicitly or move the data to the new directory.
- `networking.iproute2.enable` now does not set `environment.etc."iproute2/rt_tables".text`. - `networking.iproute2.enable` now does not set `environment.etc."iproute2/rt_tables".text`.
Setting `environment.etc."iproute2/{CONFIG_FILE_NAME}".text` will override the whole configuration file instead of appending it to the upstream configuration file. Setting `environment.etc."iproute2/{CONFIG_FILE_NAME}".text` will override the whole configuration file instead of appending it to the upstream configuration file.
`CONFIG_FILE_NAME` includes `bpf_pinning`, `ematch_map`, `group`, `nl_protos`, `rt_dsfield`, `rt_protos`, `rt_realms`, `rt_scopes`, and `rt_tables`. `CONFIG_FILE_NAME` includes `bpf_pinning`, `ematch_map`, `group`, `nl_protos`, `rt_dsfield`, `rt_protos`, `rt_realms`, `rt_scopes`, and `rt_tables`.
- `netbox` was updated to v3.7. `services.netbox.package` still defaults
to v3.6 if `stateVersion` is earlier than 24.05. Refer to upstream's breaking
changes [for
v3.7.0](https://github.com/netbox-community/netbox/releases/tag/v3.7.0) and
upgrade NetBox by changing `services.netbox.package`. Database migrations
will be run automatically.
- The executable file names for `firefox-devedition`, `firefox-beta`, `firefox-esr` now matches their package names, which is consistent with the `firefox-*-bin` packages. The desktop entries are also updated so that you can have multiple editions of firefox in your app launcher. - The executable file names for `firefox-devedition`, `firefox-beta`, `firefox-esr` now matches their package names, which is consistent with the `firefox-*-bin` packages. The desktop entries are also updated so that you can have multiple editions of firefox in your app launcher.
- switch-to-configuration no longer calls systemd-tmpfiles directly.
 Instead, the new artificial sysinit-reactivation.target is introduced, which
 allows restarting multiple services that are ordered before sysinit.target
 while respecting the ordering between the services.
- The `systemd.oomd` module behavior is changed as: - The `systemd.oomd` module behavior is changed as:
- Raise ManagedOOMMemoryPressureLimit from 50% to 80%. This should make systemd-oomd kill things less often, and fix issues like [this](https://pagure.io/fedora-workstation/issue/358). - Raise ManagedOOMMemoryPressureLimit from 50% to 80%. This should make systemd-oomd kill things less often, and fix issues like [this](https://pagure.io/fedora-workstation/issue/358).
@ -115,6 +182,20 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
- `security.pam.enableSSHAgentAuth` now requires `services.openssh.authorizedKeysFiles` to be non-empty, - `security.pam.enableSSHAgentAuth` now requires `services.openssh.authorizedKeysFiles` to be non-empty,
which is the case when `services.openssh.enable` is true. Previously, `pam_ssh_agent_auth` silently failed to work. which is the case when `services.openssh.enable` is true. Previously, `pam_ssh_agent_auth` silently failed to work.
- The configuration format for `services.prometheus.exporters.snmp` changed with release 0.23.0.
The module now includes an optional config check, that is enabled by default, to make the change obvious before any deployment.
More information about the configuration syntax change is available in the [upstream repository](https://github.com/prometheus/snmp_exporter/blob/b75fc6b839ee3f3ccbee68bee55f1ae99555084a/auth-split-migration.md).
- [watchdogd](https://troglobit.com/projects/watchdogd/), a system and process supervisor using watchdog timers. Available as [services.watchdogd](#opt-services.watchdogd.enable).
- The `jdt-language-server` package now uses upstream's provided python wrapper instead of our own custom wrapper. This results in the following breaking and notable changes:
- The main binary for the package is now named `jdtls` instead of `jdt-language-server`, equivalent to what most editors expect the binary to be named.
- JVM arguments should now be provided with the `--jvm-arg` flag instead of setting `JAVA_OPTS`.
- The `-data` path is no longer required to run the package, and will be set to point to a folder in `$TMP` if missing.
## Other Notable Changes {#sec-release-24.05-notable-changes} ## Other Notable Changes {#sec-release-24.05-notable-changes}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. --> <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@ -152,6 +233,8 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
- The option [`services.nextcloud.config.dbport`] of the Nextcloud module was removed to match upstream. - The option [`services.nextcloud.config.dbport`] of the Nextcloud module was removed to match upstream.
The port can be specified in [`services.nextcloud.config.dbhost`](#opt-services.nextcloud.config.dbhost). The port can be specified in [`services.nextcloud.config.dbhost`](#opt-services.nextcloud.config.dbhost).
- `stdenv`: The `--replace` flag in `substitute`, `substituteInPlace`, `substituteAll`, `substituteAllStream`, and `substituteStream` is now deprecated in favor of the new `--replace-fail`, `--replace-warn` and `--replace-quiet`. The deprecated `--replace` equates to `--replace-warn`.
- The Yama LSM is now enabled by default in the kernel, which prevents ptracing - The Yama LSM is now enabled by default in the kernel, which prevents ptracing
non-child processes. This means you will not be able to attach gdb to an non-child processes. This means you will not be able to attach gdb to an
existing process, but will need to start that process from gdb (so it is a existing process, but will need to start that process from gdb (so it is a
@ -172,6 +255,8 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
- `services.zfs.zed.enableMail` now uses the global `sendmail` wrapper defined by an email module - `services.zfs.zed.enableMail` now uses the global `sendmail` wrapper defined by an email module
(such as msmtp or Postfix). It no longer requires using a special ZFS build with email support. (such as msmtp or Postfix). It no longer requires using a special ZFS build with email support.
- `nextcloud-setup.service` no longer changes the group of each file & directory inside `/var/lib/nextcloud/{config,data,store-apps}` if one of these directories has the wrong owner group. This was part of transitioning the group used for `/var/lib/nextcloud`, but isn't necessary anymore.
- The `krb5` module has been rewritten and moved to `security.krb5`, moving all options but `security.krb5.enable` and `security.krb5.package` into `security.krb5.settings`. - The `krb5` module has been rewritten and moved to `security.krb5`, moving all options but `security.krb5.enable` and `security.krb5.package` into `security.krb5.settings`.
- Gitea 1.21 upgrade has several breaking changes, including: - Gitea 1.21 upgrade has several breaking changes, including:
@ -185,5 +270,7 @@ The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been m
replaces the need for the `extraPackages` option, this option will be replaces the need for the `extraPackages` option, this option will be
deprecated in future releases. deprecated in future releases.
- The `mpich` package expression now requires `withPm` to be a list, e.g. `"hydra:gforker"` becomes `[ "hydra" "gforker" ]`.
- QtMultimedia has changed its default backend to `QT_MEDIA_BACKEND=ffmpeg` (previously `gstreamer` on Linux or `darwin` on MacOS). - QtMultimedia has changed its default backend to `QT_MEDIA_BACKEND=ffmpeg` (previously `gstreamer` on Linux or `darwin` on MacOS).
The previous native backends remain available but are now minimally maintained. Refer to [upstream documentation](https://doc.qt.io/qt-6/qtmultimedia-index.html#ffmpeg-as-the-default-backend) for further details about each platform. The previous native backends remain available but are now minimally maintained. Refer to [upstream documentation](https://doc.qt.io/qt-6/qtmultimedia-index.html#ffmpeg-as-the-default-backend) for further details about each platform.

View file

@ -110,6 +110,7 @@ let
withExtraAttrs = configuration: configuration // { withExtraAttrs = configuration: configuration // {
inherit extraArgs; inherit extraArgs;
inherit (configuration._module.args) pkgs; inherit (configuration._module.args) pkgs;
inherit lib;
extendModules = args: withExtraAttrs (configuration.extendModules args); extendModules = args: withExtraAttrs (configuration.extendModules args);
}; };
in in

View file

@ -14,6 +14,7 @@
let let
pseudoFilesArgs = lib.concatMapStrings (f: ''-p "${f}" '') pseudoFiles; pseudoFilesArgs = lib.concatMapStrings (f: ''-p "${f}" '') pseudoFiles;
compFlag = if comp == null then "-no-compression" else "-comp ${comp}";
in in
stdenv.mkDerivation { stdenv.mkDerivation {
name = "${fileName}.img"; name = "${fileName}.img";
@ -39,7 +40,7 @@ stdenv.mkDerivation {
# Generate the squashfs image. # Generate the squashfs image.
mksquashfs nix-path-registration $(cat $closureInfo/store-paths) $out ${pseudoFilesArgs} \ mksquashfs nix-path-registration $(cat $closureInfo/store-paths) $out ${pseudoFilesArgs} \
-no-hardlinks ${lib.optionalString noStrip "-no-strip"} -keep-as-directory -all-root -b 1048576 -comp ${comp} \ -no-hardlinks ${lib.optionalString noStrip "-no-strip"} -keep-as-directory -all-root -b 1048576 ${compFlag} \
-processors $NIX_BUILD_CORES -processors $NIX_BUILD_CORES
''; '';
} }

View file

@ -360,9 +360,13 @@ in rec {
}; };
}; };
commonUnitText = def: '' commonUnitText = def: lines: ''
[Unit] [Unit]
${attrsToSection def.unitConfig} ${attrsToSection def.unitConfig}
'' + lines + lib.optionalString (def.wantedBy != [ ]) ''
[Install]
WantedBy=${concatStringsSep " " def.wantedBy}
''; '';
targetToUnit = name: def: targetToUnit = name: def:
@ -376,7 +380,7 @@ in rec {
serviceToUnit = name: def: serviceToUnit = name: def:
{ inherit (def) aliases wantedBy requiredBy enable overrideStrategy; { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
text = commonUnitText def + '' text = commonUnitText def (''
[Service] [Service]
'' + (let env = cfg.globalEnvironment // def.environment; '' + (let env = cfg.globalEnvironment // def.environment;
in concatMapStrings (n: in concatMapStrings (n:
@ -392,63 +396,57 @@ in rec {
'' else "") '' else "")
+ optionalString (def ? stopIfChanged && !def.stopIfChanged) '' + optionalString (def ? stopIfChanged && !def.stopIfChanged) ''
X-StopIfChanged=false X-StopIfChanged=false
'' + attrsToSection def.serviceConfig; '' + attrsToSection def.serviceConfig);
}; };
socketToUnit = name: def: socketToUnit = name: def:
{ inherit (def) aliases wantedBy requiredBy enable overrideStrategy; { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
text = commonUnitText def + text = commonUnitText def ''
'' [Socket]
[Socket] ${attrsToSection def.socketConfig}
${attrsToSection def.socketConfig} ${concatStringsSep "\n" (map (s: "ListenStream=${s}") def.listenStreams)}
${concatStringsSep "\n" (map (s: "ListenStream=${s}") def.listenStreams)} ${concatStringsSep "\n" (map (s: "ListenDatagram=${s}") def.listenDatagrams)}
${concatStringsSep "\n" (map (s: "ListenDatagram=${s}") def.listenDatagrams)} '';
'';
}; };
timerToUnit = name: def: timerToUnit = name: def:
{ inherit (def) aliases wantedBy requiredBy enable overrideStrategy; { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
text = commonUnitText def + text = commonUnitText def ''
'' [Timer]
[Timer] ${attrsToSection def.timerConfig}
${attrsToSection def.timerConfig} '';
'';
}; };
pathToUnit = name: def: pathToUnit = name: def:
{ inherit (def) aliases wantedBy requiredBy enable overrideStrategy; { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
text = commonUnitText def + text = commonUnitText def ''
'' [Path]
[Path] ${attrsToSection def.pathConfig}
${attrsToSection def.pathConfig} '';
'';
}; };
mountToUnit = name: def: mountToUnit = name: def:
{ inherit (def) aliases wantedBy requiredBy enable overrideStrategy; { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
text = commonUnitText def + text = commonUnitText def ''
'' [Mount]
[Mount] ${attrsToSection def.mountConfig}
${attrsToSection def.mountConfig} '';
'';
}; };
automountToUnit = name: def: automountToUnit = name: def:
{ inherit (def) aliases wantedBy requiredBy enable overrideStrategy; { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
text = commonUnitText def + text = commonUnitText def ''
'' [Automount]
[Automount] ${attrsToSection def.automountConfig}
${attrsToSection def.automountConfig} '';
'';
}; };
sliceToUnit = name: def: sliceToUnit = name: def:
{ inherit (def) aliases wantedBy requiredBy enable overrideStrategy; { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
text = commonUnitText def + text = commonUnitText def ''
'' [Slice]
[Slice] ${attrsToSection def.sliceConfig}
${attrsToSection def.sliceConfig} '';
'';
}; };
# Create a directory that contains systemd definition files from an attrset # Create a directory that contains systemd definition files from an attrset

View file

@ -9,17 +9,17 @@
# This file is made to be used as follow: # This file is made to be used as follow:
# #
# $ nix-instantiate ./option-usage.nix --argstr testOption service.xserver.enable -A txtContent --eval # $ nix-instantiate ./option-usages.nix --argstr testOption service.xserver.enable -A txtContent --eval
# #
# or # or
# #
# $ nix-build ./option-usage.nix --argstr testOption service.xserver.enable -A txt -o service.xserver.enable._txt # $ nix-build ./option-usages.nix --argstr testOption service.xserver.enable -A txt -o service.xserver.enable._txt
# #
# Other targets exists such as `dotContent`, `dot`, and `pdf`. If you are # Other targets exists such as `dotContent`, `dot`, and `pdf`. If you are
# looking for the option usage of multiple options, you can provide a list # looking for the option usage of multiple options, you can provide a list
# as argument. # as argument.
# #
# $ nix-build ./option-usage.nix --arg testOptions \ # $ nix-build ./option-usages.nix --arg testOptions \
# '["boot.loader.gummiboot.enable" "boot.loader.gummiboot.timeout"]' \ # '["boot.loader.gummiboot.enable" "boot.loader.gummiboot.timeout"]' \
# -A txt -o gummiboot.list # -A txt -o gummiboot.list
# #

View file

@ -214,7 +214,8 @@ in
'' ''
# Create the required /bin/sh symlink; otherwise lots of things # Create the required /bin/sh symlink; otherwise lots of things
# (notably the system() function) won't work. # (notably the system() function) won't work.
mkdir -m 0755 -p /bin mkdir -p /bin
chmod 0755 /bin
ln -sfn "${cfg.binsh}" /bin/.sh.tmp ln -sfn "${cfg.binsh}" /bin/.sh.tmp
mv /bin/.sh.tmp /bin/sh # atomically replace /bin/sh mv /bin/.sh.tmp /bin/sh # atomically replace /bin/sh
''; '';

View file

@ -685,7 +685,7 @@ in {
shadow.gid = ids.gids.shadow; shadow.gid = ids.gids.shadow;
}; };
system.activationScripts.users = { system.activationScripts.users = if !config.systemd.sysusers.enable then {
supportsDryActivation = true; supportsDryActivation = true;
text = '' text = ''
install -m 0700 -d /root install -m 0700 -d /root
@ -694,7 +694,7 @@ in {
${pkgs.perl.withPackages (p: [ p.FileSlurp p.JSON ])}/bin/perl \ ${pkgs.perl.withPackages (p: [ p.FileSlurp p.JSON ])}/bin/perl \
-w ${./update-users-groups.pl} ${spec} -w ${./update-users-groups.pl} ${spec}
''; '';
}; } else ""; # keep around for backwards compatibility
system.activationScripts.update-lingering = let system.activationScripts.update-lingering = let
lingerDir = "/var/lib/systemd/linger"; lingerDir = "/var/lib/systemd/linger";
@ -711,7 +711,9 @@ in {
''; '';
# Warn about user accounts with deprecated password hashing schemes # Warn about user accounts with deprecated password hashing schemes
system.activationScripts.hashes = { # This does not work when the users and groups are created by
# systemd-sysusers because the users are created too late then.
system.activationScripts.hashes = if !config.systemd.sysusers.enable then {
deps = [ "users" ]; deps = [ "users" ];
text = '' text = ''
users=() users=()
@ -729,7 +731,7 @@ in {
printf ' - %s\n' "''${users[@]}" printf ' - %s\n' "''${users[@]}"
fi fi
''; '';
}; } else ""; # keep around for backwards compatibility
# for backwards compatibility # for backwards compatibility
system.activationScripts.groups = stringAfter [ "users" ] ""; system.activationScripts.groups = stringAfter [ "users" ] "";

View file

@ -12,6 +12,10 @@ in
Add your user to the corectrl group to run corectrl without needing to enter your password Add your user to the corectrl group to run corectrl without needing to enter your password
''); '');
package = mkPackageOption pkgs "corectrl" {
extraDescription = "Useful for overriding the configuration options used for the package.";
};
gpuOverclock = { gpuOverclock = {
enable = mkEnableOption (lib.mdDoc '' enable = mkEnableOption (lib.mdDoc ''
GPU overclocking GPU overclocking
@ -32,9 +36,9 @@ in
config = mkIf cfg.enable (lib.mkMerge [ config = mkIf cfg.enable (lib.mkMerge [
{ {
environment.systemPackages = [ pkgs.corectrl ]; environment.systemPackages = [ cfg.package ];
services.dbus.packages = [ pkgs.corectrl ]; services.dbus.packages = [ cfg.package ];
users.groups.corectrl = { }; users.groups.corectrl = { };

View file

@ -10,6 +10,8 @@
, systemd , systemd
, fakeroot , fakeroot
, util-linux , util-linux
# filesystem tools
, dosfstools , dosfstools
, mtools , mtools
, e2fsprogs , e2fsprogs
@ -18,8 +20,13 @@
, btrfs-progs , btrfs-progs
, xfsprogs , xfsprogs
# compression tools
, zstd
, xz
# arguments # arguments
, name , imageFileBasename
, compression
, fileSystems , fileSystems
, partitions , partitions
, split , split
@ -52,14 +59,25 @@ let
}; };
fileSystemTools = builtins.concatMap (f: fileSystemToolMapping."${f}") fileSystems; fileSystemTools = builtins.concatMap (f: fileSystemToolMapping."${f}") fileSystems;
compressionPkg = {
"zstd" = zstd;
"xz" = xz;
}."${compression.algorithm}";
compressionCommand = {
"zstd" = "zstd --no-progress --threads=0 -${toString compression.level}";
"xz" = "xz --keep --verbose --threads=0 -${toString compression.level}";
}."${compression.algorithm}";
in in
runCommand name runCommand imageFileBasename
{ {
nativeBuildInputs = [ nativeBuildInputs = [
systemd systemd
fakeroot fakeroot
util-linux util-linux
compressionPkg
] ++ fileSystemTools; ] ++ fileSystemTools;
} '' } ''
amendedRepartDefinitions=$(${amendRepartDefinitions} ${partitions} ${definitionsDirectory}) amendedRepartDefinitions=$(${amendRepartDefinitions} ${partitions} ${definitionsDirectory})
@ -67,6 +85,7 @@ runCommand name
mkdir -p $out mkdir -p $out
cd $out cd $out
echo "Building image with systemd-repart..."
unshare --map-root-user fakeroot systemd-repart \ unshare --map-root-user fakeroot systemd-repart \
--dry-run=no \ --dry-run=no \
--empty=create \ --empty=create \
@ -75,6 +94,17 @@ runCommand name
--definitions="$amendedRepartDefinitions" \ --definitions="$amendedRepartDefinitions" \
--split="${lib.boolToString split}" \ --split="${lib.boolToString split}" \
--json=pretty \ --json=pretty \
image.raw \ ${imageFileBasename}.raw \
| tee repart-output.json | tee repart-output.json
# Compression is implemented in the same derivation as opposed to in a
# separate derivation to allow users to save disk space. Disk images are
# already very space intensive so we want to allow users to mitigate this.
if ${lib.boolToString compression.enable}; then
for f in ${imageFileBasename}*; do
echo "Compressing $f with ${compression.algorithm}..."
# Keep the original file when compressing and only delete it afterwards
${compressionCommand} $f && rm $f
done
fi
'' ''

View file

@ -66,7 +66,53 @@ in
name = lib.mkOption { name = lib.mkOption {
type = lib.types.str; type = lib.types.str;
description = lib.mdDoc "The name of the image."; description = lib.mdDoc ''
Name of the image.
If this option is unset but config.system.image.id is set,
config.system.image.id is used as the default value.
'';
};
version = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = config.system.image.version;
defaultText = lib.literalExpression "config.system.image.version";
description = lib.mdDoc "Version of the image";
};
imageFileBasename = lib.mkOption {
type = lib.types.str;
readOnly = true;
description = lib.mdDoc ''
Basename of the image filename without any extension (e.g. `image_1`).
'';
};
imageFile = lib.mkOption {
type = lib.types.str;
readOnly = true;
description = lib.mdDoc ''
Filename of the image including all extensions (e.g `image_1.raw` or
`image_1.raw.zst`).
'';
};
compression = {
enable = lib.mkEnableOption (lib.mdDoc "Image compression");
algorithm = lib.mkOption {
type = lib.types.enum [ "zstd" "xz" ];
default = "zstd";
description = lib.mdDoc "Compression algorithm";
};
level = lib.mkOption {
type = lib.types.int;
description = lib.mdDoc ''
Compression level. The available range depends on the used algorithm.
'';
};
}; };
seed = lib.mkOption { seed = lib.mkOption {
@ -131,6 +177,32 @@ in
config = { config = {
image.repart =
let
version = config.image.repart.version;
versionInfix = if version != null then "_${version}" else "";
compressionSuffix = lib.optionalString cfg.compression.enable
{
"zstd" = ".zst";
"xz" = ".xz";
}."${cfg.compression.algorithm}";
in
{
name = lib.mkIf (config.system.image.id != null) (lib.mkOptionDefault config.system.image.id);
imageFileBasename = cfg.name + versionInfix;
imageFile = cfg.imageFileBasename + ".raw" + compressionSuffix;
compression = {
# Generally default to slightly faster than default compression
# levels under the assumption that most of the building will be done
# for development and release builds will be customized.
level = lib.mkOptionDefault {
"zstd" = 3;
"xz" = 3;
}."${cfg.compression.algorithm}";
};
};
system.build.image = system.build.image =
let let
fileSystems = lib.filter fileSystems = lib.filter
@ -160,7 +232,7 @@ in
in in
pkgs.callPackage ./repart-image.nix { pkgs.callPackage ./repart-image.nix {
systemd = cfg.package; systemd = cfg.package;
inherit (cfg) name split seed; inherit (cfg) imageFileBasename compression split seed;
inherit fileSystems definitionsDirectory partitions; inherit fileSystems definitionsDirectory partitions;
}; };

View file

@ -18,7 +18,7 @@
# not including it may cause annoying cache misses in the case of the NixOS manual. # not including it may cause annoying cache misses in the case of the NixOS manual.
documentation.doc.enable = lib.mkOverride 500 true; documentation.doc.enable = lib.mkOverride 500 true;
fonts.fontconfig.enable = lib.mkForce false; fonts.fontconfig.enable = lib.mkOverride 500 false;
isoImage.edition = lib.mkForce "minimal"; isoImage.edition = lib.mkOverride 500 "minimal";
} }

View file

@ -512,9 +512,10 @@ in
+ lib.optionalString isAarch "-Xbcj arm" + lib.optionalString isAarch "-Xbcj arm"
+ lib.optionalString (isPower && is32bit && isBigEndian) "-Xbcj powerpc" + lib.optionalString (isPower && is32bit && isBigEndian) "-Xbcj powerpc"
+ lib.optionalString (isSparc) "-Xbcj sparc"; + lib.optionalString (isSparc) "-Xbcj sparc";
type = lib.types.str; type = lib.types.nullOr lib.types.str;
description = lib.mdDoc '' description = lib.mdDoc ''
Compression settings to use for the squashfs nix store. Compression settings to use for the squashfs nix store.
`null` disables compression.
''; '';
example = "zstd -Xcompression-level 6"; example = "zstd -Xcompression-level 6";
}; };

View file

@ -28,6 +28,8 @@ let
DOCUMENTATION_URL = lib.optionalString (cfg.distroId == "nixos") "https://nixos.org/learn.html"; DOCUMENTATION_URL = lib.optionalString (cfg.distroId == "nixos") "https://nixos.org/learn.html";
SUPPORT_URL = lib.optionalString (cfg.distroId == "nixos") "https://nixos.org/community.html"; SUPPORT_URL = lib.optionalString (cfg.distroId == "nixos") "https://nixos.org/community.html";
BUG_REPORT_URL = lib.optionalString (cfg.distroId == "nixos") "https://github.com/NixOS/nixpkgs/issues"; BUG_REPORT_URL = lib.optionalString (cfg.distroId == "nixos") "https://github.com/NixOS/nixpkgs/issues";
IMAGE_ID = lib.optionalString (config.system.image.id != null) config.system.image.id;
IMAGE_VERSION = lib.optionalString (config.system.image.version != null) config.system.image.version;
} // lib.optionalAttrs (cfg.variant_id != null) { } // lib.optionalAttrs (cfg.variant_id != null) {
VARIANT_ID = cfg.variant_id; VARIANT_ID = cfg.variant_id;
}; };
@ -110,6 +112,38 @@ in
example = "installer"; example = "installer";
}; };
image = {
id = lib.mkOption {
type = types.nullOr (types.strMatching "^[a-z0-9._-]+$");
default = null;
description = lib.mdDoc ''
Image identifier.
This corresponds to the IMAGE_ID field in os-release. See the
upstream docs for more details on valid characters for this field:
https://www.freedesktop.org/software/systemd/man/latest/os-release.html#IMAGE_ID=
You would only want to set this option if you're build NixOS appliance images.
'';
};
version = lib.mkOption {
type = types.nullOr (types.strMatching "^[a-z0-9._-]+$");
default = null;
description = lib.mdDoc ''
Image version.
This corresponds to the IMAGE_VERSION field in os-release. See the
upstream docs for more details on valid characters for this field:
https://www.freedesktop.org/software/systemd/man/latest/os-release.html#IMAGE_VERSION=
You would only want to set this option if you're build NixOS appliance images.
'';
};
};
stateVersion = mkOption { stateVersion = mkOption {
type = types.str; type = types.str;
# TODO Remove this and drop the default of the option so people are forced to set it. # TODO Remove this and drop the default of the option so people are forced to set it.

View file

@ -195,7 +195,6 @@
./programs/haguichi.nix ./programs/haguichi.nix
./programs/hamster.nix ./programs/hamster.nix
./programs/htop.nix ./programs/htop.nix
./programs/hyprland.nix
./programs/iay.nix ./programs/iay.nix
./programs/iftop.nix ./programs/iftop.nix
./programs/i3lock.nix ./programs/i3lock.nix
@ -273,6 +272,7 @@
./programs/wavemon.nix ./programs/wavemon.nix
./programs/wayland/cardboard.nix ./programs/wayland/cardboard.nix
./programs/wayland/labwc.nix ./programs/wayland/labwc.nix
./programs/wayland/hyprland.nix
./programs/wayland/river.nix ./programs/wayland/river.nix
./programs/wayland/sway.nix ./programs/wayland/sway.nix
./programs/wayland/waybar.nix ./programs/wayland/waybar.nix
@ -446,6 +446,7 @@
./services/databases/postgresql.nix ./services/databases/postgresql.nix
./services/databases/redis.nix ./services/databases/redis.nix
./services/databases/surrealdb.nix ./services/databases/surrealdb.nix
./services/databases/tigerbeetle.nix
./services/databases/victoriametrics.nix ./services/databases/victoriametrics.nix
./services/desktops/accountsservice.nix ./services/desktops/accountsservice.nix
./services/desktops/ayatana-indicators.nix ./services/desktops/ayatana-indicators.nix
@ -506,7 +507,7 @@
./services/editors/haste.nix ./services/editors/haste.nix
./services/editors/infinoted.nix ./services/editors/infinoted.nix
./services/finance/odoo.nix ./services/finance/odoo.nix
./services/games/asf.nix ./services/games/archisteamfarm.nix
./services/games/crossfire-server.nix ./services/games/crossfire-server.nix
./services/games/deliantra-server.nix ./services/games/deliantra-server.nix
./services/games/factorio.nix ./services/games/factorio.nix
@ -832,6 +833,7 @@
./services/monitoring/riemann-dash.nix ./services/monitoring/riemann-dash.nix
./services/monitoring/riemann-tools.nix ./services/monitoring/riemann-tools.nix
./services/monitoring/riemann.nix ./services/monitoring/riemann.nix
./services/monitoring/rustdesk-server.nix
./services/monitoring/scollector.nix ./services/monitoring/scollector.nix
./services/monitoring/smartd.nix ./services/monitoring/smartd.nix
./services/monitoring/snmpd.nix ./services/monitoring/snmpd.nix
@ -849,6 +851,7 @@
./services/monitoring/vmagent.nix ./services/monitoring/vmagent.nix
./services/monitoring/vmalert.nix ./services/monitoring/vmalert.nix
./services/monitoring/vnstat.nix ./services/monitoring/vnstat.nix
./services/monitoring/watchdogd.nix
./services/monitoring/zabbix-agent.nix ./services/monitoring/zabbix-agent.nix
./services/monitoring/zabbix-proxy.nix ./services/monitoring/zabbix-proxy.nix
./services/monitoring/zabbix-server.nix ./services/monitoring/zabbix-server.nix
@ -1044,6 +1047,7 @@
./services/networking/ntopng.nix ./services/networking/ntopng.nix
./services/networking/ntp/chrony.nix ./services/networking/ntp/chrony.nix
./services/networking/ntp/ntpd.nix ./services/networking/ntp/ntpd.nix
./services/networking/ntp/ntpd-rs.nix
./services/networking/ntp/openntpd.nix ./services/networking/ntp/openntpd.nix
./services/networking/nullidentdmod.nix ./services/networking/nullidentdmod.nix
./services/networking/nylon.nix ./services/networking/nylon.nix
@ -1338,6 +1342,7 @@
./services/web-apps/restya-board.nix ./services/web-apps/restya-board.nix
./services/web-apps/rimgo.nix ./services/web-apps/rimgo.nix
./services/web-apps/sftpgo.nix ./services/web-apps/sftpgo.nix
./services/web-apps/suwayomi-server.nix
./services/web-apps/rss-bridge.nix ./services/web-apps/rss-bridge.nix
./services/web-apps/selfoss.nix ./services/web-apps/selfoss.nix
./services/web-apps/shiori.nix ./services/web-apps/shiori.nix
@ -1465,6 +1470,7 @@
./system/boot/stratisroot.nix ./system/boot/stratisroot.nix
./system/boot/modprobe.nix ./system/boot/modprobe.nix
./system/boot/networkd.nix ./system/boot/networkd.nix
./system/boot/uki.nix
./system/boot/unl0kr.nix ./system/boot/unl0kr.nix
./system/boot/plymouth.nix ./system/boot/plymouth.nix
./system/boot/resolved.nix ./system/boot/resolved.nix
@ -1485,6 +1491,7 @@
./system/boot/systemd/repart.nix ./system/boot/systemd/repart.nix
./system/boot/systemd/shutdown.nix ./system/boot/systemd/shutdown.nix
./system/boot/systemd/sysupdate.nix ./system/boot/systemd/sysupdate.nix
./system/boot/systemd/sysusers.nix
./system/boot/systemd/tmpfiles.nix ./system/boot/systemd/tmpfiles.nix
./system/boot/systemd/user.nix ./system/boot/systemd/user.nix
./system/boot/systemd/userdbd.nix ./system/boot/systemd/userdbd.nix
@ -1512,6 +1519,7 @@
./tasks/filesystems/nfs.nix ./tasks/filesystems/nfs.nix
./tasks/filesystems/ntfs.nix ./tasks/filesystems/ntfs.nix
./tasks/filesystems/reiserfs.nix ./tasks/filesystems/reiserfs.nix
./tasks/filesystems/sshfs.nix
./tasks/filesystems/squashfs.nix ./tasks/filesystems/squashfs.nix
./tasks/filesystems/unionfs-fuse.nix ./tasks/filesystems/unionfs-fuse.nix
./tasks/filesystems/vboxsf.nix ./tasks/filesystems/vboxsf.nix

View file

@ -0,0 +1,31 @@
# WARNING: If you enable this profile, you will NOT be able to switch to a new
# configuration and thus you will not be able to rebuild your system with
# nixos-rebuild!
{ lib, ... }:
{
# Disable switching to a new configuration. This is not a necessary
# limitation of a perlless system but just a current one. In the future,
# perlless switching might be possible.
system.switch.enable = lib.mkDefault false;
# Remove perl from activation
boot.initrd.systemd.enable = lib.mkDefault true;
system.etc.overlay.enable = lib.mkDefault true;
systemd.sysusers.enable = lib.mkDefault true;
# Random perl remnants
system.disableInstallerTools = lib.mkDefault true;
programs.less.lessopen = lib.mkDefault null;
programs.command-not-found.enable = lib.mkDefault false;
boot.enableContainers = lib.mkDefault false;
environment.defaultPackages = lib.mkDefault [ ];
documentation.info.enable = lib.mkDefault false;
# Check that the system does not contain a Nix store path that contains the
# string "perl".
system.forbiddenDependenciesRegex = "perl";
}

View file

@ -12,6 +12,7 @@ let
'' ''
#! ${pkgs.runtimeShell} -e #! ${pkgs.runtimeShell} -e
export DISPLAY="$(systemctl --user show-environment | ${pkgs.gnused}/bin/sed 's/^DISPLAY=\(.*\)/\1/; t; d')" export DISPLAY="$(systemctl --user show-environment | ${pkgs.gnused}/bin/sed 's/^DISPLAY=\(.*\)/\1/; t; d')"
export XAUTHORITY="$(systemctl --user show-environment | ${pkgs.gnused}/bin/sed 's/^XAUTHORITY=\(.*\)/\1/; t; d')"
export WAYLAND_DISPLAY="$(systemctl --user show-environment | ${pkgs.gnused}/bin/sed 's/^WAYLAND_DISPLAY=\(.*\)/\1/; t; d')" export WAYLAND_DISPLAY="$(systemctl --user show-environment | ${pkgs.gnused}/bin/sed 's/^WAYLAND_DISPLAY=\(.*\)/\1/; t; d')"
exec ${cfg.askPassword} "$@" exec ${cfg.askPassword} "$@"
''; '';

View file

@ -1,13 +1,21 @@
{ config, lib, pkgs, ... }: { config, lib, pkgs, ... }:
with lib;
let let
cfg = config.programs.starship; cfg = config.programs.starship;
settingsFormat = pkgs.formats.toml { }; settingsFormat = pkgs.formats.toml { };
settingsFile = settingsFormat.generate "starship.toml" cfg.settings; userSettingsFile = settingsFormat.generate "starship.toml" cfg.settings;
settingsFile = if cfg.presets == [] then userSettingsFile else pkgs.runCommand "starship.toml"
{
nativeBuildInputs = [ pkgs.yq ];
} ''
tomlq -s -t 'reduce .[] as $item ({}; . * $item)' \
${lib.concatStringsSep " " (map (f: "${pkgs.starship}/share/starship/presets/${f}.toml") cfg.presets)} \
${userSettingsFile} \
> $out
'';
initOption = initOption =
if cfg.interactiveOnly then if cfg.interactiveOnly then
@ -18,19 +26,28 @@ let
in in
{ {
options.programs.starship = { options.programs.starship = {
enable = mkEnableOption (lib.mdDoc "the Starship shell prompt"); enable = lib.mkEnableOption (lib.mdDoc "the Starship shell prompt");
interactiveOnly = mkOption { interactiveOnly = lib.mkOption {
default = true; default = true;
example = false; example = false;
type = types.bool; type = lib.types.bool;
description = lib.mdDoc '' description = lib.mdDoc ''
Whether to enable starship only when the shell is interactive. Whether to enable starship only when the shell is interactive.
Some plugins require this to be set to false to function correctly. Some plugins require this to be set to false to function correctly.
''; '';
}; };
settings = mkOption { presets = lib.mkOption {
default = [ ];
example = [ "nerd-font-symbols" ];
type = with lib.types; listOf str;
description = lib.mdDoc ''
Presets files to be merged with settings in order.
'';
};
settings = lib.mkOption {
inherit (settingsFormat) type; inherit (settingsFormat) type;
default = { }; default = { };
description = lib.mdDoc '' description = lib.mdDoc ''
@ -41,7 +58,7 @@ in
}; };
}; };
config = mkIf cfg.enable { config = lib.mkIf cfg.enable {
programs.bash.${initOption} = '' programs.bash.${initOption} = ''
if [[ $TERM != "dumb" ]]; then if [[ $TERM != "dumb" ]]; then
# don't set STARSHIP_CONFIG automatically if there's a user-specified # don't set STARSHIP_CONFIG automatically if there's a user-specified

View file

@ -72,7 +72,7 @@ services.nginx = {
}; };
}; };
}; };
} };
``` ```
## Using ACME certificates in Apache/httpd {#module-security-acme-httpd} ## Using ACME certificates in Apache/httpd {#module-security-acme-httpd}
@ -111,7 +111,7 @@ services.nginx = {
}; };
}; };
}; };
} };
# Alternative config for Apache # Alternative config for Apache
users.users.wwwrun.extraGroups = [ "acme" ]; users.users.wwwrun.extraGroups = [ "acme" ];
services.httpd = { services.httpd = {
@ -131,7 +131,7 @@ services.httpd = {
''; '';
}; };
}; };
} };
``` ```
Now you need to configure ACME to generate a certificate. Now you need to configure ACME to generate a certificate.
@ -181,7 +181,7 @@ services.bind = {
extraConfig = "allow-update { key rfc2136key.example.com.; };"; extraConfig = "allow-update { key rfc2136key.example.com.; };";
} }
]; ];
} };
# Now we can configure ACME # Now we can configure ACME
security.acme.acceptTerms = true; security.acme.acceptTerms = true;
@ -271,7 +271,7 @@ services.nginx = {
acmeRoot = null; acmeRoot = null;
}; };
}; };
} };
``` ```
And that's it! Next time your configuration is rebuilt, or when And that's it! Next time your configuration is rebuilt, or when

View file

@ -897,10 +897,10 @@ in {
certs = attrValues cfg.certs; certs = attrValues cfg.certs;
in [ in [
{ {
assertion = cfg.email != null || all (certOpts: certOpts.email != null) certs; assertion = cfg.defaults.email != null || all (certOpts: certOpts.email != null) certs;
message = '' message = ''
You must define `security.acme.certs.<name>.email` or You must define `security.acme.certs.<name>.email` or
`security.acme.email` to register with the CA. Note that using `security.acme.defaults.email` to register with the CA. Note that using
many different addresses for certs may trigger account rate limits. many different addresses for certs may trigger account rate limits.
''; '';
} }

View file

@ -168,7 +168,7 @@ in
echo "$PW" echo "$PW"
# Retype password: # Retype password:
echo "$PW" echo "$PW"
) | ${cfg.package}/bin/pgadmin4-setup ) | ${cfg.package}/bin/pgadmin4-cli setup-db
''; '';
restartTriggers = [ restartTriggers = [

View file

@ -64,6 +64,7 @@ in
config = mkIf cfg.enable { config = mkIf cfg.enable {
systemd = { systemd = {
services.gmediarender = { services.gmediarender = {
wants = [ "network-online.target" ];
after = [ "network-online.target" ]; after = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
description = "gmediarender server daemon"; description = "gmediarender server daemon";

View file

@ -26,6 +26,7 @@ in
config = mkIf cfg.enable { config = mkIf cfg.enable {
systemd.services.jmusicbot = { systemd.services.jmusicbot = {
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" ];
after = [ "network-online.target" ]; after = [ "network-online.target" ];
description = "Discord music bot that's easy to set up and run yourself!"; description = "Discord music bot that's easy to set up and run yourself!";
serviceConfig = mkMerge [{ serviceConfig = mkMerge [{

View file

@ -50,6 +50,7 @@ in
systemd.services.spotifyd = { systemd.services.spotifyd = {
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" ];
after = [ "network-online.target" "sound.target" ]; after = [ "network-online.target" "sound.target" ];
description = "spotifyd, a Spotify playing daemon"; description = "spotifyd, a Spotify playing daemon";
environment.SHELL = "/bin/sh"; environment.SHELL = "/bin/sh";

View file

@ -50,6 +50,7 @@ in {
description = "Standalone MPD Web GUI written in C"; description = "Standalone MPD Web GUI written in C";
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" ];
after = [ "network-online.target" ]; after = [ "network-online.target" ];
serviceConfig = { serviceConfig = {

View file

@ -267,7 +267,7 @@ in {
systemd.services.buildbot-master = { systemd.services.buildbot-master = {
description = "Buildbot Continuous Integration Server."; description = "Buildbot Continuous Integration Server.";
after = [ "network-online.target" ]; after = [ "network.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
path = cfg.packages ++ cfg.pythonPackages python.pkgs; path = cfg.packages ++ cfg.pythonPackages python.pkgs;
environment.PYTHONPATH = "${python.withPackages (self: cfg.pythonPackages self ++ [ package ])}/${python.sitePackages}"; environment.PYTHONPATH = "${python.withPackages (self: cfg.pythonPackages self ++ [ package ])}/${python.sitePackages}";

View file

@ -188,6 +188,7 @@ in
nameValuePair "gitea-runner-${escapeSystemdPath name}" { nameValuePair "gitea-runner-${escapeSystemdPath name}" {
inherit (instance) enable; inherit (instance) enable;
description = "Gitea Actions Runner"; description = "Gitea Actions Runner";
wants = [ "network-online.target" ];
after = [ after = [
"network-online.target" "network-online.target"
] ++ optionals (wantsDocker) [ ] ++ optionals (wantsDocker) [

View file

@ -153,6 +153,7 @@ with lib;
type = types.attrs; type = types.attrs;
description = lib.mdDoc '' description = lib.mdDoc ''
Modify the systemd service. Can be used to, e.g., adjust the sandboxing options. Modify the systemd service. Can be used to, e.g., adjust the sandboxing options.
See {manpage}`systemd.exec(5)` for more options.
''; '';
example = { example = {
ProtectHome = false; ProtectHome = false;

View file

@ -393,6 +393,7 @@ in
systemd.services.hydra-evaluator = systemd.services.hydra-evaluator =
{ wantedBy = [ "multi-user.target" ]; { wantedBy = [ "multi-user.target" ];
requires = [ "hydra-init.service" ]; requires = [ "hydra-init.service" ];
wants = [ "network-online.target" ];
after = [ "hydra-init.service" "network.target" "network-online.target" ]; after = [ "hydra-init.service" "network.target" "network-online.target" ];
path = with pkgs; [ hydra-package nettools jq ]; path = with pkgs; [ hydra-package nettools jq ];
restartTriggers = [ hydraConf ]; restartTriggers = [ hydraConf ];

View file

@ -143,7 +143,7 @@ in
# ConnectionTimeout = 180 # ConnectionTimeout = 180
#RemoteServiceName = gds_db #RemoteServiceName = gds_db
RemoteServicePort = ${cfg.port} RemoteServicePort = ${toString cfg.port}
# randomly choose port for server Event Notification # randomly choose port for server Event Notification
#RemoteAuxPort = 0 #RemoteAuxPort = 0

View file

@ -161,6 +161,7 @@ in
ExecStart = ''${cfg.package}/bin/influxd -config "${configFile}"''; ExecStart = ''${cfg.package}/bin/influxd -config "${configFile}"'';
User = cfg.user; User = cfg.user;
Group = cfg.group; Group = cfg.group;
Restart = "on-failure";
}; };
postStart = postStart =
let let

View file

@ -104,6 +104,7 @@ in
config = lib.mkIf cfg.enable { config = lib.mkIf cfg.enable {
systemd.services.lldap = { systemd.services.lldap = {
description = "Lightweight LDAP server (lldap)"; description = "Lightweight LDAP server (lldap)";
wants = [ "network-online.target" ];
after = [ "network-online.target" ]; after = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
serviceConfig = { serviceConfig = {

View file

@ -294,6 +294,7 @@ in {
"man:slapd-mdb" "man:slapd-mdb"
]; ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" ];
after = [ "network-online.target" ]; after = [ "network-online.target" ];
serviceConfig = { serviceConfig = {
User = cfg.user; User = cfg.user;

View file

@ -0,0 +1,33 @@
# TigerBeetle {#module-services-tigerbeetle}
*Source:* {file}`modules/services/databases/tigerbeetle.nix`
*Upstream documentation:* <https://docs.tigerbeetle.com/>
TigerBeetle is a distributed financial accounting database designed for mission critical safety and performance.
To enable TigerBeetle, add the following to your {file}`configuration.nix`:
```
services.tigerbeetle.enable = true;
```
When first started, the TigerBeetle service will create its data file at {file}`/var/lib/tigerbeetle` unless the file already exists, in which case it will just use the existing file.
If you make changes to the configuration of TigerBeetle after its data file was already created (for example increasing the replica count), you may need to remove the existing file to avoid conflicts.
## Configuring {#module-services-tigerbeetle-configuring}
By default, TigerBeetle will only listen on a local interface.
To configure it to listen on a different interface (and to configure it to connect to other replicas, if you're creating more than one), you'll have to set the `addresses` option.
Note that the TigerBeetle module won't open any firewall ports automatically, so if you configure it to listen on an external interface, you'll need to ensure that connections can reach it:
```
services.tigerbeetle = {
enable = true;
addresses = [ "0.0.0.0:3001" ];
};
networking.firewall.allowedTCPPorts = [ 3001 ];
```
A complete list of options for TigerBeetle can be found [here](#opt-services.tigerbeetle.enable).

View file

@ -0,0 +1,115 @@
{ config, lib, pkgs, ... }:
let
cfg = config.services.tigerbeetle;
in
{
meta = {
maintainers = with lib.maintainers; [ danielsidhion ];
doc = ./tigerbeetle.md;
buildDocsInSandbox = true;
};
options = {
services.tigerbeetle = with lib; {
enable = mkEnableOption (mdDoc "TigerBeetle server");
package = mkPackageOption pkgs "tigerbeetle" { };
clusterId = mkOption {
type = types.either types.ints.unsigned (types.strMatching "[0-9]+");
default = 0;
description = lib.mdDoc ''
The 128-bit cluster ID used to create the replica data file (if needed).
Since Nix only supports integers up to 64 bits, you need to pass a string to this if the cluster ID can't fit in 64 bits.
Otherwise, you can pass the cluster ID as either an integer or a string.
'';
};
replicaIndex = mkOption {
type = types.ints.unsigned;
default = 0;
description = lib.mdDoc ''
The index (starting at 0) of the replica in the cluster.
'';
};
replicaCount = mkOption {
type = types.ints.unsigned;
default = 1;
description = lib.mdDoc ''
The number of replicas participating in replication of the cluster.
'';
};
cacheGridSize = mkOption {
type = types.strMatching "[0-9]+(K|M|G)B";
default = "1GB";
description = lib.mdDoc ''
The grid cache size.
The grid cache acts like a page cache for TigerBeetle.
It is recommended to set this as large as possible.
'';
};
addresses = mkOption {
type = types.listOf types.nonEmptyStr;
default = [ "3001" ];
description = lib.mdDoc ''
The addresses of all replicas in the cluster.
This should be a list of IPv4/IPv6 addresses with port numbers.
Either the address or port number (but not both) may be omitted, in which case a default of 127.0.0.1 or 3001 will be used.
The first address in the list corresponds to the address for replica 0, the second address for replica 1, and so on.
'';
};
};
};
config = lib.mkIf cfg.enable {
assertions =
let
numAddresses = builtins.length cfg.addresses;
in
[
{
assertion = cfg.replicaIndex < cfg.replicaCount;
message = "the TigerBeetle replica index must fit the configured replica count";
}
{
assertion = cfg.replicaCount == numAddresses;
message = if cfg.replicaCount < numAddresses then "TigerBeetle must not have more addresses than the configured number of replicas" else "TigerBeetle must be configured with the addresses of all replicas";
}
];
systemd.services.tigerbeetle =
let
replicaDataPath = "/var/lib/tigerbeetle/${builtins.toString cfg.clusterId}_${builtins.toString cfg.replicaIndex}.tigerbeetle";
in
{
description = "TigerBeetle server";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
preStart = ''
if ! test -e "${replicaDataPath}"; then
${lib.getExe cfg.package} format --cluster="${builtins.toString cfg.clusterId}" --replica="${builtins.toString cfg.replicaIndex}" --replica-count="${builtins.toString cfg.replicaCount}" "${replicaDataPath}"
fi
'';
serviceConfig = {
Type = "exec";
DynamicUser = true;
ProtectHome = true;
DevicePolicy = "closed";
StateDirectory = "tigerbeetle";
StateDirectoryMode = 700;
ExecStart = "${lib.getExe cfg.package} start --cache-grid=${cfg.cacheGridSize} --addresses=${lib.escapeShellArg (builtins.concatStringsSep "," cfg.addresses)} ${replicaDataPath}";
};
};
environment.systemPackages = [ cfg.package ];
};
}

View file

@ -200,6 +200,7 @@ in
}; };
systemd.services.geoclue = { systemd.services.geoclue = {
wants = lib.optionals cfg.enableWifi [ "network-online.target" ];
after = lib.optionals cfg.enableWifi [ "network-online.target" ]; after = lib.optionals cfg.enableWifi [ "network-online.target" ];
# restart geoclue service when the configuration changes # restart geoclue service when the configuration changes
restartTriggers = [ restartTriggers = [
@ -217,6 +218,7 @@ in
# we can't be part of a system service, and the agent should # we can't be part of a system service, and the agent should
# be okay with the main service coming and going # be okay with the main service coming and going
wantedBy = [ "default.target" ]; wantedBy = [ "default.target" ];
wants = lib.optionals cfg.enableWifi [ "network-online.target" ];
after = lib.optionals cfg.enableWifi [ "network-online.target" ]; after = lib.optionals cfg.enableWifi [ "network-online.target" ];
unitConfig.ConditionUser = "!@system"; unitConfig.ConditionUser = "!@system";
serviceConfig = { serviceConfig = {

View file

@ -15,25 +15,6 @@ let
fi fi
''; '';
desktopApplicationFile = pkgs.writeTextFile {
name = "emacsclient.desktop";
destination = "/share/applications/emacsclient.desktop";
text = ''
[Desktop Entry]
Name=Emacsclient
GenericName=Text Editor
Comment=Edit text
MimeType=text/english;text/plain;text/x-makefile;text/x-c++hdr;text/x-c++src;text/x-chdr;text/x-csrc;text/x-java;text/x-moc;text/x-pascal;text/x-tcl;text/x-tex;application/x-shellscript;text/x-c;text/x-c++;
Exec=emacseditor %F
Icon=emacs
Type=Application
Terminal=false
Categories=Development;TextEditor;
StartupWMClass=Emacs
Keywords=Text;Editor;
'';
};
in in
{ {
@ -102,7 +83,7 @@ in
wantedBy = if cfg.startWithGraphical then [ "graphical-session.target" ] else [ "default.target" ]; wantedBy = if cfg.startWithGraphical then [ "graphical-session.target" ] else [ "default.target" ];
}; };
environment.systemPackages = [ cfg.package editorScript desktopApplicationFile ]; environment.systemPackages = [ cfg.package editorScript ];
environment.variables.EDITOR = mkIf cfg.defaultEditor (mkOverride 900 "emacseditor"); environment.variables.EDITOR = mkIf cfg.defaultEditor (mkOverride 900 "emacseditor");
}; };

View file

@ -1,13 +1,11 @@
{ config, lib, pkgs, ... }: { config, lib, pkgs, ... }:
with lib;
let let
cfg = config.services.archisteamfarm; cfg = config.services.archisteamfarm;
format = pkgs.formats.json { }; format = pkgs.formats.json { };
asf-config = format.generate "ASF.json" (cfg.settings // { configFile = format.generate "ASF.json" (cfg.settings // {
# we disable it because ASF cannot update itself anyways # we disable it because ASF cannot update itself anyways
# and nixos takes care of restarting the service # and nixos takes care of restarting the service
# is in theory not needed as this is already the default for default builds # is in theory not needed as this is already the default for default builds
@ -30,8 +28,8 @@ let
in in
{ {
options.services.archisteamfarm = { options.services.archisteamfarm = {
enable = mkOption { enable = lib.mkOption {
type = types.bool; type = lib.types.bool;
description = lib.mdDoc '' description = lib.mdDoc ''
If enabled, starts the ArchisSteamFarm service. If enabled, starts the ArchisSteamFarm service.
For configuring the SteamGuard token you will need to use the web-ui, which is enabled by default over on 127.0.0.1:1242. For configuring the SteamGuard token you will need to use the web-ui, which is enabled by default over on 127.0.0.1:1242.
@ -40,14 +38,14 @@ in
default = false; default = false;
}; };
web-ui = mkOption { web-ui = lib.mkOption {
type = types.submodule { type = lib.types.submodule {
options = { options = {
enable = mkEnableOption "" // { enable = lib.mkEnableOption "" // {
description = lib.mdDoc "Whether to start the web-ui. This is the preferred way of configuring things such as the steam guard token."; description = lib.mdDoc "Whether to start the web-ui. This is the preferred way of configuring things such as the steam guard token.";
}; };
package = mkPackageOption pkgs [ "ArchiSteamFarm" "ui" ] { package = lib.mkPackageOption pkgs [ "ArchiSteamFarm" "ui" ] {
extraDescription = '' extraDescription = ''
::: {.note} ::: {.note}
Contents must be in lib/dist Contents must be in lib/dist
@ -65,7 +63,7 @@ in
description = lib.mdDoc "The Web-UI hosted on 127.0.0.1:1242."; description = lib.mdDoc "The Web-UI hosted on 127.0.0.1:1242.";
}; };
package = mkPackageOption pkgs "ArchiSteamFarm" { package = lib.mkPackageOption pkgs "ArchiSteamFarm" {
extraDescription = '' extraDescription = ''
::: {.warning} ::: {.warning}
Should always be the latest version, for security reasons, Should always be the latest version, for security reasons,
@ -74,15 +72,15 @@ in
''; '';
}; };
dataDir = mkOption { dataDir = lib.mkOption {
type = types.path; type = lib.types.path;
default = "/var/lib/asf"; default = "/var/lib/archisteamfarm";
description = lib.mdDoc '' description = lib.mdDoc ''
The ASF home directory used to store all data. The ASF home directory used to store all data.
If left as the default value this directory will automatically be created before the ASF server starts, otherwise the sysadmin is responsible for ensuring the directory exists with appropriate ownership and permissions.''; If left as the default value this directory will automatically be created before the ASF server starts, otherwise the sysadmin is responsible for ensuring the directory exists with appropriate ownership and permissions.'';
}; };
settings = mkOption { settings = lib.mkOption {
type = format.type; type = format.type;
description = lib.mdDoc '' description = lib.mdDoc ''
The ASF.json file, all the options are documented [here](https://github.com/JustArchiNET/ArchiSteamFarm/wiki/Configuration#global-config). The ASF.json file, all the options are documented [here](https://github.com/JustArchiNET/ArchiSteamFarm/wiki/Configuration#global-config).
@ -96,13 +94,13 @@ in
default = { }; default = { };
}; };
ipcPasswordFile = mkOption { ipcPasswordFile = lib.mkOption {
type = types.nullOr types.path; type = with lib.types; nullOr path;
default = null; default = null;
description = lib.mdDoc "Path to a file containing the password. The file must be readable by the `asf` user/group."; description = lib.mdDoc "Path to a file containing the password. The file must be readable by the `archisteamfarm` user/group.";
}; };
ipcSettings = mkOption { ipcSettings = lib.mkOption {
type = format.type; type = format.type;
description = lib.mdDoc '' description = lib.mdDoc ''
Settings to write to IPC.config. Settings to write to IPC.config.
@ -120,25 +118,25 @@ in
default = { }; default = { };
}; };
bots = mkOption { bots = lib.mkOption {
type = types.attrsOf (types.submodule { type = lib.types.attrsOf (lib.types.submodule {
options = { options = {
username = mkOption { username = lib.mkOption {
type = types.str; type = lib.types.str;
description = lib.mdDoc "Name of the user to log in. Default is attribute name."; description = lib.mdDoc "Name of the user to log in. Default is attribute name.";
default = ""; default = "";
}; };
passwordFile = mkOption { passwordFile = lib.mkOption {
type = types.path; type = lib.types.path;
description = lib.mdDoc "Path to a file containing the password. The file must be readable by the `asf` user/group."; description = lib.mdDoc "Path to a file containing the password. The file must be readable by the `archisteamfarm` user/group.";
}; };
enabled = mkOption { enabled = lib.mkOption {
type = types.bool; type = lib.types.bool;
default = true; default = true;
description = lib.mdDoc "Whether to enable the bot on startup."; description = lib.mdDoc "Whether to enable the bot on startup.";
}; };
settings = mkOption { settings = lib.mkOption {
type = types.attrs; type = lib.types.attrs;
description = lib.mdDoc '' description = lib.mdDoc ''
Additional settings that are documented [here](https://github.com/JustArchiNET/ArchiSteamFarm/wiki/Configuration#bot-config). Additional settings that are documented [here](https://github.com/JustArchiNET/ArchiSteamFarm/wiki/Configuration#bot-config).
''; '';
@ -152,7 +150,7 @@ in
example = { example = {
exampleBot = { exampleBot = {
username = "alice"; username = "alice";
passwordFile = "/var/lib/asf/secrets/password"; passwordFile = "/var/lib/archisteamfarm/secrets/password";
settings = { SteamParentalCode = "1234"; }; settings = { SteamParentalCode = "1234"; };
}; };
}; };
@ -160,32 +158,34 @@ in
}; };
}; };
config = mkIf cfg.enable { config = lib.mkIf cfg.enable {
# TODO: drop with 24.11
services.archisteamfarm.dataDir = lib.mkIf (lib.versionAtLeast config.system.stateVersion "24.05") (lib.mkDefault "/var/lib/asf");
users = { users = {
users.asf = { users.archisteamfarm = {
home = cfg.dataDir; home = cfg.dataDir;
isSystemUser = true; isSystemUser = true;
group = "asf"; group = "archisteamfarm";
description = "Archis-Steam-Farm service user"; description = "Archis-Steam-Farm service user";
}; };
groups.asf = { }; groups.archisteamfarm = { };
}; };
systemd.services = { systemd.services = {
asf = { archisteamfarm = {
description = "Archis-Steam-Farm Service"; description = "Archis-Steam-Farm Service";
after = [ "network.target" ]; after = [ "network.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
serviceConfig = mkMerge [ serviceConfig = lib.mkMerge [
(mkIf (cfg.dataDir == "/var/lib/asf") { (lib.mkIf (lib.hasPrefix "/var/lib/" cfg.dataDir) {
StateDirectory = "asf"; StateDirectory = lib.last (lib.splitString "/" cfg.dataDir);
StateDirectoryMode = "700"; StateDirectoryMode = "700";
}) })
{ {
User = "asf"; User = "archisteamfarm";
Group = "asf"; Group = "archisteamfarm";
WorkingDirectory = cfg.dataDir; WorkingDirectory = cfg.dataDir;
Type = "simple"; Type = "simple";
ExecStart = "${lib.getExe cfg.package} --no-restart --process-required --service --system-required --path ${cfg.dataDir}"; ExecStart = "${lib.getExe cfg.package} --no-restart --process-required --service --system-required --path ${cfg.dataDir}";
@ -217,12 +217,10 @@ in
RestrictNamespaces = true; RestrictNamespaces = true;
RestrictRealtime = true; RestrictRealtime = true;
RestrictSUIDSGID = true; RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
UMask = "0077";
# we luckily already have systemd v247+
SecureBits = "noroot-locked"; SecureBits = "noroot-locked";
SystemCallArchitectures = "native";
SystemCallFilter = [ "@system-service" "~@privileged" ]; SystemCallFilter = [ "@system-service" "~@privileged" ];
UMask = "0077";
} }
]; ];
@ -232,7 +230,7 @@ in
mkdir -p $out mkdir -p $out
# clean potential removed bots # clean potential removed bots
rm -rf $out/*.json rm -rf $out/*.json
for i in ${strings.concatStringsSep " " (lists.map (x: "${getName x},${x}") (attrsets.mapAttrsToList mkBot cfg.bots))}; do IFS=","; for i in ${lib.concatStringsSep " " (map (x: "${lib.getName x},${x}") (lib.mapAttrsToList mkBot cfg.bots))}; do IFS=",";
set -- $i set -- $i
ln -fs $2 $out/$1 ln -fs $2 $out/$1
done done
@ -242,22 +240,22 @@ in
'' ''
mkdir -p config mkdir -p config
cp --no-preserve=mode ${asf-config} config/ASF.json cp --no-preserve=mode ${configFile} config/ASF.json
${optionalString (cfg.ipcPasswordFile != null) '' ${lib.optionalString (cfg.ipcPasswordFile != null) ''
${replaceSecretBin} '#ipcPassword#' '${cfg.ipcPasswordFile}' config/ASF.json ${replaceSecretBin} '#ipcPassword#' '${cfg.ipcPasswordFile}' config/ASF.json
''} ''}
${optionalString (cfg.ipcSettings != {}) '' ${lib.optionalString (cfg.ipcSettings != {}) ''
ln -fs ${ipc-config} config/IPC.config ln -fs ${ipc-config} config/IPC.config
''} ''}
${optionalString (cfg.ipcSettings != {}) '' ${lib.optionalString (cfg.ipcSettings != {}) ''
ln -fs ${createBotsScript}/* config/ ln -fs ${createBotsScript}/* config/
''} ''}
rm -f www rm -f www
${optionalString cfg.web-ui.enable '' ${lib.optionalString cfg.web-ui.enable ''
ln -s ${cfg.web-ui.package}/ www ln -s ${cfg.web-ui.package}/ www
''} ''}
''; '';
@ -267,6 +265,6 @@ in
meta = { meta = {
buildDocsInSandbox = false; buildDocsInSandbox = false;
maintainers = with maintainers; [ lom SuperSandro2000 ]; maintainers = with lib.maintainers; [ lom SuperSandro2000 ];
}; };
} }

View file

@ -135,6 +135,7 @@ in
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
serviceConfig = { serviceConfig = {
PrivateNetwork = true;
ExecStart = escapeShellArgs ExecStart = escapeShellArgs
([ "${pkgs.acpid}/bin/acpid" ([ "${pkgs.acpid}/bin/acpid"
"--foreground" "--foreground"

View file

@ -16,6 +16,7 @@ let
"fwupd/fwupd.conf" = { "fwupd/fwupd.conf" = {
source = format.generate "fwupd.conf" { source = format.generate "fwupd.conf" {
fwupd = cfg.daemonSettings; fwupd = cfg.daemonSettings;
} // lib.optionalAttrs (lib.length (lib.attrNames cfg.uefiCapsuleSettings) != 0) {
uefi_capsule = cfg.uefiCapsuleSettings; uefi_capsule = cfg.uefiCapsuleSettings;
}; };
# fwupd tries to chmod the file if it doesn't have the right permissions # fwupd tries to chmod the file if it doesn't have the right permissions

View file

@ -16,9 +16,6 @@ let
in in
{ {
###### interface
options.services.pcscd = { options.services.pcscd = {
enable = mkEnableOption (lib.mdDoc "PCSC-Lite daemon"); enable = mkEnableOption (lib.mdDoc "PCSC-Lite daemon");
@ -46,13 +43,10 @@ in
}; };
}; };
###### implementation
config = mkIf config.services.pcscd.enable { config = mkIf config.services.pcscd.enable {
environment.etc."reader.conf".source = cfgFile; environment.etc."reader.conf".source = cfgFile;
environment.systemPackages = [ package ]; environment.systemPackages = [ package.out ];
systemd.packages = [ (getBin package) ]; systemd.packages = [ (getBin package) ];
services.pcscd.plugins = [ pkgs.ccid ]; services.pcscd.plugins = [ pkgs.ccid ];
@ -61,7 +55,6 @@ in
systemd.services.pcscd = { systemd.services.pcscd = {
environment.PCSCLITE_HP_DROPDIR = pluginEnv; environment.PCSCLITE_HP_DROPDIR = pluginEnv;
restartTriggers = [ "/etc/reader.conf" ];
# If the cfgFile is empty and not specified (in which case the default # If the cfgFile is empty and not specified (in which case the default
# /etc/reader.conf is assumed), pcscd will happily start going through the # /etc/reader.conf is assumed), pcscd will happily start going through the

View file

@ -41,6 +41,7 @@ in
config = mkIf cfg.enable { config = mkIf cfg.enable {
systemd.services.evcc = { systemd.services.evcc = {
wants = [ "network-online.target" ];
after = [ after = [
"network-online.target" "network-online.target"
"mosquitto.target" "mosquitto.target"

View file

@ -435,6 +435,7 @@ in {
systemd.services.home-assistant = { systemd.services.home-assistant = {
description = "Home Assistant"; description = "Home Assistant";
wants = [ "network-online.target" ];
after = [ after = [
"network-online.target" "network-online.target"

View file

@ -84,6 +84,7 @@ in {
systemd.services.journaldriver = { systemd.services.journaldriver = {
description = "Stackdriver Logging journal forwarder"; description = "Stackdriver Logging journal forwarder";
script = "${pkgs.journaldriver}/bin/journaldriver"; script = "${pkgs.journaldriver}/bin/journaldriver";
wants = [ "network-online.target" ];
after = [ "network-online.target" ]; after = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];

View file

@ -201,13 +201,12 @@ in {
DynamicUser = true; DynamicUser = true;
NoNewPrivileges = true; NoNewPrivileges = true;
CapabilityBoundingSet = ""; CapabilityBoundingSet = "";
SystemCallArchitecture = "native"; SystemCallArchitectures = "native";
SystemCallFilter = [ "@system-service" "~@privileged" ]; SystemCallFilter = [ "@system-service" "~@privileged" ];
ProtectDevices = true; PrivateDevices = true;
ProtectControlGroups = true; ProtectControlGroups = true;
ProtectKernelTunables = true; ProtectKernelTunables = true;
ProtectHome = true; ProtectHome = true;
DeviceAllow = false;
RestrictNamespaces = true; RestrictNamespaces = true;
RestrictRealtime = true; RestrictRealtime = true;
UMask = "0027"; UMask = "0027";

View file

@ -120,7 +120,7 @@ with lib;
}; };
maxpause = mkOption { maxpause = mkOption {
type = types.nullOr types.str; type = with types; nullOr (oneOf [ str int ]);
default = null; default = null;
description = lib.mdDoc '' description = lib.mdDoc ''
The maximum time to pause between successive queue runs, in seconds. The maximum time to pause between successive queue runs, in seconds.
@ -138,7 +138,7 @@ with lib;
}; };
pausetime = mkOption { pausetime = mkOption {
type = types.nullOr types.str; type = with types; nullOr (oneOf [ str int ]);
default = null; default = null;
description = lib.mdDoc '' description = lib.mdDoc ''
The minimum time to pause between successive queue runs when there The minimum time to pause between successive queue runs when there
@ -168,7 +168,7 @@ with lib;
}; };
sendtimeout = mkOption { sendtimeout = mkOption {
type = types.nullOr types.str; type = with types; nullOr (oneOf [ str int ]);
default = null; default = null;
description = lib.mdDoc '' description = lib.mdDoc ''
The time to wait for a remote module listed above to complete sending The time to wait for a remote module listed above to complete sending
@ -194,7 +194,7 @@ with lib;
environment = { environment = {
systemPackages = [ pkgs.nullmailer ]; systemPackages = [ pkgs.nullmailer ];
etc = let etc = let
validAttrs = filterAttrs (name: value: value != null) cfg.config; validAttrs = lib.mapAttrs (_: toString) (filterAttrs (_: value: value != null) cfg.config);
in in
(foldl' (as: name: as // { "nullmailer/${name}".text = validAttrs.${name}; }) {} (attrNames validAttrs)) (foldl' (as: name: as // { "nullmailer/${name}".text = validAttrs.${name}; }) {} (attrNames validAttrs))
// optionalAttrs (cfg.remotesFile != null) { "nullmailer/remotes".source = cfg.remotesFile; }; // optionalAttrs (cfg.remotesFile != null) { "nullmailer/remotes".source = cfg.remotesFile; };

View file

@ -102,6 +102,12 @@ in
apply = configuredMaxAttachmentSize: "${toString (configuredMaxAttachmentSize * 1.3)}M"; apply = configuredMaxAttachmentSize: "${toString (configuredMaxAttachmentSize * 1.3)}M";
}; };
configureNginx = lib.mkOption {
type = lib.types.bool;
default = true;
description = lib.mdDoc "Configure nginx as a reverse proxy for roundcube.";
};
extraConfig = mkOption { extraConfig = mkOption {
type = types.lines; type = types.lines;
default = ""; default = "";
@ -142,26 +148,39 @@ in
${cfg.extraConfig} ${cfg.extraConfig}
''; '';
services.nginx = { services.nginx = lib.mkIf cfg.configureNginx {
enable = true; enable = true;
virtualHosts = { virtualHosts = {
${cfg.hostName} = { ${cfg.hostName} = {
forceSSL = mkDefault true; forceSSL = mkDefault true;
enableACME = mkDefault true; enableACME = mkDefault true;
root = cfg.package;
locations."/" = { locations."/" = {
root = cfg.package;
index = "index.php"; index = "index.php";
priority = 1100;
extraConfig = '' extraConfig = ''
location ~* \.php(/|$) { add_header Cache-Control 'public, max-age=604800, must-revalidate';
fastcgi_split_path_info ^(.+\.php)(/.+)$; '';
fastcgi_pass unix:${fpm.socket}; };
locations."~ ^/(SQL|bin|config|logs|temp|vendor)/" = {
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; priority = 3110;
fastcgi_param PATH_INFO $fastcgi_path_info; extraConfig = ''
return 404;
include ${config.services.nginx.package}/conf/fastcgi_params; '';
include ${pkgs.nginx}/conf/fastcgi.conf; };
} locations."~ ^/(CHANGELOG.md|INSTALL|LICENSE|README.md|SECURITY.md|UPGRADING|composer.json|composer.lock)" = {
priority = 3120;
extraConfig = ''
return 404;
'';
};
locations."~* \\.php(/|$)" = {
priority = 3130;
extraConfig = ''
fastcgi_pass unix:${fpm.socket};
fastcgi_param PATH_INFO $fastcgi_path_info;
fastcgi_split_path_info ^(.+\.php)(/.+)$;
include ${config.services.nginx.package}/conf/fastcgi.conf;
''; '';
}; };
}; };
@ -231,6 +250,7 @@ in
path = [ config.services.postgresql.package ]; path = [ config.services.postgresql.package ];
}) })
{ {
wants = [ "network-online.target" ];
after = [ "network-online.target" ]; after = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
script = let script = let

View file

@ -435,7 +435,7 @@ in
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" ]; after = [ "network-online.target" ];
wants = sympaSubServices; wants = sympaSubServices ++ [ "network-online.target" ];
before = sympaSubServices; before = sympaSubServices;
serviceConfig = sympaServiceConfig "sympa_msg"; serviceConfig = sympaServiceConfig "sympa_msg";

View file

@ -1056,6 +1056,7 @@ in {
systemd.targets.matrix-synapse = lib.mkIf hasWorkers { systemd.targets.matrix-synapse = lib.mkIf hasWorkers {
description = "Synapse Matrix parent target"; description = "Synapse Matrix parent target";
wants = [ "network-online.target" ];
after = [ "network-online.target" ] ++ optional hasLocalPostgresDB "postgresql.service"; after = [ "network-online.target" ] ++ optional hasLocalPostgresDB "postgresql.service";
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
}; };
@ -1071,6 +1072,7 @@ in {
requires = optional hasLocalPostgresDB "postgresql.service"; requires = optional hasLocalPostgresDB "postgresql.service";
} }
else { else {
wants = [ "network-online.target" ];
after = [ "network-online.target" ] ++ optional hasLocalPostgresDB "postgresql.service"; after = [ "network-online.target" ] ++ optional hasLocalPostgresDB "postgresql.service";
requires = optional hasLocalPostgresDB "postgresql.service"; requires = optional hasLocalPostgresDB "postgresql.service";
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];

View file

@ -41,6 +41,7 @@ in {
# See https://github.com/aws/amazon-ssm-agent/blob/mainline/packaging/linux/amazon-ssm-agent.service # See https://github.com/aws/amazon-ssm-agent/blob/mainline/packaging/linux/amazon-ssm-agent.service
systemd.services.amazon-ssm-agent = { systemd.services.amazon-ssm-agent = {
inherit (cfg.package.meta) description; inherit (cfg.package.meta) description;
wants = [ "network-online.target" ];
after = [ "network-online.target" ]; after = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];

View file

@ -154,7 +154,7 @@ in
in { in {
description = "BigClown Gateway"; description = "BigClown Gateway";
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
wants = mkIf config.services.mosquitto.enable [ "mosquitto.service" ]; wants = [ "network-online.target" ] ++ lib.optional config.services.mosquitto.enable "mosquitto.service";
after = [ "network-online.target" ]; after = [ "network-online.target" ];
preStart = '' preStart = ''
umask 077 umask 077

View file

@ -35,6 +35,7 @@ in {
systemd.services."domoticz" = { systemd.services."domoticz" = {
description = pkgDesc; description = pkgDesc;
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" ];
after = [ "network-online.target" ]; after = [ "network-online.target" ];
serviceConfig = { serviceConfig = {
DynamicUser = true; DynamicUser = true;

View file

@ -59,6 +59,7 @@ in
systemd.services.etesync-dav = { systemd.services.etesync-dav = {
description = "etesync-dav - A CalDAV and CardDAV adapter for EteSync"; description = "etesync-dav - A CalDAV and CardDAV adapter for EteSync";
wants = [ "network-online.target" ];
after = [ "network-online.target" ]; after = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
path = [ pkgs.etesync-dav ]; path = [ pkgs.etesync-dav ];

View file

@ -357,6 +357,7 @@ in {
description = "${cfg.serverName} media Server"; description = "${cfg.serverName} media Server";
# Gerbera might fail if the network interface is not available on startup # Gerbera might fail if the network interface is not available on startup
# https://github.com/gerbera/gerbera/issues/1324 # https://github.com/gerbera/gerbera/issues/1324
wants = [ "network-online.target" ];
after = [ "network.target" "network-online.target" ]; after = [ "network.target" "network-online.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
serviceConfig.ExecStart = "${binaryCommand} --port ${toString cfg.port} ${interfaceFlag} ${configFlag} --home ${cfg.dataDir}"; serviceConfig.ExecStart = "${binaryCommand} --port ${toString cfg.port} ${interfaceFlag} ${configFlag} --home ${cfg.dataDir}";

View file

@ -77,6 +77,7 @@ in {
systemd.services.metabase = { systemd.services.metabase = {
description = "Metabase server"; description = "Metabase server";
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" ];
after = [ "network-online.target" ]; after = [ "network-online.target" ];
environment = { environment = {
MB_PLUGINS_DIR = "${dataDir}/plugins"; MB_PLUGINS_DIR = "${dataDir}/plugins";

View file

@ -103,7 +103,7 @@ in {
config = mkIf cfg.enable { config = mkIf cfg.enable {
warnings = [] warnings = []
++ optional (cfg.settings ? update_manager) ++ optional (cfg.settings.update_manager.enable_system_updates or false)
''Enabling update_manager is not supported on NixOS and will lead to non-removable warnings in some clients.'' ''Enabling update_manager is not supported on NixOS and will lead to non-removable warnings in some clients.''
++ optional (cfg.configDir != null) ++ optional (cfg.configDir != null)
'' ''

View file

@ -1,7 +1,5 @@
{ config, lib, ... }: { config, lib, ... }:
with lib;
let let
cfg = config.nix.gc; cfg = config.nix.gc;
in in
@ -14,14 +12,14 @@ in
nix.gc = { nix.gc = {
automatic = mkOption { automatic = lib.mkOption {
default = false; default = false;
type = types.bool; type = lib.types.bool;
description = lib.mdDoc "Automatically run the garbage collector at a specific time."; description = lib.mdDoc "Automatically run the garbage collector at a specific time.";
}; };
dates = mkOption { dates = lib.mkOption {
type = types.str; type = lib.types.singleLineStr;
default = "03:15"; default = "03:15";
example = "weekly"; example = "weekly";
description = lib.mdDoc '' description = lib.mdDoc ''
@ -33,9 +31,9 @@ in
''; '';
}; };
randomizedDelaySec = mkOption { randomizedDelaySec = lib.mkOption {
default = "0"; default = "0";
type = types.str; type = lib.types.singleLineStr;
example = "45min"; example = "45min";
description = lib.mdDoc '' description = lib.mdDoc ''
Add a randomized delay before each garbage collection. Add a randomized delay before each garbage collection.
@ -45,9 +43,9 @@ in
''; '';
}; };
persistent = mkOption { persistent = lib.mkOption {
default = true; default = true;
type = types.bool; type = lib.types.bool;
example = false; example = false;
description = lib.mdDoc '' description = lib.mdDoc ''
Takes a boolean argument. If true, the time when the service Takes a boolean argument. If true, the time when the service
@ -61,10 +59,10 @@ in
''; '';
}; };
options = mkOption { options = lib.mkOption {
default = ""; default = "";
example = "--max-freed $((64 * 1024**3))"; example = "--max-freed $((64 * 1024**3))";
type = types.str; type = lib.types.singleLineStr;
description = lib.mdDoc '' description = lib.mdDoc ''
Options given to {file}`nix-collect-garbage` when the Options given to {file}`nix-collect-garbage` when the
garbage collector is run automatically. garbage collector is run automatically.
@ -89,7 +87,8 @@ in
systemd.services.nix-gc = lib.mkIf config.nix.enable { systemd.services.nix-gc = lib.mkIf config.nix.enable {
description = "Nix Garbage Collector"; description = "Nix Garbage Collector";
script = "exec ${config.nix.package.out}/bin/nix-collect-garbage ${cfg.options}"; script = "exec ${config.nix.package.out}/bin/nix-collect-garbage ${cfg.options}";
startAt = optional cfg.automatic cfg.dates; serviceConfig.Type = "oneshot";
startAt = lib.optional cfg.automatic cfg.dates;
}; };
systemd.timers.nix-gc = lib.mkIf cfg.automatic { systemd.timers.nix-gc = lib.mkIf cfg.automatic {

View file

@ -1,4 +1,4 @@
{ config, lib, ... }: { config, lib, pkgs, ... }:
with lib; with lib;
let cfg = config.nix.sshServe; let cfg = config.nix.sshServe;
@ -46,7 +46,7 @@ in {
description = "Nix SSH store user"; description = "Nix SSH store user";
isSystemUser = true; isSystemUser = true;
group = "nix-ssh"; group = "nix-ssh";
useDefaultShell = true; shell = pkgs.bashInteractive;
}; };
users.groups.nix-ssh = {}; users.groups.nix-ssh = {};

View file

@ -9,6 +9,13 @@ in {
enable = lib.mkEnableOption ( enable = lib.mkEnableOption (
lib.mdDoc "Server for local large language models" lib.mdDoc "Server for local large language models"
); );
listenAddress = lib.mkOption {
type = lib.types.str;
default = "127.0.0.1:11434";
description = lib.mdDoc ''
Specifies the bind address on which the ollama server HTTP interface listens.
'';
};
package = lib.mkPackageOption pkgs "ollama" { }; package = lib.mkPackageOption pkgs "ollama" { };
}; };
}; };
@ -23,6 +30,7 @@ in {
environment = { environment = {
HOME = "%S/ollama"; HOME = "%S/ollama";
OLLAMA_MODELS = "%S/ollama/models"; OLLAMA_MODELS = "%S/ollama/models";
OLLAMA_HOST = cfg.listenAddress;
}; };
serviceConfig = { serviceConfig = {
ExecStart = "${lib.getExe cfg.package} serve"; ExecStart = "${lib.getExe cfg.package} serve";

View file

@ -297,6 +297,7 @@ in
wantedBy = [ "paperless-scheduler.service" ]; wantedBy = [ "paperless-scheduler.service" ];
before = [ "paperless-scheduler.service" ]; before = [ "paperless-scheduler.service" ];
after = [ "network-online.target" ]; after = [ "network-online.target" ];
wants = [ "network-online.target" ];
serviceConfig = defaultServiceConfig // { serviceConfig = defaultServiceConfig // {
User = cfg.user; User = cfg.user;
Type = "oneshot"; Type = "oneshot";

Some files were not shown because too many files have changed in this diff Show more