Project import generated by Copybara.

GitOrigin-RevId: f99e5f03cc0aa231ab5950a15ed02afec45ed51a
Default email 2023-10-09 21:29:22 +02:00
parent dbb65cc740
commit 5e7c2d6cef
7521 changed files with 253268 additions and 349735 deletions

View file

@@ -23,23 +23,25 @@
# Libraries
/lib @edolstra @infinisil
/lib/systems @alyssais @ericson2314 @matthewbauer @amjoseph-nixpkgs
/lib/systems @alyssais @ericson2314 @amjoseph-nixpkgs
/lib/generators.nix @edolstra @Profpatsch
/lib/cli.nix @edolstra @Profpatsch
/lib/debug.nix @edolstra @Profpatsch
/lib/asserts.nix @edolstra @Profpatsch
/lib/path.* @infinisil @fricklerhandwerk
/lib/fileset @infinisil
/doc/functions/fileset.section.md @infinisil
# Nixpkgs Internals
/default.nix @Ericson2314
/pkgs/top-level/default.nix @Ericson2314
/pkgs/top-level/impure.nix @Ericson2314
/pkgs/top-level/stage.nix @Ericson2314 @matthewbauer
/pkgs/top-level/splice.nix @Ericson2314 @matthewbauer
/pkgs/top-level/release-cross.nix @Ericson2314 @matthewbauer
/pkgs/stdenv/generic @Ericson2314 @matthewbauer @amjoseph-nixpkgs
/pkgs/stdenv/generic/check-meta.nix @Ericson2314 @matthewbauer @piegamesde
/pkgs/stdenv/cross @Ericson2314 @matthewbauer @amjoseph-nixpkgs
/pkgs/top-level/stage.nix @Ericson2314
/pkgs/top-level/splice.nix @Ericson2314
/pkgs/top-level/release-cross.nix @Ericson2314
/pkgs/stdenv/generic @Ericson2314 @amjoseph-nixpkgs
/pkgs/stdenv/generic/check-meta.nix @Ericson2314 @piegamesde
/pkgs/stdenv/cross @Ericson2314 @amjoseph-nixpkgs
/pkgs/build-support/cc-wrapper @Ericson2314 @amjoseph-nixpkgs
/pkgs/build-support/bintools-wrapper @Ericson2314
/pkgs/build-support/setup-hooks @Ericson2314
@@ -47,6 +49,12 @@
/pkgs/build-support/setup-hooks/auto-patchelf.py @layus
/pkgs/pkgs-lib @infinisil
# pkgs/by-name
/pkgs/test/nixpkgs-check-by-name @infinisil
/pkgs/by-name/README.md @infinisil
/pkgs/top-level/by-name-overlay.nix @infinisil
/.github/workflows/check-by-name.nix @infinisil
# Nixpkgs build-support
/pkgs/build-support/writers @lassulus @Profpatsch
@@ -93,6 +101,10 @@
/nixos/lib/systemd-*.nix @NixOS/systemd
/pkgs/os-specific/linux/systemd @NixOS/systemd
# Images and installer media
/nixos/modules/installer/cd-dvd/ @samueldr
/nixos/modules/installer/sd-card/ @samueldr
# Updaters
## update.nix
/maintainers/scripts/update.nix @jtojnar
@@ -135,12 +147,8 @@
/doc/languages-frameworks/rust.section.md @zowoq @winterqt @figsoda
# C compilers
/pkgs/development/compilers/gcc @matthewbauer @amjoseph-nixpkgs
/pkgs/development/compilers/llvm @matthewbauer @RaitoBezarius
# Compatibility stuff
/pkgs/top-level/unix-tools.nix @matthewbauer
/pkgs/development/tools/xcbuild @matthewbauer
/pkgs/development/compilers/gcc @amjoseph-nixpkgs
/pkgs/development/compilers/llvm @RaitoBezarius
# Audio
/nixos/modules/services/audio/botamusique.nix @mweinelt
@@ -245,7 +253,8 @@ pkgs/development/python-modules/buildcatrust/ @ajs124 @lukegb @mweinelt
# PHP interpreter, packages, extensions, tests and documentation
/doc/languages-frameworks/php.section.md @aanderse @drupol @etu @globin @ma27 @talyz
/nixos/tests/php @aanderse @drupol @etu @globin @ma27 @talyz
/pkgs/build-support/build-pecl.nix @aanderse @drupol @etu @globin @ma27 @talyz
/pkgs/build-support/php/build-pecl.nix @aanderse @drupol @etu @globin @ma27 @talyz
/pkgs/build-support/php @drupol @etu
/pkgs/development/interpreters/php @jtojnar @aanderse @drupol @etu @globin @ma27 @talyz
/pkgs/development/php-packages @aanderse @drupol @etu @globin @ma27 @talyz
/pkgs/top-level/php-packages.nix @jtojnar @aanderse @drupol @etu @globin @ma27 @talyz
@@ -293,6 +302,10 @@ pkgs/development/python-modules/buildcatrust/ @ajs124 @lukegb @mweinelt
/nixos/modules/services/misc/matrix-conduit.nix @piegamesde
/nixos/tests/matrix-conduit.nix @piegamesde
# Forgejo
nixos/modules/services/misc/forgejo.nix @bendlas @emilylange
pkgs/applications/version-management/forgejo @bendlas @emilylange
# Dotnet
/pkgs/build-support/dotnet @IvarWithoutBones
/pkgs/development/compilers/dotnet @IvarWithoutBones

View file

@@ -20,7 +20,7 @@ jobs:
if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith('backport', github.event.label.name))
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Create backport PRs
@@ -31,5 +31,5 @@ jobs:
pull_description: |-
Bot-based backport to `${target_branch}`, triggered by a label in #${pull_number}.
* [ ] Before merging, ensure that this backport complies with the [Criteria for Backporting](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md#criteria-for-backporting-changes).
* Even as a non-committer, if you find that it does not comply, leave a comment.
* [ ] Before merging, ensure that this backport is [acceptable for the release](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md#changes-acceptable-for-releases).
* Even as a non-committer, if you find that it is not acceptable, leave a comment.

View file

@@ -18,8 +18,8 @@ jobs:
runs-on: ubuntu-latest
# we don't limit this action to only the NixOS repo since the checks are cheap and provide useful developer feedback
steps:
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v23
- uses: cachix/cachix-action@v12
with:
# This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.

View file

@@ -0,0 +1,151 @@
# Checks pkgs/by-name (see pkgs/by-name/README.md)
# using the nixpkgs-check-by-name tool (see pkgs/test/nixpkgs-check-by-name)
name: Check pkgs/by-name
# The pre-built tool is fetched from a channel,
# making it work predictably on all PRs.
on:
# Using pull_request_target instead of pull_request avoids having to approve first time contributors
pull_request_target
# The tool doesn't need any permissions; it only outputs success or not based on the checkout
permissions: {}
jobs:
check:
# This is x86_64-linux, for which the tool is always prebuilt on the nixos-* channels,
# as specified in nixos/release-combined.nix
runs-on: ubuntu-latest
steps:
- name: Resolving the merge commit
run: |
if result=$(git ls-remote --exit-code ${{ github.event.pull_request.base.repo.clone_url }} refs/pull/${{ github.event.pull_request.number }}/merge); then
mergedSha=$(cut -f1 <<< "$result")
echo "The PR appears to not have any conflicts, checking the merge commit $mergedSha"
else
echo "The PR may have a merge conflict"
exit 1
fi
echo "mergedSha=$mergedSha" >> "$GITHUB_ENV"
- uses: actions/checkout@v4
with:
# pull_request_target checks out the base branch by default
ref: ${{ env.mergedSha }}
# Fetches the merge commit and its parents
fetch-depth: 2
- name: Determining PR git hashes
run: |
# For pull_request_target this is the same as $GITHUB_SHA
echo "baseSha=$(git rev-parse HEAD^1)" >> "$GITHUB_ENV"
echo "headSha=$(git rev-parse HEAD^2)" >> "$GITHUB_ENV"
- uses: cachix/install-nix-action@v23
- name: Determining channel to use for dependencies
run: |
echo "Determining which channel to use for PR base branch $GITHUB_BASE_REF"
if [[ "$GITHUB_BASE_REF" =~ ^(release|staging|staging-next)-([0-9][0-9]\.[0-9][0-9])$ ]]; then
# Use the release channel for all PRs to release-XX.YY, staging-XX.YY and staging-next-XX.YY
channel=nixos-${BASH_REMATCH[2]}
echo "PR is for a release branch, using release channel $channel"
else
# Use the nixos-unstable channel for all other PRs
channel=nixos-unstable
echo "PR is for a non-release branch, using unstable channel $channel"
fi
echo "channel=$channel" >> "$GITHUB_ENV"
- name: Fetching latest version of channel
run: |
echo "Fetching latest version of channel $channel"
# This is probably the easiest way to get Nix to output the path to a downloaded channel!
nixpkgs=$(nix-instantiate --find-file nixpkgs -I nixpkgs=channel:"$channel")
# This file only exists in channels
rev=$(<"$nixpkgs"/.git-revision)
echo "Channel $channel is at revision $rev"
echo "nixpkgs=$nixpkgs" >> "$GITHUB_ENV"
echo "rev=$rev" >> "$GITHUB_ENV"
- name: Fetching pre-built nixpkgs-check-by-name from the channel
run: |
echo "Fetching pre-built nixpkgs-check-by-name from channel $channel at revision $rev"
# Passing --max-jobs 0 makes sure that we won't build anything
nix-build "$nixpkgs" -A tests.nixpkgs-check-by-name --max-jobs 0
- name: Running nixpkgs-check-by-name
run: |
echo "Checking whether the check succeeds on the base branch $GITHUB_BASE_REF"
git checkout -q "$baseSha"
if baseOutput=$(result/bin/nixpkgs-check-by-name . 2>&1); then
baseSuccess=1
else
baseSuccess=
fi
printf "%s\n" "$baseOutput"
echo "Checking whether the check would succeed after merging this pull request"
git checkout -q "$mergedSha"
if mergedOutput=$(result/bin/nixpkgs-check-by-name . 2>&1); then
mergedSuccess=1
exitCode=0
else
mergedSuccess=
exitCode=1
fi
printf "%s\n" "$mergedOutput"
resultToEmoji() {
if [[ -n "$1" ]]; then
echo ":heavy_check_mark:"
else
echo ":x:"
fi
}
# Print a markdown summary in GitHub actions
{
echo "| Nixpkgs version | Check result |"
echo "| --- | --- |"
echo "| Latest base commit | $(resultToEmoji "$baseSuccess") |"
echo "| After merging this PR | $(resultToEmoji "$mergedSuccess") |"
echo ""
if [[ -n "$baseSuccess" ]]; then
if [[ -n "$mergedSuccess" ]]; then
echo "The check succeeds on both the base branch and after merging this PR"
else
echo "The check succeeds on the base branch, but would fail after merging this PR:"
echo "\`\`\`"
echo "$mergedOutput"
echo "\`\`\`"
echo ""
fi
else
if [[ -n "$mergedSuccess" ]]; then
echo "The check fails on the base branch, but this PR fixes it, nicely done!"
else
echo "The check fails on both the base branch and after merging this PR, unknown if only this PRs changes would satisfy the check, the base branch needs to be fixed first."
echo ""
echo "Failure on the base branch:"
echo "\`\`\`"
echo "$baseOutput"
echo "\`\`\`"
echo ""
echo "Failure after merging this PR:"
echo "\`\`\`"
echo "$mergedOutput"
echo "\`\`\`"
echo ""
fi
fi
echo "### Details"
echo "- nixpkgs-check-by-name tool:"
echo " - Channel: $channel"
echo " - Nixpkgs commit: [$rev](https://github.com/${GITHUB_REPOSITORY}/commit/$rev)"
echo " - Store path: \`$(realpath result)\`"
echo "- Tested Nixpkgs:"
echo " - Base branch: $GITHUB_BASE_REF"
echo " - Latest base branch commit: [$baseSha](https://github.com/${GITHUB_REPOSITORY}/commit/$baseSha)"
echo " - Latest PR commit: [$headSha](https://github.com/${GITHUB_REPOSITORY}/commit/$headSha)"
echo " - Merge commit: [$mergedSha](https://github.com/${GITHUB_REPOSITORY}/commit/$mergedSha)"
} >> "$GITHUB_STEP_SUMMARY"
exit "$exitCode"

View file

@@ -12,11 +12,11 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'NixOS'
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v22
- uses: cachix/install-nix-action@v23
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true

View file

@@ -24,11 +24,11 @@ jobs:
- name: print list of changed files
run: |
cat "$HOME/changed_files"
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v22
- uses: cachix/install-nix-action@v23
with:
# nixpkgs commit is pinned so that it doesn't break
# editorconfig-checker 2.4.0

View file

@@ -14,11 +14,11 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'NixOS'
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v22
- uses: cachix/install-nix-action@v23
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true

View file

@@ -15,11 +15,11 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'NixOS'
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v22
- uses: cachix/install-nix-action@v23
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true

View file

@@ -40,7 +40,7 @@ jobs:
into: staging-23.05
name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
uses: devmasx/merge-branch@1.4.0

View file

@@ -38,7 +38,7 @@ jobs:
into: staging
name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
uses: devmasx/merge-branch@1.4.0

View file

@@ -1,8 +1,8 @@
name: "Update terraform-providers"
on:
schedule:
- cron: "0 3 * * *"
#schedule:
# - cron: "0 3 * * *"
workflow_dispatch:
permissions:
@@ -16,8 +16,8 @@ jobs:
if: github.repository_owner == 'NixOS' && github.ref == 'refs/heads/master' # ensure workflow_dispatch only runs on master
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v22
- uses: actions/checkout@v4
- uses: cachix/install-nix-action@v23
with:
nix_path: nixpkgs=channel:nixpkgs-unstable
- name: setup

View file

@@ -5,6 +5,7 @@
.\#*
\#*\#
.idea/
.nixos-test-history
.vscode/
outputs/
result-*

View file

@@ -129,7 +129,7 @@ When a PR is created, it will be pre-populated with some checkboxes detailed below
#### Tested using sandboxing
When sandbox builds are enabled, Nix will set up an isolated environment for each build process. It is used to remove further hidden dependencies set by the build environment to improve reproducibility. This includes access to the network during the build outside of `fetch*` functions and files outside the Nix store. Depending on the operating system, access to other resources is blocked as well (e.g. inter-process communication is isolated on Linux); see [sandbox](https://nixos.org/nix/manual/#conf-sandbox) in the Nix manual for details.
When sandbox builds are enabled, Nix will set up an isolated environment for each build process. It is used to remove further hidden dependencies set by the build environment to improve reproducibility. This includes access to the network during the build outside of `fetch*` functions and files outside the Nix store. Depending on the operating system, access to other resources is blocked as well (e.g. inter-process communication is isolated on Linux); see [sandbox](https://nixos.org/manual/nix/stable/command-ref/conf-file#conf-sandbox) in the Nix manual for details.
Sandboxing is not enabled by default in Nix due to a small performance hit on each build. In pull requests for [nixpkgs](https://github.com/NixOS/nixpkgs/), people are asked to test builds with sandboxing enabled (see `Tested using sandboxing` in the pull request template) because sandboxing is also used in [Hydra](https://nixos.org/hydra/).
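For reference, a minimal sketch of enabling sandboxing declaratively, assuming a NixOS system; on other distributions the equivalent is `sandbox = true` in `/etc/nix/nix.conf`:
```nix
# configuration.nix: sandboxed builds are already the default on NixOS,
# but the setting can be made explicit:
{
  nix.settings.sandbox = true;
}
```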
@@ -483,17 +483,17 @@ The oldest supported release (`YYMM`) can be found using
nix-instantiate --eval -A lib.trivial.oldestSupportedRelease
```
The release branches should generally not receive any breaking changes, both for the Nix expressions and derivations.
So these changes are acceptable to backport:
- New packages, modules and functions
- Security fixes
- Package version updates
- Patch versions with fixes
- Minor versions with new functionality, but no breaking changes
The release branches should generally only receive backwards-compatible changes, both for the Nix expressions and derivations.
Here are some examples of backwards-compatible changes that are okay to backport:
- ✔️ New packages, modules and functions
- ✔️ Security fixes
- ✔️ Package version updates
- ✔️ Patch versions with fixes
- ✔️ Minor versions with new functionality, but no breaking changes
In addition, major package version updates with breaking changes are also acceptable for:
- Services that would fail without up-to-date client software, such as `spotify`, `steam`, and `discord`
- Security critical applications, such as `firefox` and `chromium`
- ✔️ Services that would fail without up-to-date client software, such as `spotify`, `steam`, and `discord`
- ✔️ Security critical applications, such as `firefox` and `chromium`
### Changes causing mass rebuilds
[mass-rebuild]: #changes-causing-mass-rebuilds
@@ -535,6 +535,11 @@ To get a sense for what changes are considered mass rebuilds, see [previously me
The old config generation system used impure shell scripts and could break in specific circumstances (see #1234).
When adding yourself as maintainer, in the same pull request, make a separate
commit with the message `maintainers: add <handle>`.
Add the commit before those making changes to the package or module.
See [Nixpkgs Maintainers](../maintainers/README.md) for details.
### Writing good commit messages
In addition to writing properly formatted commit messages, it's important to include relevant information so other developers can later understand *why* a change was made. While this information usually can be found by digging through code, mailing list/Discourse archives, pull request discussions or upstream changes, it may require a lot of work.

View file

@@ -82,6 +82,53 @@ Note that because the checksum is computed after applying these effects, using o
Most other fetchers return a directory rather than a single file.
## `fetchDebianPatch` {#fetchdebianpatch}
A wrapper around `fetchpatch`, which takes:
- `patch` and `hash`: the patch's filename,
and its hash after normalization by `fetchpatch`;
- `pname`: the Debian source package's name;
- `version`: the upstream version number;
- `debianRevision`: the [Debian revision number] if applicable;
- the `area` of the Debian archive: `main` (default), `contrib`, or `non-free`.
Here is an example of `fetchDebianPatch` in action:
```nix
{ lib
, fetchDebianPatch
, buildPythonPackage
}:
buildPythonPackage rec {
pname = "pysimplesoap";
version = "1.16.2";
src = ...;
patches = [
(fetchDebianPatch {
inherit pname version;
debianRevision = "5";
name = "Add-quotes-to-SOAPAction-header-in-SoapClient.patch";
hash = "sha256-xA8Wnrpr31H8wy3zHSNfezFNjUJt1HbSXn3qUMzeKc0=";
})
];
...
}
```
Patches are fetched from `sources.debian.org`, and so must come from a
package version that was uploaded to the Debian archive. Packages may
be removed from there once that specific version isn't in any suite
anymore (stable, testing, unstable, etc.), so maintainers should use
`copy-tarballs.pl` to archive the patch if it needs to be available
longer-term.
[Debian revision number]: https://www.debian.org/doc/debian-policy/ch-controlfields.html#version
## `fetchsvn` {#fetchsvn}
Used with Subversion. Expects `url` to point to a Subversion directory, `rev`, and `hash`.
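A hedged sketch of a typical invocation; the repository URL and the hash are placeholders:
```nix
fetchsvn {
  # Hypothetical repository; point this at a real Subversion directory.
  url = "https://svn.example.org/repos/project/trunk";
  rev = 12345;
  hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";  # placeholder
}
```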
@@ -181,7 +228,7 @@ Otherwise, the builder will run, but fail with a message explaining to the user
requireFile {
name = "jdk-${version}_linux-x64_bin.tar.gz";
url = "https://www.oracle.com/java/technologies/javase-jdk11-downloads.html";
sha256 = "94bd34f85ee38d3ef59e5289ec7450b9443b924c55625661fffe66b03f2c8de2";
hash = "sha256-lL00+F7jjT71nlKJ7HRQuUQ7kkxVYlZh//5msD8sjeI=";
}
```
results in this error message:

View file

@@ -26,10 +26,6 @@ You can install it like any other packages via `nix-env -iA myEmacs`. However, t
{
packageOverrides = pkgs: with pkgs; rec {
myEmacsConfig = writeText "default.el" ''
;; initialize package
(require 'package)
(package-initialize 'noactivate)
(eval-when-compile
(require 'use-package))
@@ -103,7 +99,7 @@ You can install it like any other packages via `nix-env -iA myEmacs`. However, t
This provides a fairly full Emacs start file. It will load in addition to the user's personal config. You can always disable it by passing `-q` to the Emacs command.
Sometimes `emacs.pkgs.withPackages` is not enough, as this package set has some priorities imposed on packages (with the lowest priority assigned to Melpa Unstable, and the highest for packages manually defined in `pkgs/top-level/emacs-packages.nix`). But you can't control these priorities when some package is installed as a dependency. You can override it on a per-package basis, providing all the required dependencies manually, but it's tedious and there is always a possibility that an unwanted dependency will sneak in through some other package. To completely override such a package, you can use `overrideScope`.
Sometimes `emacs.pkgs.withPackages` is not enough, as this package set has some priorities imposed on packages (with the lowest priority assigned to GNU-devel ELPA, and the highest for packages manually defined in `pkgs/applications/editors/emacs/elisp-packages/manual-packages`). But you can't control these priorities when some package is installed as a dependency. You can override it on a per-package basis, providing all the required dependencies manually, but it's tedious and there is always a possibility that an unwanted dependency will sneak in through some other package. To completely override such a package, you can use `overrideScope`.
```nix
overrides = self: super: rec {

View file

@@ -29,12 +29,12 @@ _Note: each language passed to `langs` must be an attribute name in `pkgs.hunspe
## Built-in emoji picker {#sec-ibus-typing-booster-emoji-picker}
The `ibus-engines.typing-booster` package contains a program named `emoji-picker`. To display all emojis correctly, a special font such as `noto-fonts-emoji` is needed:
The `ibus-engines.typing-booster` package contains a program named `emoji-picker`. To display all emojis correctly, a special font such as `noto-fonts-color-emoji` is needed:
On NixOS, it can be installed using the following expression:
```nix
{ pkgs, ... }: {
fonts.packages = with pkgs; [ noto-fonts-emoji ];
fonts.packages = with pkgs; [ noto-fonts-color-emoji ];
}
```

View file

@@ -4,9 +4,25 @@ Nixpkgs provides a couple of functions that help with building derivations. The
## `runCommand` {#trivial-builder-runCommand}
This takes three arguments, `name`, `env`, and `buildCommand`. `name` is just the name that Nix will append to the store path in the same way that `stdenv.mkDerivation` uses its `name` attribute. `env` is an attribute set specifying environment variables that will be set for this derivation. These attributes are then passed to the wrapped `stdenv.mkDerivation`. `buildCommand` specifies the commands that will be run to create this derivation. Note that you will need to create `$out` for Nix to register the command as successful.
`runCommand :: String -> AttrSet -> String -> Derivation`
An example of using `runCommand` is provided below.
`runCommand name drvAttrs buildCommand` returns a derivation that is built by running the specified shell commands.
`name :: String`
: The name that Nix will append to the store path in the same way that `stdenv.mkDerivation` uses its `name` attribute.
`drvAttrs :: AttrSet`
: Attributes to pass to the underlying call to [`stdenv.mkDerivation`](#chap-stdenv).
`buildCommand :: String`
: Shell commands to run in the derivation builder.
::: {.note}
You have to create a file or directory `$out` for Nix to be able to run the builder successfully.
:::
::: {.example #ex-runcommand-simple}
# Invocation of `runCommand`
```nix
(import <nixpkgs> {}).runCommand "my-example" {} ''
@@ -28,6 +44,7 @@ An example of using `runCommand` is provided below.
date
''
```
:::
## `runCommandCC` {#trivial-builder-runCommandCC}

View file

@@ -19,8 +19,10 @@ let
{ name = "options"; description = "NixOS / nixpkgs option handling"; }
{ name = "path"; description = "path functions"; }
{ name = "filesystem"; description = "filesystem functions"; }
{ name = "fileset"; description = "file set functions"; }
{ name = "sources"; description = "source filtering functions"; }
{ name = "cli"; description = "command-line serialization functions"; }
{ name = "gvariant"; description = "GVariant formatted string serialization functions"; }
];
};

View file

@@ -8,4 +8,5 @@ functions/generators.section.md
functions/debug.section.md
functions/prefer-remote-fetch.section.md
functions/nix-gitignore.section.md
functions/fileset.section.md
```

View file

@@ -0,0 +1,51 @@
<!-- TODO: Render this document in front of function documentation in case https://github.com/nix-community/nixdoc/issues/19 is ever supported -->
# File sets {#sec-fileset}
The [`lib.fileset`](#sec-functions-library-fileset) library allows you to work with _file sets_.
A file set is a mathematical set of local files that can be added to the Nix store for use in Nix derivations.
File sets are easy and safe to use, providing obvious and composable semantics with good error messages to prevent mistakes.
These sections apply to the entire library.
See the [function reference](#sec-functions-library-fileset) for function-specific documentation.
The file set library is currently somewhat limited but is being expanded to include more functions over time.
## Implicit coercion from paths to file sets {#sec-fileset-path-coercion}
All functions accepting file sets as arguments can also accept [paths](https://nixos.org/manual/nix/stable/language/values.html#type-path) as arguments.
Such path arguments are implicitly coerced to file sets containing all files under that path:
- A path to a file turns into a file set containing that single file.
- A path to a directory turns into a file set containing all files _recursively_ in that directory.
If the path points to a non-existent location, an error is thrown.
::: {.note}
Just like in Git, file sets cannot represent empty directories.
Because of this, a path to a directory that contains no files (recursively) will turn into a file set containing no files.
:::
:::{.note}
File set coercion does _not_ add any of the files under the coerced paths to the store.
Only the [`toSource`](#function-library-lib.fileset.toSource) function adds files to the Nix store, and only those files contained in the `fileset` argument.
This is in contrast to using [paths in string interpolation](https://nixos.org/manual/nix/stable/language/values.html#type-path), which does add the entire referenced path to the store.
:::
### Example {#sec-fileset-path-coercion-example}
Assume we are in a local directory with a file hierarchy like this:
```
├─ a/
│  ├─ x (file)
│  └─ b/
│     └─ y (file)
└─ c/
   └─ d/
```
Here's a listing of which files get included when different path expressions get coerced to file sets:
- `./.` as a file set contains both `a/x` and `a/b/y` (`c/` does not contain any files and is therefore omitted).
- `./a` as a file set contains both `a/x` and `a/b/y`.
- `./a/x` as a file set contains only `a/x`.
- `./a/b` as a file set contains only `a/b/y`.
- `./c` as a file set is empty, since neither `c` nor `c/d` contain any files.
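Tying this back to the library's entry point, here is a hedged sketch (a hypothetical derivation) that uses `lib.fileset.toSource` on the hierarchy above, so only `a/x` and `a/b/y` are added to the store:
```nix
{ lib, stdenv }:

let
  fs = lib.fileset;
in
stdenv.mkDerivation {
  name = "fileset-example";
  # ./a/x coerces to a single file; ./a/b to all files under it.
  # The empty directories c/ and c/d/ cannot be represented and are omitted.
  src = fs.toSource {
    root = ./.;
    fileset = fs.unions [ ./a/x ./a/b ];
  };
}
```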

View file

@@ -0,0 +1,7 @@
# bmake {#bmake-hook}
[bmake](https://www.crufty.net/help/sjg/bmake.html) is the portable variant of
the NetBSD make utility.
In Nixpkgs, `bmake` comes with a hook that overrides the default build, check,
install and dist phases.
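As with other setup hooks, the hook is engaged by adding `bmake` to `nativeBuildInputs`. A minimal sketch, with a hypothetical package name, URL, and hash:
```nix
{ stdenv, fetchurl, bmake }:

stdenv.mkDerivation rec {
  pname = "example";  # hypothetical package
  version = "1.0";
  src = fetchurl {
    url = "https://example.org/example-${version}.tar.gz";  # placeholder
    hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";  # placeholder
  };
  # Adding bmake to nativeBuildInputs activates its setup hook,
  # which overrides the build, check, install and dist phases.
  nativeBuildInputs = [ bmake ];
}
```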

View file

@@ -8,6 +8,7 @@ The stdenv built-in hooks are documented in [](#ssec-setup-hooks).
autoconf.section.md
automake.section.md
autopatchelf.section.md
bmake.section.md
breakpoint.section.md
cmake.section.md
gdk-pixbuf.section.md
@@ -17,6 +18,7 @@ installShellFiles.section.md
libiconv.section.md
libxml2.section.md
meson.section.md
mpi-check-hook.section.md
ninja.section.md
patch-rc-path-hooks.section.md
perl.section.md

View file

@@ -0,0 +1,24 @@
# mpiCheckPhaseHook {#setup-hook-mpi-check}
This hook can be used to set up a check phase that
requires running an MPI application. It detects the
MPI implementation in use and exports the
necessary environment variables for using
`mpirun` and `mpiexec` in a Nix sandbox.
Example:
```nix
{ mpiCheckPhaseHook, mpi, ... }:
...
nativeCheckInputs = [
openssh
mpiCheckPhaseHook
];
```

View file

@@ -1,3 +1,58 @@
# wafHook {#wafhook}
# wafHook {#waf-hook}
Overrides the configure, build, and install phases. This will run the “waf” script used by many projects. If `wafPath` (default `./waf`) doesn't exist, it will copy the version of waf available in Nixpkgs. `wafFlags` can be used to pass flags to the waf script.
[Waf](https://waf.io) is a Python-based software building system.
In Nixpkgs, `wafHook` overrides the default configure, build, and install phases.
## Variables controlling wafHook {#waf-hook-variables-controlling}
### `wafHook` Exclusive Variables {#waf-hook-exclusive-variables}
The variables below are exclusive to `wafHook`.
#### `wafPath` {#waf-path}
Location of the `waf` tool. It defaults to `./waf`, to honor software projects that include it directly inside their source trees.
If `wafPath` doesn't exist, then `wafHook` will copy the `waf` provided by Nixpkgs to it.
#### `wafFlags` {#waf-flags}
Controls the flags passed to the waf tool during the build and install phases. For settings specific to the build or install phases, use `wafBuildFlags` or `wafInstallFlags` respectively.
#### `dontAddWafCrossFlags` {#dont-add-waf-cross-flags}
When set to `true`, don't add cross compilation flags during the configure phase.
#### `dontUseWafConfigure` {#dont-use-waf-configure}
When set to true, don't use the predefined `wafConfigurePhase`.
#### `dontUseWafBuild` {#dont-use-waf-build}
When set to true, don't use the predefined `wafBuildPhase`.
#### `dontUseWafInstall` {#dont-use-waf-install}
When set to true, don't use the predefined `wafInstallPhase`.
### Similar variables {#waf-hook-similar-variables}
The following variables are similar to their `stdenv.mkDerivation` counterparts.
| `wafHook` Variable | `stdenv.mkDerivation` Counterpart |
|-----------------------|-----------------------------------|
| `wafConfigureFlags` | `configureFlags` |
| `wafConfigureTargets` | `configureTargets` |
| `wafBuildFlags` | `buildFlags` |
| `wafBuildTargets` | `buildTargets` |
| `wafInstallFlags` | `installFlags` |
| `wafInstallTargets` | `installTargets` |
### Honored variables {#waf-hook-honored-variables}
The following variables commonly used by `stdenv.mkDerivation` are honored by `wafHook`.
- `prefixKey`
- `enableParallelBuilding`
- `enableParallelInstalling`
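A minimal sketch of a waf-based build, with a hypothetical package name, URL, hash, and configure flag (note that `wafHook` needs a Python interpreter to run the waf script):
```nix
{ stdenv, fetchurl, wafHook, python3 }:

stdenv.mkDerivation rec {
  pname = "example";  # hypothetical package
  version = "1.0";
  src = fetchurl {
    url = "https://example.org/example-${version}.tar.gz";  # placeholder
    hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";  # placeholder
  };
  nativeBuildInputs = [ wafHook python3 ];
  wafConfigureFlags = [ "--enable-shared" ];  # hypothetical flag
}
```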

View file

@@ -4,7 +4,7 @@
In Nixpkgs, `zig.hook` overrides the default build, check and install phases.
## Example code snippet {#example-code-snippet}
## Example code snippet {#zig-hook-example-code-snippet}
```nix
{ lib
@@ -27,33 +27,37 @@ stdenv.mkDerivation {
}
```
## Variables controlling zig.hook {#variables-controlling-zig-hook}
## Variables controlling zig.hook {#zig-hook-variables-controlling}
### `dontUseZigBuild` {#dontUseZigBuild}
### `zig.hook` Exclusive Variables {#zig-hook-exclusive-variables}
The variables below are exclusive to `zig.hook`.
#### `dontUseZigBuild` {#dont-use-zig-build}
Disables using `zigBuildPhase`.
### `zigBuildFlags` {#zigBuildFlags}
Controls the flags passed to the build phase.
### `dontUseZigCheck` {#dontUseZigCheck}
#### `dontUseZigCheck` {#dont-use-zig-check}
Disables using `zigCheckPhase`.
### `zigCheckFlags` {#zigCheckFlags}
Controls the flags passed to the check phase.
### `dontUseZigInstall` {#dontUseZigInstall}
#### `dontUseZigInstall` {#dont-use-zig-install}
Disables using `zigInstallPhase`.
### `zigInstallFlags` {#zigInstallFlags}
### Similar variables {#zig-hook-similar-variables}
Controls the flags passed to the install phase.
The following variables are similar to their `stdenv.mkDerivation` counterparts.
### Variables honored by zig.hook {#variables-honored-by-zig-hook}
| `zig.hook` Variable | `stdenv.mkDerivation` Counterpart |
|---------------------|-----------------------------------|
| `zigBuildFlags` | `buildFlags` |
| `zigCheckFlags` | `checkFlags` |
| `zigInstallFlags` | `installFlags` |
### Variables honored by zig.hook {#zig-hook-variables-honored}
The following variables commonly used by `stdenv.mkDerivation` are honored by `zig.hook`.
- `prefixKey`
- `dontAddPrefix`

View file

@@ -44,11 +44,29 @@ There is also a `buildMix` helper, whose behavior is closer to that of `buildErl
## How to Install BEAM Packages {#how-to-install-beam-packages}
BEAM builders are not registered at the top level, simply because they are not relevant to the vast majority of Nix users. To install any of those builders into your profile, refer to them by their attribute path `beamPackages.rebar3`:
BEAM builders are not registered at the top level, simply because they are not relevant to the vast majority of Nix users.
To use any of those builders in your environment, refer to them by their attribute path under `beamPackages`, e.g. `beamPackages.rebar3`:
::: {.example #ex-beam-ephemeral-shell}
# Ephemeral shell
```ShellSession
$ nix-env -f "<nixpkgs>" -iA beamPackages.rebar3
$ nix-shell -p beamPackages.rebar3
```
:::
::: {.example #ex-beam-declarative-shell}
# Declarative shell
```nix
let
pkgs = import <nixpkgs> { config = {}; overlays = []; };
in
pkgs.mkShell {
packages = [ pkgs.beamPackages.rebar3 ];
}
```
:::
## Packaging BEAM Applications {#packaging-beam-applications}
@@ -154,7 +172,7 @@ Here is how your `default.nix` file would look for a phoenix project.
with import <nixpkgs> { };
let
# beam.interpreters.erlang_23 is available if you need a particular version
# beam.interpreters.erlang_26 is available if you need a particular version
packages = beam.packagesWith beam.interpreters.erlang;
pname = "your_project";

View file

@@ -47,3 +47,32 @@ To include more eggs, edit `pkgs/development/compilers/chicken/5/eggs.scm`.
The first section of this file lists eggs which are required by `egg2nix`
itself; all other eggs go into the second section. After editing, follow the
procedure for updating eggs.
## Override Scope {#sec-chicken-override-scope}
The chicken package and its eggs, respectively, reside in a scope. This means
the scope can be overridden to affect other packages in it.
This example shows how to use a local copy of `srfi-180` and have it affect
all the other eggs:
```nix
let
myChickenPackages = pkgs.chickenPackages.overrideScope' (self: super: {
# The chicken package itself can be overridden to affect the whole ecosystem.
# chicken = super.chicken.overrideAttrs {
# src = ...
# };
chickenEggs = super.chickenEggs.overrideScope' (eggself: eggsuper: {
srfi-180 = eggsuper.srfi-180.overrideAttrs {
# path to a local copy of srfi-180
src = ...
};
});
});
in
# Here, `myChickenPackages.chickenEggs.json-rpc`, which depends on `srfi-180` will use
# the local copy of `srfi-180`.
# ...
```

View file

@@ -54,3 +54,65 @@ for your specific card(s).
Library maintainers should consult [NVCC Docs](https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/)
and release notes for their software package.
## Adding a new CUDA release {#adding-a-new-cuda-release}
> **WARNING**
>
> This section of the docs is still very much in progress. Feedback is welcome in GitHub Issues tagging @NixOS/cuda-maintainers or on [Matrix](https://matrix.to/#/#cuda:nixos.org).
The CUDA Toolkit is a suite of CUDA libraries and software meant to provide a development environment for CUDA-accelerated applications. Until the release of CUDA 11.4, NVIDIA had only made the CUDA Toolkit available as a multi-gigabyte runfile installer, which we provide through the [`cudaPackages.cudatoolkit`](https://search.nixos.org/packages?channel=unstable&type=packages&query=cudaPackages.cudatoolkit) attribute. From CUDA 11.4 and onwards, NVIDIA has also provided CUDA redistributables (“CUDA-redist”): individually packaged CUDA Toolkit components meant to facilitate redistribution and inclusion in downstream projects. These packages are available in the [`cudaPackages`](https://search.nixos.org/packages?channel=unstable&type=packages&query=cudaPackages) package set.
All new projects should use the CUDA redistributables available in [`cudaPackages`](https://search.nixos.org/packages?channel=unstable&type=packages&query=cudaPackages) in place of [`cudaPackages.cudatoolkit`](https://search.nixos.org/packages?channel=unstable&type=packages&query=cudaPackages.cudatoolkit), as they are much easier to maintain and update.
### Updating CUDA redistributables {#updating-cuda-redistributables}
1. Go to NVIDIA's index of CUDA redistributables: <https://developer.download.nvidia.com/compute/cuda/redist/>
2. Copy the `redistrib_*.json` corresponding to the release to `pkgs/development/compilers/cudatoolkit/redist/manifests`.
3. Generate the `redistrib_features_*.json` file by running:
```bash
nix run github:ConnorBaker/cuda-redist-find-features -- <path to manifest>
```
That command will generate the `redistrib_features_*.json` file in the same directory as the manifest.
4. Include the path to the new manifest in `pkgs/development/compilers/cudatoolkit/redist/extension.nix`.
### Updating the CUDA Toolkit runfile installer {#updating-the-cuda-toolkit}
> **WARNING**
>
> While the CUDA Toolkit runfile installer is still available in Nixpkgs as the [`cudaPackages.cudatoolkit`](https://search.nixos.org/packages?channel=unstable&type=packages&query=cudaPackages.cudatoolkit) attribute, its use is not recommended and it should be considered deprecated. Please migrate to the CUDA redistributables provided by the [`cudaPackages`](https://search.nixos.org/packages?channel=unstable&type=packages&query=cudaPackages) package set.
>
> To ensure packages relying on the CUDA Toolkit runfile installer continue to build, it will continue to be updated until a migration path is available.
1. Go to NVIDIA's CUDA Toolkit runfile installer download page: <https://developer.nvidia.com/cuda-downloads>
2. Select the appropriate OS, architecture, distribution, version, and installer type.
- For example: Linux, x86_64, Ubuntu, 22.04, runfile (local)
- NOTE: Typically, we use the Ubuntu runfile. It is unclear if the runfile for other distributions will work.
3. Take the link provided by the installer instructions on the webpage after selecting the installer type and get its hash by running:
```bash
nix store prefetch-file --hash-type sha256 <link>
```
4. Update `pkgs/development/compilers/cudatoolkit/versions.toml` to include the release.
### Updating the CUDA package set {#updating-the-cuda-package-set}
1. Include a new `cudaPackages_<major>_<minor>` package set in `pkgs/top-level/all-packages.nix`.
- NOTE: Changing the default CUDA package set should occur in a separate PR, allowing time for additional testing.
2. Successfully build the closure of the new package set, updating `pkgs/development/compilers/cudatoolkit/redist/overrides.nix` as needed. Below are some common failures:
| Unable to ... | During ... | Reason | Solution | Note |
| --- | --- | --- | --- | --- |
| Find headers | `configurePhase` or `buildPhase` | Missing dependency on a `dev` output | Add the missing dependency | The `dev` output typically contains the headers |
| Find libraries | `configurePhase` | Missing dependency on a `dev` output | Add the missing dependency | The `dev` output typically contains CMake configuration files |
| Find libraries | `buildPhase` or `patchelf` | Missing dependency on a `lib` or `static` output | Add the missing dependency | The `lib` or `static` output typically contains the libraries |
If you are unable to run the resulting binary, this is arguably the most complicated case, as it could be any combination of the previous reasons. This type of failure typically occurs when a library attempts to load or open a library it depends on that it does not declare in its `DT_NEEDED` section. As a first step, ensure that dependencies are patched with [`cudaPackages.autoAddOpenGLRunpath`](https://search.nixos.org/packages?channel=unstable&type=packages&query=cudaPackages.autoAddOpenGLRunpath). Failing that, try running the application with [`nixGL`](https://github.com/guibou/nixGL) or a similar wrapper tool. If that works, it likely means that the application is attempting to load a library that is not in the `RPATH` or `RUNPATH` of the binary.

View file

@@ -91,7 +91,7 @@ buildDhallPackage {
let
nixpkgs = builtins.fetchTarball {
url = "https://github.com/NixOS/nixpkgs/archive/94b2848559b12a8ed1fe433084686b2a81123c99.tar.gz";
sha256 = "sha256-B4Q3c6IvTLg3Q92qYa8y+i4uTaphtFdjp+Ir3QQjdN0=";
hash = "sha256-B4Q3c6IvTLg3Q92qYa8y+i4uTaphtFdjp+Ir3QQjdN0=";
};
dhallOverlay = self: super: {
@@ -303,11 +303,8 @@ You can use the `dhall-to-nixpkgs` command-line utility to automate
packaging Dhall code. For example:
```ShellSession
$ nix-env --install --attr haskellPackages.dhall-nixpkgs
$ nix-env --install --attr nix-prefetch-git # Used by dhall-to-nixpkgs
$ dhall-to-nixpkgs github https://github.com/Gabriella439/dhall-semver.git
$ nix-shell -p haskellPackages.dhall-nixpkgs nix-prefetch-git
[nix-shell]$ dhall-to-nixpkgs github https://github.com/Gabriella439/dhall-semver.git
{ buildDhallGitHubPackage, Prelude }:
buildDhallGitHubPackage {
name = "dhall-semver";
@@ -325,6 +322,10 @@ $ dhall-to-nixpkgs github https://github.com/Gabriella439/dhall-semver.git
}
```
:::{.note}
`nix-prefetch-git` has to be in `$PATH` for `dhall-to-nixpkgs` to work.
:::
The utility takes care of automatically detecting remote imports and converting
them to package dependencies. You can also use the utility on local
Dhall directories, too:

View file

@@ -161,7 +161,7 @@ in buildDotnetModule rec {
They can be installed either as a global tool for the entire system, or as a local tool specific to project.
The local installation is the easiest and works on NixOS in the same way as on other Linux distributions.
[See dotnet documention](https://learn.microsoft.com/en-us/dotnet/core/tools/global-tools#install-a-local-tool) to learn more.
[See dotnet documentation](https://learn.microsoft.com/en-us/dotnet/core/tools/global-tools#install-a-local-tool) to learn more.
[The global installation method](https://learn.microsoft.com/en-us/dotnet/core/tools/global-tools#install-a-global-tool)
should also work most of the time. You have to remember to update the `PATH`

View file

@@ -221,7 +221,7 @@ Sadly we currently don't have tooling for this. For this you might be
interested in the alternative [haskell.nix] framework, which, be warned, is
completely incompatible with packages from `haskellPackages`.
<!-- TODO(@maralorn) Link to package set generation docs in the contributers guide below. -->
<!-- TODO(@maralorn) Link to package set generation docs in the contributors guide below. -->
## `haskellPackages.mkDerivation` {#haskell-mkderivation}
@@ -1029,7 +1029,7 @@ ugly, and we may want to deprecate them at some point. -->
`disableCabalFlag flag drv`
: Makes sure that the Cabal flag `flag` is disabled in Cabal's configure step.
`appendBuildflags list drv`
`appendBuildFlags list drv`
: Adds the strings in `list` to the `buildFlags` argument for `drv`.
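As a hedged illustration of the `appendBuildFlags list drv` signature documented above (assuming it is reached through `pkgs.haskell.lib.compose`; the flag and package are arbitrary examples):
```nix
# Build a package with an extra verbosity flag appended to its buildFlags.
pkgs.haskell.lib.compose.appendBuildFlags [ "-v2" ] pkgs.haskellPackages.QuickCheck
```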
<!-- TODO(@sternenseemann): removeConfigureFlag -->
@@ -1192,7 +1192,7 @@ with GHC), it is recommended to use overlays for Nixpkgs to change them.
Since the interrelated parts, i.e. the package set and GHC, are connected
via the Nixpkgs fixpoint, we need to modify them both in a way that preserves
their connection (or else we'd have to wire it up again manually). This is
achieved by changing GHC and the package set in seperate overlays to prevent
achieved by changing GHC and the package set in separate overlays to prevent
the package set from pulling in GHC from `prev`.
The result is two overlays like the ones shown below. Adjustable parts are

View file

@@ -161,6 +161,8 @@ git config --global url."https://github.com/".insteadOf git://github.com/
`buildNpmPackage` allows you to package npm-based projects in Nixpkgs without the use of an auto-generated dependencies file (as used in [node2nix](#javascript-node2nix)). It works by utilizing npm's cache functionality -- creating a reproducible cache that contains the dependencies of a project, and pointing npm to it.
Here's an example:
```nix
{ lib, buildNpmPackage, fetchFromGitHub }:
@@ -191,6 +193,8 @@ buildNpmPackage rec {
}
```
In the default `installPhase` set by `buildNpmPackage`, it uses `npm pack --json --dry-run` to decide what files to install in `$out/lib/node_modules/$name/`, where `$name` is the `name` string defined in the package's `package.json`. Additionally, the `bin` and `man` keys in the source's `package.json` are used to decide what binaries and manpages are supposed to be installed. If these are not defined, `npm pack` may miss some files, and no binaries will be produced.
#### Arguments {#javascript-buildNpmPackage-arguments}
* `npmDepsHash`: The output hash of the dependencies for this project. Can be calculated in advance with [`prefetch-npm-deps`](#javascript-buildNpmPackage-prefetch-npm-deps).
@@ -204,10 +208,11 @@ buildNpmPackage rec {
* `npmBuildFlags`: Flags to pass to `npm run ${npmBuildScript}`.
* `npmPackFlags`: Flags to pass to `npm pack`.
* `npmPruneFlags`: Flags to pass to `npm prune`. Defaults to the value of `npmInstallFlags`.
* `makeWrapperArgs`: Flags to pass to `makeWrapper`, added to the executable that calls the generated `.js` with `node` as an interpreter. These scripts are defined in `package.json`.
#### prefetch-npm-deps {#javascript-buildNpmPackage-prefetch-npm-deps}
`prefetch-npm-deps` can calculate the hash of the dependencies of an npm project ahead of time.
`prefetch-npm-deps` is a Nixpkgs package that calculates the hash of the dependencies of an npm project ahead of time.
```console
$ ls
@@ -217,6 +222,19 @@ $ prefetch-npm-deps package-lock.json
sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
```
#### fetchNpmDeps {#javascript-buildNpmPackage-fetchNpmDeps}
`fetchNpmDeps` is a Nix function that requires the following mandatory arguments:
- `src`: A directory / tarball with a `package-lock.json` file
- `hash`: The output hash of the node dependencies defined in `package-lock.json`.
It returns a derivation with all `package-lock.json` dependencies downloaded into `$out/`, usable as an npm cache.
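A minimal sketch of calling it; the local path is hypothetical and the hash is a placeholder:
```nix
npmDeps = fetchNpmDeps {
  src = ./my-project;  # hypothetical directory containing package-lock.json
  hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";  # placeholder
};
```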
### corepack {#javascript-corepack}
This package puts the corepack wrappers for pnpm and yarn in your PATH, and they will honor the `packageManager` setting in the `package.json`.
### node2nix {#javascript-node2nix}
#### Preparation {#javascript-node2nix-preparation}

View file

@@ -1,36 +1,32 @@
# lisp-modules {#lisp}
This document describes the Nixpkgs infrastructure for building Common Lisp
libraries that use ASDF (Another System Definition Facility). It lives in
`pkgs/development/lisp-modules`.
systems that use [ASDF](https://asdf.common-lisp.dev/) (Another System
Definition Facility). It lives in `pkgs/development/lisp-modules`.
## Overview {#lisp-overview}
The main entry point of the API are the Common Lisp implementation packages
(e.g. `abcl`, `ccl`, `clasp-common-lisp`, `clisp` `ecl`, `sbcl`)
themselves. They have the `pkgs` and `withPackages` attributes, which can be
used to discover available packages and to build wrappers, respectively.
themselves (e.g. `abcl`, `ccl`, `clasp-common-lisp`, `clisp`, `ecl`,
`sbcl`). They have the `pkgs` and `withPackages` attributes, which can be used
to discover available packages and to build wrappers, respectively.
The `pkgs` attribute set contains packages that were automatically imported from
Quicklisp, and any other manually defined ones. Not every package works for all
the CL implementations (e.g. `nyxt` only makes sense for `sbcl`).
The `pkgs` attribute set contains packages that were automatically
[imported](#lisp-importing-packages-from-quicklisp) from Quicklisp, and any
other [manually defined](#lisp-defining-packages-inside) ones. Not every package
works for all the CL implementations (e.g. `nyxt` only makes sense for `sbcl`).
The `withPackages` function is of primary utility. It is used to build runnable
wrappers, with a pinned and pre-built ASDF FASL available in the `ASDF`
environment variable, and `CL_SOURCE_REGISTRY`/`ASDF_OUTPUT_TRANSLATIONS`
configured to find the desired systems on runtime.
With a few exceptions, the primary thing that the infrastructure does is to run
`asdf:load-system` for each system specified in the `systems` argument to
`build-asdf-system`, and save the FASLs to the Nix store. Then, it makes these
FASLs available to wrappers. Any other use-cases, such as producing SBCL
executables with `sb-ext:save-lisp-and-die`, are achieved via overriding the
`buildPhase` etc.
The `withPackages` function is of primary utility. It is used to build
[runnable wrappers](#lisp-building-wrappers), with a pinned and pre-built
[ASDF FASL](#lisp-loading-asdf) available in the `ASDF` environment variable,
and `CL_SOURCE_REGISTRY`/`ASDF_OUTPUT_TRANSLATIONS` configured to
[find the desired systems on runtime](#lisp-loading-systems).
In addition, Lisps have the `withOverrides` function, which can be used to
substitute any package in the scope of their `pkgs`. This will be useful
together with `overrideLispAttrs` when dealing with slashy ASDF systems, because
they should stay in the main package and be build by specifying the `systems`
[substitute](#lisp-including-external-pkg-in-scope) any package in the scope of
their `pkgs`. This will also be useful together with `overrideLispAttrs` when
[dealing with slashy systems](#lisp-dealing-with-slashy-systems), because they
should stay in the main package and be built by specifying the `systems`
argument to `build-asdf-system`.
## The 90% use case example {#lisp-use-case-example}
@@ -42,7 +38,7 @@ The most common way to use the library is to run ad-hoc wrappers like this:
Then, in a shell:
```
$ result/bin/sbcl
$ sbcl
* (load (sb-ext:posix-getenv "ASDF"))
* (asdf:load-system 'alexandria)
```
@@ -53,7 +49,7 @@ Also one can create a `pkgs.mkShell` environment in `shell.nix`/`flake.nix`:
let
sbcl' = sbcl.withPackages (ps: [ ps.alexandria ]);
in mkShell {
buildInputs = [ sbcl' ];
packages = [ sbcl' ];
}
```
@@ -67,32 +63,37 @@ buildPhase = ''
## Importing packages from Quicklisp {#lisp-importing-packages-from-quicklisp}
The library is able to very quickly import all the packages distributed by
Quicklisp by parsing its `releases.txt` and `systems.txt` files. These files are
available from [http://beta.quicklisp.org/dist/quicklisp.txt].
To save some work of writing Nix expressions, there is a script that imports all
the packages distributed by Quicklisp into `imported.nix`. This works by parsing
its `releases.txt` and `systems.txt` files, which are published every couple of
months on [quicklisp.org](http://beta.quicklisp.org/dist/quicklisp.txt).
The import process is implemented in the `import` directory as Common Lisp
functions in the `org.lispbuilds.nix` ASDF system. To run the script, one can
code in the `org.lispbuilds.nix` ASDF system. To run the script, one can
execute `ql-import.lisp`:
```
cd pkgs/development/lisp-modules
nix-shell --run 'sbcl --script ql-import.lisp'
```
The script will:
1. Download the latest Quicklisp `systems.txt` and `releases.txt` files
2. Generate an SQLite database of all QL systems in `packages.sqlite`
2. Generate a temporary SQLite database of all QL systems in `packages.sqlite`
3. Generate an `imported.nix` file from the database
The maintainer's job there is to:
(The `packages.sqlite` file can be deleted at will, because it is regenerated
each time the script runs.)
1. Re-run the `ql-import.lisp` script
2. Add missing native dependencies in `ql.nix`
3. For packages that still don't build, package them manually in `packages.nix`
The maintainer's job is to:
1. Re-run the `ql-import.lisp` script when there is a new Quicklisp release
2. [Add any missing native dependencies](#lisp-quicklisp-adding-native-dependencies) in `ql.nix`
3. For packages that still don't build, [package them manually](#lisp-defining-packages-inside) in `packages.nix`
Also, the `imported.nix` file **must not be edited manually**! It should only be
generated as described in this section.
generated as described in this section (by running `ql-import.lisp`).
### Adding native dependencies {#lisp-quicklisp-adding-native-dependencies}
@@ -108,7 +109,7 @@ Packages defined in `packages.nix` contain these dependencies naturally.
The previous implementation of `lisp-modules` didn't fully trust the Quicklisp
data, because there were times where the dependencies specified were not
complete, and caused broken builds. It instead used a `nix-shell` environment to
complete and caused broken builds. It instead used a `nix-shell` environment to
discover real dependencies by using the ASDF APIs.
The current implementation has chosen to trust this data, because it's faster to
@@ -126,33 +127,46 @@ replace the `systems` attribute of the affected packages. (See the definition of
During Quicklisp import:
- `+` in names are converted to `_plus{_,}`: `cl+ssl`->`cl_plus_ssl`, `alexandria+`->`alexandria_plus`
- `.` to `_dot_`: `iolib.base`->`iolib_dot_base`
- `+` in names is converted to `_plus{_,}`: `cl+ssl`->`cl_plus_ssl`, `alexandria+`->`alexandria_plus`
- `.` in names is converted to `_dot_`: `iolib.base`->`iolib_dot_base`
- names starting with a number have a `_` prepended (`3d-vectors`->`_3d-vectors`)
- `_` in names is converted to `__` for reversibility
## Defining packages manually inside Nixpkgs {#lisp-defining-packages-inside}
New packages, that for some reason are not in Quicklisp, and so cannot be
auto-imported, can be written in the `packages.nix` file.
Packages that for some reason are not in Quicklisp, and so cannot be
auto-imported, or don't work straight from the import, are defined in the
`packages.nix` file.
In that file, use the `build-asdf-system` function, which is a wrapper around
`mkDerivation` for building ASDF systems. Various other hacks are present, such
as `build-with-compile-into-pwd` for systems which create files during
compilation.
compilation (such as cl-unicode).
The `build-asdf-system` function is documented with comments in
`nix-cl.nix`. Also, `packages.nix` is full of examples of how to use it.
The `build-asdf-system` function is documented
[here](#lisp-defining-packages-outside). Also, `packages.nix` is full of
examples of how to use it.
## Defining packages manually outside Nixpkgs {#lisp-defining-packages-outside}
Lisp derivations (`abcl`, `sbcl` etc.) also export the `buildASDFSystem`
function, which is the same as `build-asdf-system`, except for the `lisp`
argument which is set to the given CL implementation.
function, which is similar to `build-asdf-system` from `packages.nix`, but is
part of the public API.
It takes the following arguments:
- `pname`: the package name
- `version`: the package version
- `src`: the package source
- `patches`: patches to apply to the source before build
- `nativeLibs`: native libraries used by CFFI and grovelling
- `javaLibs`: Java libraries for ABCL
- `lispLibs`: dependencies on other packages built with `buildASDFSystem`
- `systems`: list of systems to build
It can be used to define packages outside Nixpkgs, and, for example, add them
into the package scope with `withOverrides` which will be discussed later on.
into the package scope with `withOverrides`.
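A sketch of a hypothetical out-of-tree package using the arguments above; the source URL and hash are placeholders:

```nix
{ sbcl, fetchzip }:

sbcl.buildASDFSystem rec {
  pname = "my-system";  # hypothetical system
  version = "1.0.0";
  src = fetchzip {
    url = "https://example.org/my-system-${version}.tar.gz";  # placeholder
    hash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";  # placeholder
  };
  lispLibs = [ sbcl.pkgs.alexandria ];
  systems = [ "my-system" ];
}
```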
### Including an external package in scope {#lisp-including-external-pkg-in-scope}
@@ -198,28 +212,6 @@ sbcl.pkgs.alexandria.overrideLispAttrs (oldAttrs: rec {
})
```
## Overriding packages in scope {#lisp-overriding-packages-in-scope}
Packages can be woven into a new scope by using `withOverrides`:
```
let
sbcl' = sbcl.withOverrides (self: super: {
alexandria = super.alexandria.overrideLispAttrs (oldAttrs: rec {
pname = "alexandria";
version = "1.4";
src = fetchFromGitLab {
domain = "gitlab.common-lisp.net";
owner = "alexandria";
repo = "alexandria";
rev = "v${version}";
hash = "sha256-1Hzxt65dZvgOFIljjjlSGgKYkj+YBLwJCACi5DZsKmQ=";
};
});
});
in builtins.elemAt sbcl'.pkgs.bordeaux-threads.lispLibs 0
```
### Dealing with slashy systems {#lisp-dealing-with-slashy-systems}
Slashy (secondary) systems should not exist in their own packages! Instead, they
@@ -240,8 +232,8 @@ ecl.pkgs.alexandria.overrideLispAttrs (oldAttrs: {
})
```
See the respective section on using `withOverrides` for how to weave it back
into `ecl.pkgs`.
See the [respective section](#lisp-including-external-pkg-in-scope) on using
`withOverrides` for how to weave it back into `ecl.pkgs`.
Note that sometimes the slashy systems might not only have more dependencies
than the main one, but create a circular dependency between `.asd`
@@ -253,13 +245,16 @@ Wrappers can be built using the `withPackages` function of Common Lisp
implementations (`abcl`, `ecl`, `sbcl` etc.):
```
sbcl.withPackages (ps: [ ps.alexandria ps.bordeaux-threads ])
nix-shell -p 'sbcl.withPackages (ps: [ ps.alexandria ps.bordeaux-threads ])'
```
Such a wrapper can then be executed like this:
Such a wrapper can then be used like this:
```
result/bin/sbcl
$ sbcl
* (load (sb-ext:posix-getenv "ASDF"))
* (asdf:load-system 'alexandria)
* (asdf:load-system 'bordeaux-threads)
```
### Loading ASDF {#lisp-loading-asdf}
View file
@ -130,6 +130,7 @@ package: a project may depend on certain extensions and `composer`
won't work with that project unless those extensions are loaded.
Example of building `composer` with additional extensions:
```nix
(php.withExtensions ({ all, enabled }:
enabled ++ (with all; [ imagick redis ]))
@ -138,7 +139,9 @@ Example of building `composer` with additional extensions:
### Overriding PHP packages {#ssec-php-user-guide-overriding-packages}
`php-packages.nix` forms a scope, allowing us to override the packages defined within. For example, to apply a patch to a `mysqlnd` extension, you can simply pass an overlay-style function to `php`'s `packageOverrides` argument:
`php-packages.nix` forms a scope, allowing us to override the packages defined
within. For example, to apply a patch to a `mysqlnd` extension, you can simply
pass an overlay-style function to `php`'s `packageOverrides` argument:
```nix
php.override {
@ -153,3 +156,138 @@ php.override {
};
}
```
### Building PHP projects {#ssec-building-php-projects}
With [Composer](https://getcomposer.org/), you can effectively build PHP
projects by streamlining dependency management. As the de-facto standard
dependency manager for PHP, Composer enables you to declare and manage the
libraries your project relies on, ensuring a more organized and efficient
development process.
Composer is not a package manager in the same sense as `Yum` or `Apt` are. Yes,
it deals with "packages" or libraries, but it manages them on a per-project
basis, installing them in a directory (e.g. `vendor`) inside your project. By
default, it does not install anything globally. This idea is not new and
Composer is strongly inspired by node's `npm` and ruby's `bundler`.
Currently, there is no other PHP tool that offers the same functionality as
Composer. Consequently, incorporating a helper in Nix to facilitate building
such applications is a logical choice.
In a Composer project, dependencies are defined in a `composer.json` file,
while their specific versions are locked in a `composer.lock` file. Some
Composer-based projects opt to include this `composer.lock` file in their source
code, while others choose not to.
In Nix, there are multiple approaches to building a Composer-based project.
One such method is the `php.buildComposerProject` helper function, which serves
as a wrapper around `mkDerivation`.
Using this function, you can build a PHP project that includes both a
`composer.json` and `composer.lock` file. If the project specifies binaries
using the `bin` attribute in `composer.json`, these binaries will be
automatically linked and made accessible in the derivation. In this context,
"binaries" refer to PHP scripts that are intended to be executable.
To use the helper effectively, simply add the `vendorHash` attribute, which
enables the wrapper to handle the heavy lifting.
Internally, the helper operates in three stages:
1. It constructs a `composerRepository` attribute derivation by creating a
composer repository on the filesystem containing dependencies specified in
`composer.json`. This process uses the function
`php.mkComposerRepository` which in turn uses the
`php.composerHooks.composerRepositoryHook` hook. Internally this function uses
a custom
[Composer plugin](https://github.com/nix-community/composer-local-repo-plugin) to
generate the repository.
2. The resulting `composerRepository` derivation is then used by the
`php.composerHooks.composerInstallHook` hook, which is responsible for
creating the final `vendor` directory.
3. Any "binaries" specified in the `composer.json` are linked and made accessible
in the derivation.
As the autoloader optimization can be activated directly within the
`composer.json` file, we do not enable any autoloader optimization flags.
To customize the PHP version, you can specify the `php` attribute. Similarly, if
you wish to modify the Composer version, use the `composer` attribute. It is
important to note that both attributes should be of the `derivation` type.
Here's a working code example using `php.buildComposerProject`:
```nix
{ php, fetchFromGitHub }:
php.buildComposerProject (finalAttrs: {
pname = "php-app";
version = "1.0.0";
src = fetchFromGitHub {
owner = "git-owner";
repo = "git-repo";
rev = finalAttrs.version;
hash = "sha256-VcQRSss2dssfkJ+iUb5qT+FJ10GHiFDzySigcmuVI+8=";
};
# PHP version containing the `ast` extension enabled
php = php.buildEnv {
extensions = ({ enabled, all }: enabled ++ (with all; [
ast
]));
};
# The composer vendor hash
vendorHash = "sha256-86s/F+/5cBAwBqZ2yaGRM5rTGLmou5//aLRK5SA0WiQ=";
# If the composer.lock file is missing from the repository, add it:
# composerLock = ./path/to/composer.lock;
})
```
In case the file `composer.lock` is missing from the repository, it is possible
to specify it using the `composerLock` attribute.
The other approach is to use these functions and hooks individually. This has
the advantage of making it very easy to build a PHP library within another
derivation when necessary.
Here's a working code example to build a PHP library using `mkDerivation` and
separate functions and hooks:
```nix
{ stdenvNoCC, fetchFromGitHub, php }:
stdenvNoCC.mkDerivation (finalAttrs:
let
src = fetchFromGitHub {
owner = "git-owner";
repo = "git-repo";
rev = finalAttrs.version;
hash = "sha256-VcQRSss2dssfkJ+iUb5qT+FJ10GHiFDzySigcmuVI+8=";
};
in {
inherit src;
pname = "php-app";
version = "1.0.0";
buildInputs = [ php ];
nativeBuildInputs = [
php.packages.composer
# This hook will use the attribute `composerRepository`
php.composerHooks.composerInstallHook
];
composerRepository = php.mkComposerRepository {
inherit (finalAttrs) src;
# Specifying a custom composer.lock since it is not present in the sources.
composerLock = ./composer.lock;
# The composer vendor hash
vendorHash = "sha256-86s/F+/5cBAwBqZ2yaGRM5rTGLmou5//aLRK5SA0WiQ=";
};
})
```
View file
@ -32,8 +32,8 @@ Each interpreter has the following attributes:
- `libPrefix`. Name of the folder in `${python}/lib/` for corresponding interpreter.
- `interpreter`. Alias for `${python}/bin/${executable}`.
- `buildEnv`. Function to build python interpreter environments with extra packages bundled together. See section *python.buildEnv function* for usage and documentation.
- `withPackages`. Simpler interface to `buildEnv`. See section *python.withPackages function* for usage and documentation.
- `buildEnv`. Function to build python interpreter environments with extra packages bundled together. See [](#python.buildenv-function) for usage and documentation.
- `withPackages`. Simpler interface to `buildEnv`. See [](#python.withpackages-function) for usage and documentation.
- `sitePackages`. Alias for `lib/${libPrefix}/site-packages`.
- `executable`. Name of the interpreter executable, e.g. `python3.10`.
- `pkgs`. Set of Python packages for that specific interpreter. The package set can be modified by overriding the interpreter and passing `packageOverrides`.
@ -41,8 +41,8 @@ Each interpreter has the following attributes:
### Building packages and applications {#building-packages-and-applications}
Python libraries and applications that use `setuptools` or
`distutils` are typically built with respectively the `buildPythonPackage` and
`buildPythonApplication` functions. These two functions also support installing a `wheel`.
`distutils` are typically built with respectively the [`buildPythonPackage`](#buildpythonpackage-function) and
[`buildPythonApplication`](#buildpythonapplication-function) functions. These two functions also support installing a `wheel`.
All Python packages reside in `pkgs/top-level/python-packages.nix` and all
applications elsewhere. In case a package is used as both a library and an
@ -101,7 +101,7 @@ The following is an example:
buildPythonPackage rec {
pname = "pytest";
version = "3.3.1";
format = "setuptools";
pyproject = true;
src = fetchPypi {
inherit pname version;
@ -141,23 +141,23 @@ buildPythonPackage rec {
The `buildPythonPackage` mainly does four things:
* In the `buildPhase`, it calls `${python.pythonForBuild.interpreter} setup.py bdist_wheel` to
* In the [`buildPhase`](#build-phase), it calls `${python.pythonForBuild.interpreter} setup.py bdist_wheel` to
build a wheel binary zipfile.
* In the `installPhase`, it installs the wheel file using `pip install *.whl`.
* In the `postFixup` phase, the `wrapPythonPrograms` bash function is called to
* In the [`installPhase`](#ssec-install-phase), it installs the wheel file using `pip install *.whl`.
* In the [`postFixup`](#var-stdenv-postFixup) phase, the `wrapPythonPrograms` bash function is called to
wrap all programs in the `$out/bin/*` directory to include `$PATH`
environment variable and add dependent libraries to script's `sys.path`.
* In the `installCheck` phase, `${python.interpreter} setup.py test` is run.
* In the [`installCheck`](#ssec-installCheck-phase) phase, `${python.interpreter} setup.py test` is run.
By default tests are run because `doCheck = true`. Test dependencies, like
e.g. the test runner, should be added to `nativeCheckInputs`.
By default tests are run because [`doCheck = true`](#var-stdenv-doCheck). Test dependencies, like
e.g. the test runner, should be added to [`nativeCheckInputs`](#var-stdenv-nativeCheckInputs).
By default `meta.platforms` is set to the same value
as the interpreter unless overridden otherwise.
##### `buildPythonPackage` parameters {#buildpythonpackage-parameters}
All parameters from `stdenv.mkDerivation` function are still supported. The
All parameters from the [`stdenv.mkDerivation`](#sec-using-stdenv) function are still supported. The
following are specific to `buildPythonPackage`:
* `catchConflicts ? true`: If `true`, abort package build if a package name
@ -167,15 +167,18 @@ following are specific to `buildPythonPackage`:
* `dontWrapPythonPrograms ? false`: Skip wrapping of Python programs.
* `permitUserSite ? false`: Skip setting the `PYTHONNOUSERSITE` environment
variable in wrapped programs.
* `format ? "setuptools"`: Format of the source. Valid options are
`"setuptools"`, `"pyproject"`, `"flit"`, `"wheel"`, and `"other"`.
`"setuptools"` is for when the source has a `setup.py` and `setuptools` is
used to build a wheel, `flit`, in case `flit` should be used to build a wheel,
and `wheel` in case a wheel is provided. Use `other` when a custom
`buildPhase` and/or `installPhase` is needed.
* `pyproject`: Whether the pyproject format should be used. When set to `true`,
`pypaBuildHook` will be used, and you can add the required build dependencies
from `build-system.requires` to `nativeBuildInputs`. Note that the pyproject
format falls back to using `setuptools`, so you can use `pyproject = true`
even if the package only has a `setup.py`. When set to `false`, you can
use the existing [hooks](#setup-hooks) or provide your own logic to build the
package. This can be useful for packages that don't support the pyproject
format. When unset, the legacy `setuptools` hooks are used for backwards
compatibility.
* `makeWrapperArgs ? []`: A list of strings. Arguments to be passed to
`makeWrapper`, which wraps generated binaries. By default, the arguments to
`makeWrapper` set `PATH` and `PYTHONPATH` environment variables before calling
[`makeWrapper`](#fun-makeWrapper), which wraps generated binaries. By default, the arguments to
[`makeWrapper`](#fun-makeWrapper) set `PATH` and `PYTHONPATH` environment variables before calling
the binary. Additional arguments here can allow a developer to set environment
variables which will be available when the binary is run. For example,
`makeWrapperArgs = ["--set FOO BAR" "--set BAZ QUX"]`.
@ -187,7 +190,7 @@ following are specific to `buildPythonPackage`:
* `pipBuildFlags ? []`: A list of strings. Arguments to be passed to `pip wheel`.
* `pypaBuildFlags ? []`: A list of strings. Arguments to be passed to `python -m build --wheel`.
* `pythonPath ? []`: List of packages to be added into `$PYTHONPATH`. Packages
in `pythonPath` are not propagated (contrary to `propagatedBuildInputs`).
in `pythonPath` are not propagated (contrary to [`propagatedBuildInputs`](#var-stdenv-propagatedBuildInputs)).
* `preShellHook`: Hook to execute commands before `shellHook`.
* `postShellHook`: Hook to execute commands after `shellHook`.
* `removeBinByteCode ? true`: Remove bytecode from `/bin`. Bytecode is only
@ -195,7 +198,7 @@ following are specific to `buildPythonPackage`:
* `setupPyGlobalFlags ? []`: List of flags passed to `setup.py` command.
* `setupPyBuildFlags ? []`: List of flags passed to `setup.py build_ext` command.
The `stdenv.mkDerivation` function accepts various parameters for describing
The [`stdenv.mkDerivation`](#sec-using-stdenv) function accepts various parameters for describing
build inputs (see "Specifying dependencies"). The following are of special
interest for Python packages, either because these are primarily used, or
because their behaviour is different:
@ -205,8 +208,8 @@ because their behaviour is different:
* `buildInputs ? []`: Build and/or run-time dependencies that need to be
compiled for the host machine. Typically non-Python libraries which are being
linked.
* `nativeCheckInputs ? []`: Dependencies needed for running the `checkPhase`. These
are added to `nativeBuildInputs` when `doCheck = true`. Items listed in
* `nativeCheckInputs ? []`: Dependencies needed for running the [`checkPhase`](#ssec-check-phase). These
are added to [`nativeBuildInputs`](#var-stdenv-nativeBuildInputs) when [`doCheck = true`](#var-stdenv-doCheck). Items listed in
`tests_require` go here.
* `propagatedBuildInputs ? []`: Aside from propagating dependencies,
`buildPythonPackage` also injects code into and wraps executables with the
@ -263,17 +266,17 @@ compilation issues, because scipy dependencies need to use the same blas impleme
#### `buildPythonApplication` function {#buildpythonapplication-function}
The `buildPythonApplication` function is practically the same as
`buildPythonPackage`. The main purpose of this function is to build a Python
The [`buildPythonApplication`](#buildpythonapplication-function) function is practically the same as
[`buildPythonPackage`](#buildpythonpackage-function). The main purpose of this function is to build a Python
package where one is interested only in the executables, and not importable
modules. For that reason, when adding this package to a `python.buildEnv`, the
modules. For that reason, when adding this package to a [`python.buildEnv`](#python.buildenv-function), the
modules won't be made available.
Another difference is that `buildPythonPackage` by default prefixes the names of
Another difference is that [`buildPythonPackage`](#buildpythonpackage-function) by default prefixes the names of
the packages with the version of the interpreter. Because this is irrelevant for
applications, the prefix is omitted.
When packaging a Python application with `buildPythonApplication`, it should be
When packaging a Python application with [`buildPythonApplication`](#buildpythonapplication-function), it should be
called with `callPackage` and passed `python` or `pythonPackages` (possibly
specifying an interpreter version), like this:
@ -286,20 +289,25 @@ specifying an interpreter version), like this:
python3.pkgs.buildPythonApplication rec {
pname = "luigi";
version = "2.7.9";
format = "setuptools";
pyproject = true;
src = fetchPypi {
inherit pname version;
hash = "sha256-Pe229rT0aHwA98s+nTHQMEFKZPo/yw6sot8MivFDvAw=";
};
nativeBuildInputs = [
python3.pkgs.setuptools
python3.pkgs.wheel
];
propagatedBuildInputs = with python3.pkgs; [
tornado
python-daemon
];
meta = with lib; {
...
# ...
};
}
```
@ -321,7 +329,7 @@ package is used as both. In this case the package is added as a library to
duplication the `toPythonApplication` can be used to convert a library to an
application.
The Nix expression shall use `buildPythonPackage` and be called from
The Nix expression shall use [`buildPythonPackage`](#buildpythonpackage-function) and be called from
`python-packages.nix`. A reference shall be created from `all-packages.nix` to
the attribute in `python-packages.nix`, and the `toPythonApplication` shall be
applied to the reference:
@ -333,7 +341,7 @@ youtube-dl = with pythonPackages; toPythonApplication youtube-dl;
#### `toPythonModule` function {#topythonmodule-function}
In some cases, such as bindings, a package is created using
`stdenv.mkDerivation` and added as attribute in `all-packages.nix`. The Python
[`stdenv.mkDerivation`](#sec-using-stdenv) and added as an attribute in `all-packages.nix`. The Python
bindings should be made available from `python-packages.nix`. The
`toPythonModule` function takes a derivation and makes certain Python-specific
modifications.
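As a sketch of the pattern, assuming a package `opencv` that supports an `enablePython` override (`self` here is the Python package set inside `python-packages.nix`):

```nix
opencv = toPythonModule (pkgs.opencv.override {
  # build the C++ package together with its Python bindings,
  # against the interpreter of this package set
  enablePython = true;
  pythonPackages = self;
});
```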
@ -399,9 +407,9 @@ specified packages in its path.
#### `python.withPackages` function {#python.withpackages-function}
The `python.withPackages` function provides a simpler interface to the `python.buildEnv` functionality.
The [`python.withPackages`](#python.withpackages-function) function provides a simpler interface to the [`python.buildEnv`](#python.buildenv-function) functionality.
It takes a function as an argument that is passed the set of python packages and returns the list
of the packages to be included in the environment. Using the `withPackages` function, the previous
of the packages to be included in the environment. Using the [`withPackages`](#python.withpackages-function) function, the previous
example for the Pyramid Web Framework environment can be written like this:
```nix
@ -410,7 +418,7 @@ with import <nixpkgs> {};
python.withPackages (ps: [ ps.pyramid ])
```
`withPackages` passes the correct package set for the specific interpreter
[`withPackages`](#python.withpackages-function) passes the correct package set for the specific interpreter
version as an argument to the function. In the above example, `ps` equals
`pythonPackages`. But you can also easily switch to using python3:
@ -422,7 +430,7 @@ python3.withPackages (ps: [ ps.pyramid ])
Now, `ps` is set to `python3Packages`, matching the version of the interpreter.
As `python.withPackages` simply uses `python.buildEnv` under the hood, it also
As [`python.withPackages`](#python.withpackages-function) simply uses [`python.buildEnv`](#python.buildenv-function) under the hood, it also
supports the `env` attribute. The `shell.nix` file from the previous section can
thus be also written like this:
@ -435,23 +443,22 @@ with import <nixpkgs> {};
])).env
```
In contrast to `python.buildEnv`, `python.withPackages` does not support the
In contrast to [`python.buildEnv`](#python.buildenv-function), [`python.withPackages`](#python.withpackages-function) does not support the
more advanced options such as `ignoreCollisions = true` or `postBuild`. If you
need them, you have to use `python.buildEnv`.
need them, you have to use [`python.buildEnv`](#python.buildenv-function).
Python 2 namespace packages may provide `__init__.py` files that collide. In that case
`python.buildEnv` should be used with `ignoreCollisions = true`.
[`python.buildEnv`](#python.buildenv-function) should be used with `ignoreCollisions = true`.
#### Setup hooks {#setup-hooks}
The following are setup hooks specifically for Python packages. Most of these
are used in `buildPythonPackage`.
are used in [`buildPythonPackage`](#buildpythonpackage-function).
- `eggUnpackHook` to move an egg to the correct folder so it can be installed
with the `eggInstallHook`
- `eggBuildHook` to skip building for eggs.
- `eggInstallHook` to install eggs.
- `flitBuildHook` to build a wheel using `flit`.
- `pipBuildHook` to build a wheel using `pip` and PEP 517. Note a build system
(e.g. `setuptools` or `flit`) should still be added as `nativeBuildInput`.
- `pypaBuildHook` to build a wheel using
@ -478,7 +485,7 @@ are used in `buildPythonPackage`.
### Development mode {#development-mode}
Development or editable mode is supported. To develop Python packages
`buildPythonPackage` has additional logic inside `shellPhase` to run `pip
[`buildPythonPackage`](#buildpythonpackage-function) has additional logic inside `shellPhase` to run `pip
install -e . --prefix $TMPDIR/` for the package.
Warning: `shellPhase` is executed only if `setup.py` exists.
@ -559,7 +566,7 @@ without impacting other applications or polluting your user environment.
But Python libraries you would like to use for development cannot be installed,
at least not individually, because they won't be able to find each other
resulting in import errors. Instead, it is possible to create an environment
with `python.buildEnv` or `python.withPackages` where the interpreter and other
with [`python.buildEnv`](#python.buildenv-function) or [`python.withPackages`](#python.withpackages-function) where the interpreter and other
executables are wrapped to be able to find each other and all of the modules.
In the following examples we will start by creating a simple, ad-hoc environment
@ -716,8 +723,8 @@ We've now seen how to create an ad-hoc temporary shell session, and how to
create a single script with Python dependencies, but in the course of normal
development we're usually working in an entire package repository.
As explained in the Nix manual, `nix-shell` can also load an expression from a
`.nix` file. Say we want to have Python 3.11, `numpy` and `toolz`, like before,
As explained [in the `nix-shell` section](https://nixos.org/manual/nix/stable/command-ref/nix-shell) of the Nix manual, `nix-shell` can also load an expression from a `.nix` file.
Say we want to have Python 3.11, `numpy` and `toolz`, like before,
in an environment. We can add a `shell.nix` file describing our dependencies:
```nix
@ -739,8 +746,8 @@ What's happening here?
imports the `<nixpkgs>` function, `{}` calls it and the `with` statement
brings all attributes of `nixpkgs` in the local scope. These attributes form
the main package set.
2. Then we create a Python 3.11 environment with the `withPackages` function, as before.
3. The `withPackages` function expects us to provide a function as an argument
2. Then we create a Python 3.11 environment with the [`withPackages`](#python.withpackages-function) function, as before.
3. The [`withPackages`](#python.withpackages-function) function expects us to provide a function as an argument
that takes the set of all Python packages and returns a list of packages to
include in the environment. Here, we select the packages `numpy` and `toolz`
from the package set.
@ -851,25 +858,32 @@ we will look at how you can use development mode with your code.
#### Python library packages in Nixpkgs {#python-library-packages-in-nixpkgs}
With Nix all packages are built by functions. The main function in Nix for
building Python libraries is `buildPythonPackage`. Let's see how we can build the
building Python libraries is [`buildPythonPackage`](#buildpythonpackage-function). Let's see how we can build the
`toolz` package.
```nix
{ lib
, buildPythonPackage
, fetchPypi
, setuptools
, wheel
}:
buildPythonPackage rec {
pname = "toolz";
version = "0.10.0";
format = "setuptools";
pyproject = true;
src = fetchPypi {
inherit pname version;
hash = "sha256-CP3V73yWSArRHBLUct4hrNMjWZlvaaUlkpm1QP66RWA=";
};
nativeBuildInputs = [
setuptools
wheel
];
# has no tests
doCheck = false;
@ -889,13 +903,13 @@ buildPythonPackage rec {
}
```
What happens here? The function `buildPythonPackage` is called and as argument
What happens here? The function [`buildPythonPackage`](#buildpythonpackage-function) is called and as argument
it accepts a set. In this case the set is a recursive set, `rec`. One of the
arguments is the name of the package, which consists of a basename (generally
following the name on PyPI) and a version. Another argument, `src`, specifies the
source, which in this case is fetched from PyPI using the helper function
`fetchPypi`. The argument `doCheck` is used to set whether tests should be run
when building the package. Since there are no tests, we rely on `pythonImportsCheck`
when building the package. Since there are no tests, we rely on [`pythonImportsCheck`](#using-pythonimportscheck)
to test whether the package can be imported. Furthermore, we specify some meta
information. The output of the function is a derivation.
@ -918,13 +932,18 @@ with import <nixpkgs> {};
my_toolz = python311.pkgs.buildPythonPackage rec {
pname = "toolz";
version = "0.10.0";
format = "setuptools";
pyproject = true;
src = fetchPypi {
inherit pname version;
hash = "sha256-CP3V73yWSArRHBLUct4hrNMjWZlvaaUlkpm1QP66RWA=";
};
nativeBuildInputs = [
python311.pkgs.setuptools
python311.pkgs.wheel
];
# has no tests
doCheck = false;
@ -949,7 +968,7 @@ for which Python version we want to build a package.
So, what did we do here? Well, we took the Nix expression that we used earlier
to build a Python environment, and said that we wanted to include our own
version of `toolz`, named `my_toolz`. To introduce our own package in the scope
of `withPackages` we used a `let` expression. You can see that we used
of [`withPackages`](#python.withpackages-function) we used a `let` expression. You can see that we used
`ps.numpy` to select numpy from the nixpkgs package set (`ps`). We did not take
`toolz` from the Nixpkgs package set this time, but instead took our own version
that we introduced with the `let` expression.
@ -957,14 +976,14 @@ that we introduced with the `let` expression.
#### Handling dependencies {#handling-dependencies}
Our example, `toolz`, does not have any dependencies on other Python packages or
system libraries. According to the manual, `buildPythonPackage` uses the
arguments `buildInputs` and `propagatedBuildInputs` to specify dependencies. If
system libraries. According to the manual, [`buildPythonPackage`](#buildpythonpackage-function) uses the
arguments [`buildInputs`](#var-stdenv-buildInputs) and [`propagatedBuildInputs`](#var-stdenv-propagatedBuildInputs) to specify dependencies. If
something is exclusively a build-time dependency, then the dependency should be
included in `buildInputs`, but if it is (also) a runtime dependency, then it
should be added to `propagatedBuildInputs`. Test dependencies are considered
build-time dependencies and passed to `nativeCheckInputs`.
included in [`buildInputs`](#var-stdenv-buildInputs), but if it is (also) a runtime dependency, then it
should be added to [`propagatedBuildInputs`](#var-stdenv-propagatedBuildInputs). Test dependencies are considered
build-time dependencies and passed to [`nativeCheckInputs`](#var-stdenv-nativeCheckInputs).
The following example shows which arguments are given to `buildPythonPackage` in
The following example shows which arguments are given to [`buildPythonPackage`](#buildpythonpackage-function) in
order to build [`datashape`](https://github.com/blaze/datashape).
```nix
@ -972,6 +991,9 @@ order to build [`datashape`](https://github.com/blaze/datashape).
, buildPythonPackage
, fetchPypi
# build dependencies
, setuptools, wheel
# dependencies
, numpy, multipledispatch, python-dateutil
@ -982,13 +1004,18 @@ order to build [`datashape`](https://github.com/blaze/datashape).
buildPythonPackage rec {
pname = "datashape";
version = "0.4.7";
format = "setuptools";
pyproject = true;
src = fetchPypi {
inherit pname version;
hash = "sha256-FLLvdm1MllKrgTGC6Gb0k0deZeVYvtCCLji/B7uhong=";
};
nativeBuildInputs = [
setuptools
wheel
];
propagatedBuildInputs = [
multipledispatch
numpy
@ -1010,19 +1037,21 @@ buildPythonPackage rec {
```
We can see several runtime dependencies, `numpy`, `multipledispatch`, and
`python-dateutil`. Furthermore, we have `nativeCheckInputs` with `pytest`.
`pytest` is a test runner and is only used during the `checkPhase` and is
therefore not added to `propagatedBuildInputs`.
`python-dateutil`. Furthermore, we have [`nativeCheckInputs`](#var-stdenv-nativeCheckInputs) with `pytest`.
`pytest` is a test runner and is only used during the [`checkPhase`](#ssec-check-phase) and is
therefore not added to [`propagatedBuildInputs`](#var-stdenv-propagatedBuildInputs).
In the previous case we had only dependencies on other Python packages to consider.
Occasionally you also have system libraries to consider. E.g., `lxml` provides
Python bindings to `libxml2` and `libxslt`. These libraries are only required
when building the bindings and are therefore added as `buildInputs`.
when building the bindings and are therefore added as [`buildInputs`](#var-stdenv-buildInputs).
```nix
{ lib
, buildPythonPackage
, fetchPypi
, setuptools
, wheel
, libxml2
, libxslt
}:
@ -1030,13 +1059,18 @@ when building the bindings and are therefore added as `buildInputs`.
buildPythonPackage rec {
pname = "lxml";
version = "3.4.4";
format = "setuptools";
pyproject = true;
src = fetchPypi {
inherit pname version;
hash = "sha256-s9NiusRxFydHzaNRMjjxFcvWxfi45jGb9ql6eJJyQJk=";
};
nativeBuildInputs = [
setuptools
wheel
];
buildInputs = [
libxml2
libxslt
@ -1058,7 +1092,7 @@ files of the dependencies are. This is not always the case.
The example below shows bindings to The Fastest Fourier Transform in the West,
commonly known as FFTW. On Nix we have separate packages of FFTW for the
different types of floats (`"single"`, `"double"`, `"long-double"`). The
bindings need all three types, and therefore we add all three as `buildInputs`.
bindings need all three types, and therefore we add all three as [`buildInputs`](#var-stdenv-buildInputs).
The bindings don't expect to find each of them in a different folder, and
therefore we have to set `LDFLAGS` and `CFLAGS`.
@ -1067,6 +1101,10 @@ therefore we have to set `LDFLAGS` and `CFLAGS`.
, buildPythonPackage
, fetchPypi
# build dependencies
, setuptools
, wheel
# dependencies
, fftw
, fftwFloat
@ -1078,13 +1116,18 @@ therefore we have to set `LDFLAGS` and `CFLAGS`.
buildPythonPackage rec {
pname = "pyFFTW";
version = "0.9.2";
format = "setuptools";
pyproject = true;
src = fetchPypi {
inherit pname version;
hash = "sha256-9ru2r6kwhUCaskiFoaPNuJCfCVoUL01J40byvRt4kHQ=";
};
nativeBuildInputs = [
setuptools
wheel
];
buildInputs = [
fftw
fftwFloat
@ -1114,7 +1157,7 @@ buildPythonPackage rec {
}
```
Note also the line `doCheck = false;`, we explicitly disabled running the test-suite.
Note also the line [`doCheck = false;`](#var-stdenv-doCheck): we explicitly disabled running the test suite.
#### Testing Python Packages {#testing-python-packages}
@ -1123,10 +1166,10 @@ helps to avoid situations where the package was able to build and install,
but is not usable at runtime. Currently, all packages will use the `test`
command provided by the setup.py (i.e. `python setup.py test`). However,
this is currently deprecated https://github.com/pypa/setuptools/pull/1878
and your package should provide its own checkPhase.
and your package should provide its own [`checkPhase`](#ssec-check-phase).
::: {.note}
The `checkPhase` for python maps to the `installCheckPhase` on a
The [`checkPhase`](#ssec-check-phase) for python maps to the `installCheckPhase` on a
normal derivation. This is due to many python packages not behaving well
to the pre-installed version of the package. Version info, and natively
compiled extensions generally only exist in the install directory, and
@ -1191,7 +1234,7 @@ been removed, in this case, it's recommended to use `pytestCheckHook`.
#### Using pytestCheckHook {#using-pytestcheckhook}
`pytestCheckHook` is a convenient hook which will substitute the setuptools
`test` command for a `checkPhase` which runs `pytest`. This is also beneficial
`test` command for a [`checkPhase`](#ssec-check-phase) which runs `pytest`. This is also beneficial
when a package may need many items disabled to run the test suite.
Using the example above, the analogous `pytestCheckHook` usage would be:
@ -1236,14 +1279,14 @@ for example:
```
Trying to concatenate the related strings to disable tests in a regular
`checkPhase` would be much harder to read. This also enables us to comment on
[`checkPhase`](#ssec-check-phase) would be much harder to read. This also enables us to comment on
why specific tests are disabled.
#### Using pythonImportsCheck {#using-pythonimportscheck}
Although unit tests are highly preferred to validate correctness of a package, not
all packages have test suites that can be run easily, and some have none at all.
To help ensure the package still works, `pythonImportsCheck` can attempt to import
To help ensure the package still works, [`pythonImportsCheck`](#using-pythonimportscheck) can attempt to import
the listed modules.
```
@ -1262,7 +1305,7 @@ roughly translates to:
'';
```
However, this is done in its own phase, and not dependent on whether `doCheck = true;`.
However, this is done in its own phase, and not dependent on whether [`doCheck = true;`](#var-stdenv-doCheck).
This can also be useful in verifying that the package doesn't assume commonly
present packages (e.g. `setuptools`).
@ -1334,13 +1377,11 @@ instead of a dev dependency).
Keep in mind that while the examples above are done with `requirements.txt`,
`pythonRelaxDepsHook` works by modifying the resulting wheel file, so it should
work in any of the formats supported by `buildPythonPackage` currently,
with the exception of `other` (see `format` in
[`buildPythonPackage` parameters](#buildpythonpackage-parameters) for more details).
work with any of the [existing hooks](#setup-hooks).
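A minimal sketch of its usage, with placeholder dependency names:

```
nativeBuildInputs = [ pythonRelaxDepsHook ];
# loosen the version constraints of these dependencies in the wheel metadata
pythonRelaxDeps = [ "packaging" ];
# drop these dependencies from the wheel metadata entirely
pythonRemoveDeps = [ "coverage" ];
```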
#### Using unittestCheckHook {#using-unittestcheckhook}
`unittestCheckHook` is a hook which will substitute the setuptools `test` command for a `checkPhase` which runs `python -m unittest discover`:
`unittestCheckHook` is a hook which will substitute the setuptools `test` command for a [`checkPhase`](#ssec-check-phase) which runs `python -m unittest discover`:
```
nativeCheckInputs = [
@ -1410,15 +1451,15 @@ mode is also available. Let's see how you can use it.
In the previous Nix expression the source was fetched from a url. We can also
refer to a local source instead using `src = ./path/to/source/tree;`
If we create a `shell.nix` file which calls `buildPythonPackage`, and if `src`
If we create a `shell.nix` file which calls [`buildPythonPackage`](#buildpythonpackage-function), and if `src`
is a local source, and if the local source has a `setup.py`, then development
mode is activated.
In the following example, we create a simple environment that has a Python 3.11
version of our package in it, as well as its dependencies and other packages we
like to have in the environment, all specified with `propagatedBuildInputs`.
like to have in the environment, all specified with [`propagatedBuildInputs`](#var-stdenv-propagatedBuildInputs).
Indeed, we can just add any package we like to have in our environment to
`propagatedBuildInputs`.
[`propagatedBuildInputs`](#var-stdenv-propagatedBuildInputs).
```nix
with import <nixpkgs> {};
@ -1452,7 +1493,7 @@ own packages. The important functions here are `import` and `callPackage`.
### Including a derivation using `callPackage` {#including-a-derivation-using-callpackage}
Earlier we created a Python environment using `withPackages`, and included the
Earlier we created a Python environment using [`withPackages`](#python.withpackages-function), and included the
`toolz` package via a `let` expression.
Let's split the package definition from the environment definition.
@ -1461,18 +1502,26 @@ We first create a function that builds `toolz` in `~/path/to/toolz/release.nix`
```nix
{ lib
, buildPythonPackage
, fetchPypi
, setuptools
, wheel
}:
buildPythonPackage rec {
pname = "toolz";
version = "0.10.0";
format = "setuptools";
pyproject = true;
src = fetchPypi {
inherit pname version;
hash = "sha256-CP3V73yWSArRHBLUct4hrNMjWZlvaaUlkpm1QP66RWA=";
};
nativeBuildInputs = [
setuptools
wheel
];
meta = with lib; {
changelog = "https://github.com/pytoolz/toolz/releases/tag/${version}";
homepage = "https://github.com/pytoolz/toolz/";
@ -1483,7 +1532,7 @@ buildPythonPackage rec {
}
```
It takes an argument `buildPythonPackage`. We now call this function using
It takes an argument [`buildPythonPackage`](#buildpythonpackage-function). We now call this function using
`callPackage` in the definition of our environment
```nix
@ -1502,10 +1551,10 @@ Packages.buildPythonPackage;
```
Important to remember is that the Python version for which the package is made
depends on the `python` derivation that is passed to `buildPythonPackage`. Nix
depends on the `python` derivation that is passed to [`buildPythonPackage`](#buildpythonpackage-function). Nix
tries to automatically pass arguments when possible, which is why generally you
don't explicitly define which `python` derivation should be used. In the above
example we use `buildPythonPackage` that is part of the set `python3Packages`,
example we use [`buildPythonPackage`](#buildpythonpackage-function) that is part of the set `python3Packages`,
and in this case the `python3` interpreter is automatically used.
## FAQ {#faq}
@ -1648,7 +1697,7 @@ Python, guarantees the right versions of the interpreter and libraries or
packages are available. There is therefore no need to maintain a global `site-packages`.
If you want to create a Python environment for development, then the recommended
method is to use `nix-shell`, either with or without the `python.buildEnv`
method is to use `nix-shell`, either with or without the [`python.buildEnv`](#python.buildenv-function)
function.
### How to consume Python modules using pip in a virtual environment like I am used to on other Operating Systems? {#how-to-consume-python-modules-using-pip-in-a-virtual-environment-like-i-am-used-to-on-other-operating-systems}
@ -1825,7 +1874,7 @@ self: super: {
### How to override a Python package for all Python versions using extensions? {#how-to-override-a-python-package-for-all-python-versions-using-extensions}
The following overlay overrides the call to `buildPythonPackage` for the
The following overlay overrides the call to [`buildPythonPackage`](#buildpythonpackage-function) for the
`foo` package for all interpreters by appending a Python extension to the
`pythonPackagesExtensions` list of extensions.
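Such an overlay might be sketched roughly as follows, where `foo` and the patch are placeholders:

```nix
final: prev: {
  pythonPackagesExtensions = prev.pythonPackagesExtensions ++ [
    (python-final: python-prev: {
      foo = python-prev.foo.overridePythonAttrs (oldAttrs: {
        # hypothetical change: apply an extra patch to foo
        patches = (oldAttrs.patches or [ ]) ++ [ ./fix-foo.patch ];
      });
    })
  ];
}
```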
@ -1852,9 +1901,9 @@ configure alternatives](#sec-overlays-alternatives-blas-lapack)".
In a `setup.py` or `setup.cfg` it is common to declare dependencies:
* `setup_requires` corresponds to `nativeBuildInputs`
* `install_requires` corresponds to `propagatedBuildInputs`
* `tests_require` corresponds to `nativeCheckInputs`
* `setup_requires` corresponds to [`nativeBuildInputs`](#var-stdenv-nativeBuildInputs)
* `install_requires` corresponds to [`propagatedBuildInputs`](#var-stdenv-propagatedBuildInputs)
* `tests_require` corresponds to [`nativeCheckInputs`](#var-stdenv-nativeCheckInputs)
### How to enable interpreter optimizations? {#optimizations}
@ -1901,7 +1950,7 @@ collisions.
### How to contribute a Python package to nixpkgs? {#tools}
Packages inside nixpkgs must use the `buildPythonPackage` or `buildPythonApplication` function directly,
Packages inside nixpkgs must use the [`buildPythonPackage`](#buildpythonpackage-function) or [`buildPythonApplication`](#buildpythonapplication-function) function directly,
because we can only provide security support for non-vendored dependencies.
We recommend [nix-init](https://github.com/nix-community/nix-init) for creating new python packages within nixpkgs,
@ -1915,7 +1964,7 @@ has security implications and is relevant for those using Python in a
`nix-shell`.
When the environment variable `DETERMINISTIC_BUILD` is set, all bytecode will
have timestamp 1. The `buildPythonPackage` function sets `DETERMINISTIC_BUILD=1`
have timestamp 1. The [`buildPythonPackage`](#buildpythonpackage-function) function sets `DETERMINISTIC_BUILD=1`
and [PYTHONHASHSEED=0](https://docs.python.org/3.11/using/cmdline.html#envvar-PYTHONHASHSEED).
Both are also exported in `nix-shell`.
@ -1925,12 +1974,12 @@ It is recommended to test packages as part of the build process.
Source distributions (`sdist`) often include test files, but not always.
By default the command `python setup.py test` is run as part of the
`checkPhase`, but often it is necessary to pass a custom `checkPhase`. An
[`checkPhase`](#ssec-check-phase), but often it is necessary to pass a custom [`checkPhase`](#ssec-check-phase). An
example of such a situation is when `py.test` is used.
#### Common issues {#common-issues}
* Non-working tests can often be deselected. By default `buildPythonPackage`
* Non-working tests can often be deselected. By default [`buildPythonPackage`](#buildpythonpackage-function)
runs `python setup.py test`, which is deprecated. Most Python modules however
do follow the standard test protocol where the pytest runner can be used
instead. `pytest` supports the `-k` and `--ignore` parameters to ignore test
@ -1965,10 +2014,10 @@ example of such a situation is when `py.test` is used.
The following rules are desired to be respected:
* Python libraries are called from `python-packages.nix` and packaged with
`buildPythonPackage`. The expression of a library should be in
[`buildPythonPackage`](#buildpythonpackage-function). The expression of a library should be in
`pkgs/development/python-modules/<name>/default.nix`.
* Python applications live outside of `python-packages.nix` and are packaged
with `buildPythonApplication`.
with [`buildPythonApplication`](#buildpythonapplication-function).
* Make sure libraries build for all Python interpreters.
* By default we enable tests. Make sure the tests are found and, in the case of
libraries, are passing for all interpreters. If certain tests fail they can be
View file
@ -32,7 +32,8 @@ Again, it's possible to launch the interpreter from the shell. The Ruby interpre
#### Load Ruby environment from `.nix` expression {#load-ruby-environment-from-.nix-expression}
As explained in the Nix manual, `nix-shell` can also load an expression from a `.nix` file. Say we want to have Ruby 2.6, `nokogiri`, and `pry`. Consider a `shell.nix` file with:
As explained [in the `nix-shell` section](https://nixos.org/manual/nix/stable/command-ref/nix-shell) of the Nix manual, `nix-shell` can also load an expression from a `.nix` file.
Say we want to have Ruby 2.6, `nokogiri`, and `pry`. Consider a `shell.nix` file with:
```nix
with import <nixpkgs> {};
@ -120,6 +121,16 @@ One common issue that you might have is that you have Ruby 2.6, but also `bundle
mkShell { buildInputs = [ gems (lowPrio gems.wrappedRuby) ]; }
```
Sometimes a Gemfile references other files, such as `.ruby-version` or vendored gems. When copying the Gemfile to the Nix store, we need to copy those files alongside. This can be done using `extraConfigPaths`. For example:
```nix
gems = bundlerEnv {
name = "gems-for-some-project";
gemdir = ./.;
extraConfigPaths = [ "${./.}/.ruby-version" ];
};
```
### Gem-specific configurations and workarounds {#gem-specific-configurations-and-workarounds}
In some cases, especially if the gem has native extensions, you might need to modify the way the gem is built.
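Such modifications go into `bundlerEnv`'s `gemConfig` argument. As a rough sketch, assuming the `pg` gem needs to be pointed at `pg_config`:

```nix
gems = bundlerEnv {
  name = "gems-with-pg";
  gemdir = ./.;
  gemConfig = defaultGemConfig // {
    # point the pg gem's native extension at the PostgreSQL client tools
    pg = attrs: {
      buildFlags = [ "--with-pg-config=${postgresql}/bin/pg_config" ];
    };
  };
};
```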
View file
@ -102,7 +102,7 @@ rustPlatform.buildRustPackage rec {
src = fetchCrate {
inherit pname version;
sha256 = "sha256-aDQA4A5mScX9or3Lyiv/5GyAehidnpKKE0grhbP1Ctc=";
hash = "sha256-aDQA4A5mScX9or3Lyiv/5GyAehidnpKKE0grhbP1Ctc=";
};
cargoHash = "sha256-tbrTbutUs5aPSV+yE0IBUZAAytgmZV7Eqxia7g+9zRs=";
View file
@ -134,7 +134,7 @@ If one of your favourite plugins isn't packaged, you can package it yourself:
{ config, pkgs, ... }:
let
easygrep = pkgs.vimUtils.buildVimPluginFrom2Nix {
easygrep = pkgs.vimUtils.buildVimPlugin {
name = "vim-easygrep";
src = pkgs.fetchFromGitHub {
owner = "dkprice";
@ -212,9 +212,9 @@ Note: this is not possible anymore for Neovim.
## Adding new plugins to nixpkgs {#adding-new-plugins-to-nixpkgs}
Nix expressions for Vim plugins are stored in [pkgs/applications/editors/vim/plugins](https://github.com/NixOS/nixpkgs/tree/master/pkgs/applications/editors/vim/plugins). For the vast majority of plugins, Nix expressions are automatically generated by running [`./update.py`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/update.py). This creates a [generated.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/generated.nix) file based on the plugins listed in [vim-plugin-names](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/vim-plugin-names).
Nix expressions for Vim plugins are stored in [pkgs/applications/editors/vim/plugins](https://github.com/NixOS/nixpkgs/tree/master/pkgs/applications/editors/vim/plugins). For the vast majority of plugins, Nix expressions are automatically generated by running [`nix-shell -p vimPluginsUpdater --run vim-plugins-updater`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/updater.nix). This creates a [generated.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/generated.nix) file based on the plugins listed in [vim-plugin-names](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/vim-plugin-names).
After running `./update.py`, if nvim-treesitter received an update, also run [`nvim-treesitter/update.py`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/update.py) to update the tree sitter grammars for `nvim-treesitter`.
After running the updater, if nvim-treesitter received an update, also run [`nvim-treesitter/update.py`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/update.py) to update the tree sitter grammars for `nvim-treesitter`.
Some plugins require overrides in order to function properly. Overrides are placed in [overrides.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/overrides.nix). Overrides are most often required when a plugin requires some dependencies, or extra steps are required during the build process. For example `deoplete-fish` requires both `deoplete-nvim` and `vim-fish`, and so the following override was added:
@ -241,7 +241,8 @@ GITHUB_API_TOKEN=my_token ./pkgs/applications/editors/vim/plugins/update.py
Alternatively, set the number of processes to a lower count to avoid rate-limiting.
```sh
./pkgs/applications/editors/vim/plugins/update.py --proc 1
nix-shell -p vimPluginsUpdater --run 'vim-plugins-updater --proc 1'
```
## How to maintain an out-of-tree overlay of vim plugins ? {#vim-out-of-tree-overlays}
@ -250,7 +251,7 @@ You can use the updater script to generate basic packages out of a custom vim
plugin list:
```
pkgs/applications/editors/vim/plugins/update.py -i vim-plugin-names -o generated.nix --no-commit
nix-shell -p vimPluginsUpdater --run 'vim-plugins-updater -i vim-plugin-names -o generated.nix --no-commit'
```
with the contents of `vim-plugin-names` being for example:
@ -264,7 +265,7 @@ You can then reference the generated vim plugins via:
```nix
myVimPlugins = pkgs.vimPlugins.extend (
(pkgs.callPackage generated.nix {})
(pkgs.callPackage ./generated.nix {})
);
```
View file
@ -2,7 +2,7 @@
The Nix Packages collection (Nixpkgs) is a set of thousands of packages for the
[Nix package manager](https://nixos.org/nix/), released under a
[permissive MIT/X11 license](https://github.com/NixOS/nixpkgs/blob/master/COPYING).
[permissive MIT license](https://github.com/NixOS/nixpkgs/blob/master/COPYING).
Packages are available for several platforms, and can be used with the Nix
package manager on most GNU/Linux distributions as well as [NixOS](https://nixos.org/nixos).
View file
@ -24,7 +24,8 @@ It is expected that each meta-attribute is one of the following:
### `description` {#var-meta-description}
A short (one-line) description of the package. This is shown by `nix-env -q --description` and also on the Nixpkgs release pages.
A short (one-line) description of the package.
This is displayed on [search.nixos.org](https://search.nixos.org/packages).
Don't include a period at the end. Don't include newline characters. Capitalise the first character. For brevity, don't repeat the name of the package --- just describe what it does.
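For example, a description following these rules, for a hypothetical greeter program:

```nix
meta = {
  description = "Program that produces a familiar, friendly greeting";
};
```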
@ -74,7 +75,7 @@ The name of the main binary for the package. This affects the binary `nix run` e
### `priority` {#var-meta-priority}
The *priority* of the package, used by `nix-env` to resolve file name conflicts between packages. See the Nix manual page for `nix-env` for details. Example: `"10"` (a low-priority package).
The *priority* of the package, used by `nix-env` to resolve file name conflicts between packages. See the [manual page for `nix-env`](https://nixos.org/manual/nix/stable/command-ref/nix-env) for details. Example: `"10"` (a low-priority package).
### `platforms` {#var-meta-platforms}
View file
@ -1,7 +1,5 @@
# Multiple-output packages {#chap-multiple-output}
## Introduction {#sec-multiple-outputs-introduction}
The Nix language allows a derivation to produce multiple outputs, which is similar to what is utilized by other Linux distribution packaging systems. The outputs reside in separate Nix store paths, so they can be mostly handled independently of each other, including passing to build inputs, garbage collection or binary substitution. The exception is that building from source always produces all the outputs.
The main motivation is to save disk space by reducing runtime closure sizes; consequently also sizes of substituted binaries get reduced. Splitting can be used to have more granular runtime dependencies, for example the typical reduction is to split away development-only files, as those are typically not needed during runtime. As a result, closure sizes of many packages can get reduced to a half or even much less.
@ -10,44 +8,12 @@ The main motivation is to save disk space by reducing runtime closure sizes; con
The reduction effects could instead be achieved by building the parts in completely separate derivations. That would often additionally reduce build-time closures, but it tends to be much harder to write such derivations, as build systems typically assume all parts are being built at once. This compromise approach of a single source package producing multiple binary packages is also utilized often by rpm and deb.
:::
A number of attributes can be used to work with a derivation with multiple outputs. The attribute `outputs` is a list of strings, which are the names of the outputs. For each of these names, an identically named attribute is created, corresponding to that output. The attribute `meta.outputsToInstall` is used to determine the default set of outputs to install when using the derivation name unqualified.
A number of attributes can be used to work with a derivation with multiple outputs.
The attribute `outputs` is a list of strings, which are the names of the outputs.
For each of these names, an identically named attribute is created, corresponding to that output.
## Installing a split package {#sec-multiple-outputs-installing}
When installing a package with multiple outputs, the package's `meta.outputsToInstall` attribute determines which outputs are actually installed. `meta.outputsToInstall` is a list whose [default installs binaries and the associated man pages](https://github.com/NixOS/nixpkgs/blob/f1680774340d5443a1409c3421ced84ac1163ba9/pkgs/stdenv/generic/make-derivation.nix#L310-L320). The following sections describe ways to install different outputs.
### Selecting outputs to install via NixOS {#sec-multiple-outputs-installing-nixos}
NixOS provides two ways to select the outputs to install for packages listed in `environment.systemPackages`:
- The configuration option `environment.extraOutputsToInstall` is appended to each package's `meta.outputsToInstall` attribute to determine the outputs to install. It can for example be used to install `info` documentation or debug symbols for all packages.
- The outputs can be listed as packages in `environment.systemPackages`. For example, the `"out"` and `"info"` outputs for the `coreutils` package can be installed by including `coreutils` and `coreutils.info` in `environment.systemPackages`.
### Selecting outputs to install via `nix-env` {#sec-multiple-outputs-installing-nix-env}
`nix-env` lacks an easy way to select the outputs to install. When installing a package, `nix-env` always installs the outputs listed in `meta.outputsToInstall`, even when the user explicitly selects an output.
::: {.warning}
`nix-env` silently disregards the outputs selected by the user, and instead installs the outputs from `meta.outputsToInstall`. For example,
```ShellSession
$ nix-env -iA nixpkgs.coreutils.info
```
installs the `"out"` output (`coreutils.meta.outputsToInstall` is `[ "out" ]`) instead of the requested `"info"`.
:::
The only recourse to select an output with `nix-env` is to override the package's `meta.outputsToInstall`, using the functions described in [](#chap-overrides). For example, the following overlay adds the `"info"` output for the `coreutils` package:
```nix
self: super:
{
coreutils = super.coreutils.overrideAttrs (oldAttrs: {
meta = oldAttrs.meta // { outputsToInstall = oldAttrs.meta.outputsToInstall or [ "out" ] ++ [ "info" ]; };
});
}
```
The attribute `meta.outputsToInstall` is used to determine the [default set of outputs to install](https://github.com/NixOS/nixpkgs/blob/08c3198f1c6fd89a09f8f0ea09b425028a34de3e/pkgs/stdenv/generic/check-meta.nix#L411-L426) when using the derivation name unqualified:
`bin`, or `out`, or the first specified output; as well as `man` if that is specified.
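For example, a derivation declaring several outputs might be sketched like this:

```nix
stdenv.mkDerivation {
  pname = "example";
  version = "1.0";
  # each output becomes its own store path, accessible as an
  # identically named attribute on the derivation
  outputs = [ "bin" "dev" "out" "doc" ];
  # ...
}
```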
## Using a split package {#sec-multiple-outputs-using-split-packages}
View file
@ -180,7 +180,7 @@ stdenv.mkDerivation rec {
src = fetchurl {
url = "https://github.com/Solo5/solo5/releases/download/v${version}/solo5-v${version}.tar.gz";
sha256 = "sha256-viwrS9lnaU8sTGuzK/+L/PlMM/xRRtgVuK5pixVeDEw=";
hash = "sha256-viwrS9lnaU8sTGuzK/+L/PlMM/xRRtgVuK5pixVeDEw=";
};
nativeBuildInputs = [ makeWrapper pkg-config ];
@ -937,6 +937,28 @@ Like `stripDebugList`, but only applies to packages target platform. By defau
Flags passed to the `strip` command applied to the files in the directories listed in `stripDebugList`. Defaults to `-S` (i.e. `--strip-debug`).
##### `stripExclude` {#var-stdenv-stripExclude}
A list of filenames or path patterns to avoid stripping. A file is excluded if its name _or_ path (from the derivation root) matches.
This example prevents all `*.rlib` files from being stripped:
```nix
stdenv.mkDerivation {
# ...
  stripExclude = [ "*.rlib" ];
}
```
This example prevents files within certain paths from being stripped:
```nix
stdenv.mkDerivation {
# ...
  stripExclude = [ "lib/modules/*/build/*" ];
}
```
##### `dontPatchELF` {#var-stdenv-dontPatchELF}
If set, the `patchelf` command is not used to remove unnecessary `RPATH` entries. Only applies to Linux.
@ -969,13 +991,56 @@ Hook executed at the end of the fixup phase.
If set to `true`, the standard environment will enable debug information in C/C++ builds. After installation, the debug information will be separated from the executables and stored in the output named `debug`. (This output is enabled automatically; you don't need to set the `outputs` attribute explicitly.) To be precise, the debug information is stored in `debug/lib/debug/.build-id/XX/YYYY…`, where \<XXYYYY…\> is the \<build ID\> of the binary — a SHA-1 hash of the contents of the binary. Debuggers like GDB use the build ID to look up the separated debug information.
For example, with GDB, you can add
:::{.example #ex-gdb-debug-symbols-socat}
```
set debug-file-directory ~/.nix-profile/lib/debug
# Enable debug symbols for use with GDB
To make GDB find debug information for the `socat` package and its dependencies, you can use the following `shell.nix`:
```nix
let
pkgs = import ./. {
config = {};
overlays = [
(final: prev: {
ncurses = prev.ncurses.overrideAttrs { separateDebugInfo = true; };
readline = prev.readline.overrideAttrs { separateDebugInfo = true; };
})
];
};
myDebugInfoDirs = pkgs.symlinkJoin {
name = "myDebugInfoDirs";
paths = with pkgs; [
glibc.debug
ncurses.debug
openssl.debug
readline.debug
];
};
in
pkgs.mkShell {
NIX_DEBUG_INFO_DIRS = "${pkgs.lib.getLib myDebugInfoDirs}/lib/debug";
packages = [
pkgs.gdb
pkgs.socat
];
shellHook = ''
${pkgs.lib.getBin pkgs.gdb}/bin/gdb ${pkgs.lib.getBin pkgs.socat}/bin/socat
'';
}
```
to `~/.gdbinit`. GDB will then be able to find debug information installed via `nix-env -i`.
This setup works as follows:
- Add [`overlays`](#chap-overlays) to the package set, since debug symbols are disabled for `ncurses` and `readline` by default.
- Create a derivation to combine all required debug symbols under one path with [`symlinkJoin`](#trivial-builder-symlinkJoin).
- Set the environment variable `NIX_DEBUG_INFO_DIRS` in the shell. Nixpkgs patches `gdb` to use it for looking up debug symbols.
- Run `gdb` on the `socat` binary on shell startup in the [`shellHook`](#sec-pkgs-mkShell). Here we use [`lib.getBin`](#function-library-lib.attrsets.getBin) to ensure that the correct derivation output is selected rather than the default one.
:::
### The installCheck phase {#ssec-installCheck-phase}
@ -1179,7 +1244,7 @@ someVar=$(stripHash $name)
### `wrapProgram` \<executable\> \<makeWrapperArgs\> {#fun-wrapProgram}
Convenience function for `makeWrapper` that replaces `<\executable\>` with a wrapper that executes the original program. It takes all the same arguments as `makeWrapper`, except for `--inherit-argv0` (used by the `makeBinaryWrapper` implementation) and `--argv0` (used by both `makeWrapper` and `makeBinaryWrapper` wrapper implementations).
Convenience function for `makeWrapper` that replaces `<executable>` with a wrapper that executes the original program. It takes all the same arguments as `makeWrapper`, except for `--inherit-argv0` (used by the `makeBinaryWrapper` implementation) and `--argv0` (used by both `makeWrapper` and `makeBinaryWrapper` wrapper implementations).
If you apply it multiple times, it will overwrite the wrapper file and you will end up with double wrapping, which should be avoided.
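As an illustration, a minimal sketch of a typical use in `postFixup`; the package name `hello` and the `coreutils` dependency are hypothetical:
```nix
stdenv.mkDerivation {
  # ...
  nativeBuildInputs = [ makeWrapper ];
  postFixup = ''
    # Replace $out/bin/hello with a wrapper that extends PATH before running it
    wrapProgram $out/bin/hello \
      --prefix PATH : ${lib.makeBinPath [ coreutils ]}
  '';
}
```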
@ -1456,7 +1521,7 @@ This flag can break dynamic shared object loading. For instance, the module syst
#### `bindnow` {#bindnow}
Adds the `-z bindnow` linker option. During program load, all dynamic symbols are resolved, allowing for the complete GOT to be marked read-only (due to `relro`). This prevents GOT overwrite attacks. For very large applications, this can incur some performance loss during initial load while symbols are resolved, but this shouldn't be an issue for daemons.
Adds the `-z now` linker option. During program load, all dynamic symbols are resolved, allowing for the complete GOT to be marked read-only (due to `relro`). This prevents GOT overwrite attacks. For very large applications, this can incur some performance loss during initial load while symbols are resolved, but this shouldn't be an issue for daemons.
This flag can break dynamic shared object loading. For instance, the module systems of Xorg and PHP are incompatible with this flag. Programs incompatible with this flag often fail at runtime due to missing symbols, like:


@ -1,6 +1,7 @@
# Using Nixpkgs {#part-using}
```{=include=} chapters
using/platform-support.chapter.md
using/configuration.chapter.md
using/overlays.chapter.md
using/overrides.chapter.md


@ -24,7 +24,7 @@ The list of overlays is determined as follows.
2. Otherwise, if the Nix path entry `<nixpkgs-overlays>` exists, we look for overlays at that path, as described below.
See the section on `NIX_PATH` in the Nix manual for more details on how to set a value for `<nixpkgs-overlays>.`
See the [section on `NIX_PATH`](https://nixos.org/manual/nix/stable/command-ref/env-common.html#env-NIX_PATH) in the Nix manual for more details on how to set a value for `<nixpkgs-overlays>`.
3. If one of `~/.config/nixpkgs/overlays.nix` and `~/.config/nixpkgs/overlays/` exists, then we look for overlays at that path, as described below. It is an error if both exist.


@ -0,0 +1,18 @@
# Platform Support {#chap-platform-support}
Packages receive varying degrees of support, both in terms of maintainer attention and available computation resources for continuous integration (CI).
Below is the list of the best supported platforms:
- `x86_64-linux`: Highest level of support.
- `aarch64-linux`: Well supported, with most packages building successfully in CI.
- `aarch64-darwin`: Receives better support than `x86_64-darwin`.
- `x86_64-darwin`: Receives some support.
There are many other platforms with varying levels of support.
The provisional platform list in [Appendix A] of [RFC046], while not up to date, can be used as guidance.
A more formal definition of the platform support tiers is provided in [RFC046], but has not been fully implemented yet.
[RFC046]: https://github.com/NixOS/rfcs/blob/master/rfcs/0046-platform-support-tiers.md
[Appendix A]: https://github.com/NixOS/rfcs/blob/master/rfcs/0046-platform-support-tiers.md#appendix-a-non-normative-description-of-platforms-in-november-2019


@ -70,4 +70,7 @@ tests/filesystem.sh
# Run the lib.path property tests
path/tests/prop.sh
# Run the lib.fileset tests
fileset/tests.sh
```


@ -338,7 +338,7 @@ rec {
);
/*
Like builtins.foldl' but for attribute sets.
Like [`lib.lists.foldl'`](#function-library-lib.lists.foldl-prime) but for attribute sets.
Iterates over every name-value pair in the given attribute set.
The result of the callback function is often called `acc` for accumulator. It is passed between callbacks from left to right and the final `acc` is the return value of `foldlAttrs`.
@ -372,9 +372,9 @@ rec {
123
foldlAttrs
(_: _: v: v)
(throw "initial accumulator not needed")
{ z = 3; a = 2; };
(acc: _: _: acc)
3
{ z = throw "value not needed"; a = throw "value not needed"; };
->
3
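Another illustrative sketch (hypothetical values): the callback receives the accumulator, the attribute name and its value, with attributes visited in alphabetical order.
foldlAttrs
(acc: name: value: acc ++ [ "${name}=${toString value}" ])
[ ]
{ b = 2; a = 1; }
->
[ "a=1" "b=2" ]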


@ -19,7 +19,7 @@ rec {
name = "sed-4.2.2-pre";
src = fetchurl {
url = ftp://alpha.gnu.org/gnu/sed/sed-4.2.2-pre.tar.bz2;
sha256 = "11nq06d131y4wmf3drm0yk502d2xc6n5qy82cg88rb9nqd2lj41k";
hash = "sha256-MxBJRcM2rYzQYwJ5XKxhXTQByvSg5jZc5cSHEZoB2IY=";
};
patches = [];
});


@ -41,6 +41,7 @@ let
# serialization
cli = callLibs ./cli.nix;
gvariant = callLibs ./gvariant.nix;
generators = callLibs ./generators.nix;
# misc
@ -54,6 +55,7 @@ let
# Eval-time filesystem handling
path = callLibs ./path;
filesystem = callLibs ./filesystem.nix;
fileset = callLibs ./fileset;
sources = callLibs ./sources.nix;
# back-compat aliases
@ -104,6 +106,7 @@ let
upperChars toLower toUpper addContextFrom splitString
removePrefix removeSuffix versionOlder versionAtLeast
getName getVersion
cmakeOptionType cmakeBool cmakeFeature
mesonOption mesonBool mesonEnable
nameFromURL enableFeature enableFeatureAs withFeature
withFeatureAs fixedWidthString fixedWidthNumber


@ -0,0 +1,216 @@
# File set library
This is the internal contributor documentation.
The user documentation is [in the Nixpkgs manual](https://nixos.org/manual/nixpkgs/unstable/#sec-fileset).
## Goals
The main goal of the file set library is to be able to select local files that should be added to the Nix store.
It should have the following properties:
- Easy:
The functions should have obvious semantics, be low in number and be composable.
- Safe:
Throw early and helpful errors when mistakes are detected.
- Lazy:
Only compute values when necessary.
Non-goals are:
- Efficient:
If the abstraction proves itself worthwhile but too slow, it can still be optimized further.
## Tests
Tests are declared in [`tests.sh`](./tests.sh) and can be run using
```
./tests.sh
```
## Benchmark
A simple benchmark against the HEAD commit can be run using
```
./benchmark.sh HEAD
```
This is intended to be run manually and is not checked by CI.
## Internal representation
The internal representation is versioned in order to allow file sets from different Nixpkgs versions to be composed with each other, see [`internal.nix`](./internal.nix) for the versions and conversions between them.
This section describes only the current representation, but past versions will have to be supported by the code.
### `fileset`
An attribute set with these values:
- `_type` (constant string `"fileset"`):
Tag to indicate this value is a file set.
- `_internalVersion` (constant `3`, the current version):
Version of the representation.
- `_internalIsEmptyWithoutBase` (bool):
Whether this file set is the empty file set without a base path.
If `true`, `_internalBase*` and `_internalTree` are not set.
This is the only way to represent an empty file set without needing a base path.
Such a value can be used as the identity element for `union` and the return value of `unions []` and co.
- `_internalBase` (path):
Any files outside of this path cannot influence the set of files.
This is always a directory.
- `_internalBaseRoot` (path):
The filesystem root of `_internalBase`, same as `(lib.path.splitRoot _internalBase).root`.
This is here because this needs to be computed anyway, and this computation shouldn't be duplicated.
- `_internalBaseComponents` (list of strings):
The path components of `_internalBase`, same as `lib.path.subpath.components (lib.path.splitRoot _internalBase).subpath`.
This is here because this needs to be computed anyway, and this computation shouldn't be duplicated.
- `_internalTree` ([filesetTree](#filesettree)):
A tree representation of all included files under `_internalBase`.
- `__noEval` (error):
An error indicating that directly evaluating file sets is not supported.
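For illustration, a file set for a hypothetical directory `/home/user/project` containing `default.nix` and a fully included `src` directory could look like this (the `_noEval` throw elided):
```nix
{
  _type = "fileset";
  _internalVersion = 3;
  _internalIsEmptyWithoutBase = false;
  _internalBase = /home/user/project;
  _internalBaseRoot = /.;
  _internalBaseComponents = [ "home" "user" "project" ];
  _internalTree = {
    "default.nix" = "regular";
    src = "directory";
  };
}
```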
### `filesetTree`
One of the following:
- `{ <name> = filesetTree; }`:
A directory with a nested `filesetTree` value for directory entries.
Entries not included may either be omitted or set to `null`, as necessary to improve efficiency or laziness.
- `"directory"`:
A directory with all its files included recursively, allowing early cutoff for some operations.
This specific string is chosen to be compatible with `builtins.readDir` for a simpler implementation.
- `"regular"`, `"symlink"`, `"unknown"` or any other non-`"directory"` string:
A nested file with its file type.
These specific strings are chosen to be compatible with `builtins.readDir` for a simpler implementation.
Distinguishing between different file types is not strictly necessary for the functionality of this library,
but it does allow nicer printing of file sets.
- `null`:
A file or directory that is excluded from the tree.
It may still exist on the file system.
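For example, this hypothetical tree includes `Makefile`, all files under `src` recursively, and `tests/run.sh`, while excluding `tests/old`:
```nix
{
  Makefile = "regular";
  src = "directory";
  tests = {
    "run.sh" = "regular";
    old = null;
  };
}
```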
## API design decisions
This section justifies API design decisions.
### Internal structure
The representation of the file set data type is internal and can be changed over time.
Arguments:
- (+) The point of this library is to provide high-level functions; users don't need to be concerned with how it's implemented.
- (+) It allows adjustments to the representation, which is especially useful in the early days of the library.
- (+) It still allows the representation to be stabilized later if necessary and if it has proven itself
### Influence tracking
File set operations internally track the top-most directory that could influence the exact contents of a file set.
Specifically, `toSource` requires that the given `fileset` is completely determined by files within the directory specified by the `root` argument.
For example, even with `dir/file.txt` being the only file in `./.`, `toSource { root = ./dir; fileset = ./.; }` gives an error.
This is because `fileset` may as well be the result of filtering `./.` in a way that excludes `dir`.
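In code form, under the setup above:
```nix
# dir/file.txt is the only file under ./.
lib.fileset.toSource { root = ./dir; fileset = ./.; }  # error: `fileset` could contain files outside `root`
lib.fileset.toSource { root = ./.; fileset = ./dir; }  # works
```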
Arguments:
- (+) This gives us the guarantee that adding new files to a project never breaks a file set expression.
This is also true in a lesser form for removed files:
only removing files explicitly referenced by paths can break a file set expression.
- (+) This can be removed later, if we discover it's too restrictive
- (-) It leads to errors when a sensible result could sometimes be returned, such as in the above example.
### Empty file set without a base
There is a special representation for an empty file set without a base path.
This is used for return values that should be empty when there's no base path that would make sense.
Arguments:
- Alternative: This could also be represented using `_internalBase = /.` and `_internalTree = null`.
- (+) Removes the need for a special representation.
- (-) Due to [influence tracking](#influence-tracking),
`union empty ./.` would have `/.` as the base path,
which would then prevent `toSource { root = ./.; fileset = union empty ./.; }` from working,
which is not as one would expect.
- (-) With the assumption that there can be multiple filesystem roots (as established with the [path library](../path/README.md)),
this would have to cause an error with `union empty pathWithAnotherFilesystemRoot`,
which is not as one would expect.
- Alternative: Do not have such a value and error when it would be needed as a return value
- (+) Removes the need for a special representation.
- (-) Leaves us with no identity element for `union` and no reasonable return value for `unions []`.
From a set theory perspective, which has a well-known notion of empty sets, this is unintuitive.
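A sketch of the chosen behavior:
```nix
# `unions [ ]` returns the empty file set without a base path,
# which acts as the identity element for `union`:
lib.fileset.union (lib.fileset.unions [ ]) ./src
# is equivalent to coercing ./src to a file set
```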
### Empty directories
File sets can only represent a _set_ of local files, directories on their own are not representable.
Arguments:
- (+) There does not seem to be a sensible set of combinators when directories can be represented on their own.
Here's some possibilities:
- `./.` represents the files in `./.` _and_ the directory itself including its subdirectories, meaning that even if there are no files, the entire structure of `./.` is preserved.
In that case, what should `fileFilter (file: false) ./.` return?
It could return the entire directory structure unchanged, but with all files removed, which would not be what one would expect.
Trying to have a filter function that also supports directories will lead to the question of:
What should the behavior be if `./foo` itself is excluded but all of its contents are included?
It leads to having to define when directories are recursed into, but then we're effectively back at how the `builtins.path`-based filters work.
- `./.` represents all files in `./.` _and_ the directory itself, but not its subdirectories, meaning that at least `./.` will be preserved even if it's empty.
In that case, `intersect ./. ./foo` should only include files and no directories themselves, since `./.` includes only `./.` as a directory, and same for `./foo`, so there's no overlap in directories.
But intuitively this operation should result in the same as `./foo`; everything else is just confusing.
- (+) This matches how Git only supports files, so developers should already be used to it.
- (-) Empty directories (even if they contain nested directories) are neither representable nor preserved when coercing from paths.
- (+) It is very rare that empty directories are necessary.
- (+) We can implement a workaround, allowing `toSource` to take an extra argument for ensuring certain extra directories exist in the result.
- (-) It slows down store imports, since the evaluator needs to traverse the entire tree to remove any empty directories
- (+) This can still be optimized by introducing more Nix builtins if necessary
### String paths
File sets do not support Nix store paths in strings such as `"/nix/store/...-source"`.
Arguments:
- (+) Such paths are usually produced by derivations, which means `toSource` would either:
- Require IFD if `builtins.path` is used as the underlying primitive
- Require importing the entire `root` into the store such that derivations can be used to do the filtering
- (+) The convenient path coercion like `union ./foo ./bar` wouldn't work for absolute paths, requiring more verbose alternate interfaces:
- `let root = "/nix/store/...-source"; in union "${root}/foo" "${root}/bar"`
Verbose and dangerous because if `root` was a path, the entire path would get imported into the store.
- `toSource { root = "/nix/store/...-source"; fileset = union "./foo" "./bar"; }`
Does not allow debug printing intermediate file set contents, since we don't know the path's contents before having a `root`.
- `let fs = lib.fileset.withRoot "/nix/store/...-source"; in fs.union "./foo" "./bar"`
Makes library functions impure since they depend on the contextual root path, questionable composability.
- (+) The point of the file set abstraction is to specify which files should get imported into the store.
This use case makes little sense for files that are already in the store.
This should be a separate abstraction as e.g. `pkgs.drvLayout` instead, which could have a similar interface but be specific to derivations.
Additional capabilities could be supported that can't be done at evaluation time, such as renaming files, creating new directories, setting executable bits, etc.
### Single files
File sets cannot add single files to the store; they can only import files under directories.
Arguments:
- (+) There's no point in using this library for a single file, since you can't do anything other than add it to the store or not.
And it would be unclear how the library should behave if the one file wouldn't be added to the store:
`toSource { root = ./file.nix; fileset = <empty>; }` has no reasonable result because returning an empty store path wouldn't match the file type, and there's no way to have an empty file store path, whatever that would mean.
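The supported way to import a single file is therefore to point `root` at its parent directory, a sketch:
```nix
lib.fileset.toSource {
  root = ./.;
  fileset = ./file.nix;
}
# => "/nix/store/...-source", containing only file.nix
```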
## To update in the future
Here's a list of places in the library that need to be updated in the future:
- > The file set library is currently somewhat limited but is being expanded to include more functions over time.
in [the manual](../../doc/functions/fileset.section.md)
- If/Once a function to convert `lib.sources` values into file sets exists, the `_coerce` and `toSource` functions should be updated to mention that function in the error when such a value is passed
- If/Once a function exists that can optionally include a path depending on whether it exists, the error message for the path not existing in `_coerce` should mention the new function

third_party/nixpkgs/lib/fileset/benchmark.sh vendored Executable file

@ -0,0 +1,140 @@
#!/usr/bin/env nix-shell
#!nix-shell -i bash -p sta jq bc nix -I nixpkgs=../..
# shellcheck disable=SC2016
# Benchmarks lib.fileset
# Run:
# [nixpkgs]$ lib/fileset/benchmark.sh HEAD
set -euo pipefail
shopt -s inherit_errexit dotglob
if (( $# == 0 )); then
echo "Usage: $0 HEAD"
echo "Benchmarks the current tree against the HEAD commit. Any git ref will work."
exit 1
fi
compareTo=$1
SCRIPT_FILE=$(readlink -f "${BASH_SOURCE[0]}")
SCRIPT_DIR=$(dirname "$SCRIPT_FILE")
nixpkgs=$(cd "$SCRIPT_DIR/../.."; pwd)
tmp="$(mktemp -d)"
clean_up() {
rm -rf "$tmp"
}
trap clean_up EXIT SIGINT SIGTERM
work="$tmp/work"
mkdir "$work"
cd "$work"
declare -a stats=(
".envs.elements"
".envs.number"
".gc.totalBytes"
".list.concats"
".list.elements"
".nrFunctionCalls"
".nrLookups"
".nrOpUpdates"
".nrPrimOpCalls"
".nrThunks"
".sets.elements"
".sets.number"
".symbols.number"
".values.number"
)
runs=10
run() {
# Empty the file
: > cpuTimes
for i in $(seq 0 "$runs"); do
NIX_PATH=nixpkgs=$1 NIX_SHOW_STATS=1 NIX_SHOW_STATS_PATH=$tmp/stats.json \
nix-instantiate --eval --strict --show-trace >/dev/null \
--expr 'with import <nixpkgs/lib>; with fileset; '"$2"
# Only measure the time after the first run, one is warmup
if (( i > 0 )); then
jq '.cpuTime' "$tmp/stats.json" >> cpuTimes
fi
done
# Compute mean and standard deviation
read -r mean sd < <(sta --mean --sd --brief <cpuTimes)
jq --argjson mean "$mean" --argjson sd "$sd" \
'.cpuTimeMean = $mean | .cpuTimeSd = $sd' \
"$tmp/stats.json"
}
bench() {
echo "Benchmarking expression $1" >&2
#echo "Running benchmark on index" >&2
run "$nixpkgs" "$1" > "$tmp/new.json"
(
#echo "Checking out $compareTo" >&2
git -C "$nixpkgs" worktree add --quiet "$tmp/worktree" "$compareTo"
trap 'git -C "$nixpkgs" worktree remove "$tmp/worktree"' EXIT
#echo "Running benchmark on $compareTo" >&2
run "$tmp/worktree" "$1" > "$tmp/old.json"
)
read -r oldMean oldSd newMean newSd percentageMean percentageSd < \
<(jq -rn --slurpfile old "$tmp/old.json" --slurpfile new "$tmp/new.json" \
' $old[0].cpuTimeMean as $om
| $old[0].cpuTimeSd as $os
| $new[0].cpuTimeMean as $nm
| $new[0].cpuTimeSd as $ns
| (100 / $om * $nm) as $pm
# Copied from https://github.com/sharkdp/hyperfine/blob/b38d550b89b1dab85139eada01c91a60798db9cc/src/benchmark/relative_speed.rs#L46-L53
| ($pm * pow(pow($ns / $nm; 2) + pow($os / $om; 2); 0.5)) as $ps
| [ $om, $os, $nm, $ns, $pm, $ps ]
| @sh')
echo -e "Mean CPU time $newMean (σ = $newSd) for $runs runs is \e[0;33m$percentageMean% (σ = $percentageSd%)\e[0m of the old value $oldMean (σ = $oldSd)" >&2
different=0
for stat in "${stats[@]}"; do
oldValue=$(jq "$stat" "$tmp/old.json")
newValue=$(jq "$stat" "$tmp/new.json")
if (( oldValue != newValue )); then
percent=$(bc <<< "scale=100; result = 100/$oldValue*$newValue; scale=4; result / 1")
if (( oldValue < newValue )); then
echo -e "Statistic $stat ($newValue) is \e[0;31m$percent% (+$(( newValue - oldValue )))\e[0m of the old value $oldValue" >&2
else
echo -e "Statistic $stat ($newValue) is \e[0;32m$percent% (-$(( oldValue - newValue )))\e[0m of the old value $oldValue" >&2
fi
(( different++ )) || true
fi
done
echo "$different stats differ between the current tree and $compareTo"
echo ""
}
# Create a fairly populated tree
touch f{0..5}
mkdir d{0..5}
mkdir e{0..5}
touch d{0..5}/f{0..5}
mkdir -p d{0..5}/d{0..5}
mkdir -p e{0..5}/e{0..5}
touch d{0..5}/d{0..5}/f{0..5}
mkdir -p d{0..5}/d{0..5}/d{0..5}
mkdir -p e{0..5}/e{0..5}/e{0..5}
touch d{0..5}/d{0..5}/d{0..5}/f{0..5}
mkdir -p d{0..5}/d{0..5}/d{0..5}/d{0..5}
mkdir -p e{0..5}/e{0..5}/e{0..5}/e{0..5}
touch d{0..5}/d{0..5}/d{0..5}/d{0..5}/f{0..5}
bench 'toSource { root = ./.; fileset = ./.; }'
rm -rf -- *
touch {0..1000}
bench 'toSource { root = ./.; fileset = unions (mapAttrsToList (name: value: ./. + "/${name}") (builtins.readDir ./.)); }'
rm -rf -- *


@ -0,0 +1,368 @@
{ lib }:
let
inherit (import ./internal.nix { inherit lib; })
_coerce
_coerceMany
_toSourceFilter
_unionMany
_printFileset
;
inherit (builtins)
isList
isPath
pathExists
seq
typeOf
;
inherit (lib.lists)
imap0
;
inherit (lib.path)
hasPrefix
splitRoot
;
inherit (lib.strings)
isStringLike
;
inherit (lib.filesystem)
pathType
;
inherit (lib.sources)
cleanSourceWith
;
inherit (lib.trivial)
pipe
;
in {
/*
Add the local files contained in `fileset` to the store as a single [store path](https://nixos.org/manual/nix/stable/glossary#gloss-store-path) rooted at `root`.
The result is the store path as a string-like value, making it usable e.g. as the `src` of a derivation, or in string interpolation:
```nix
stdenv.mkDerivation {
src = lib.fileset.toSource { ... };
# ...
}
```
The name of the store path is always `source`.
Type:
toSource :: {
root :: Path,
fileset :: FileSet,
} -> SourceLike
Example:
# Import the current directory into the store
# but only include files under ./src
toSource {
root = ./.;
fileset = ./src;
}
=> "/nix/store/...-source"
# Import the current directory into the store
# but only include ./Makefile and all files under ./src
toSource {
root = ./.;
fileset = union
./Makefile
./src;
}
=> "/nix/store/...-source"
# Trying to include a file outside the root will fail
toSource {
root = ./.;
fileset = unions [
./Makefile
./src
../LICENSE
];
}
=> <error>
# The root needs to point to a directory that contains all the files
toSource {
root = ../.;
fileset = unions [
./Makefile
./src
../LICENSE
];
}
=> "/nix/store/...-source"
# The root has to be a local filesystem path
toSource {
root = "/nix/store/...-source";
fileset = ./.;
}
=> <error>
*/
toSource = {
/*
(required) The local directory [path](https://nixos.org/manual/nix/stable/language/values.html#type-path) that will correspond to the root of the resulting store path.
Paths in [strings](https://nixos.org/manual/nix/stable/language/values.html#type-string), including Nix store paths, cannot be passed as `root`.
`root` has to be a directory.
<!-- Ignore the indentation here, this is a nixdoc rendering bug that needs to be fixed: https://github.com/nix-community/nixdoc/issues/75 -->
:::{.note}
Changing `root` only affects the directory structure of the resulting store path, it does not change which files are added to the store.
The only way to change which files get added to the store is by changing the `fileset` attribute.
:::
*/
root,
/*
(required) The file set whose files to import into the store.
File sets can be created using other functions in this library.
This argument can also be a path,
which gets [implicitly coerced to a file set](#sec-fileset-path-coercion).
<!-- Ignore the indentation here, this is a nixdoc rendering bug that needs to be fixed: https://github.com/nix-community/nixdoc/issues/75 -->
:::{.note}
If a directory does not recursively contain any file, it is omitted from the store path contents.
:::
*/
fileset,
}:
let
# We cannot rename matched attribute arguments, so let's work around it with an extra `let in` statement
filesetArg = fileset;
in
let
fileset = _coerce "lib.fileset.toSource: `fileset`" filesetArg;
rootFilesystemRoot = (splitRoot root).root;
filesetFilesystemRoot = (splitRoot fileset._internalBase).root;
sourceFilter = _toSourceFilter fileset;
in
if ! isPath root then
if isStringLike root then
throw ''
lib.fileset.toSource: `root` ("${toString root}") is a string-like value, but it should be a path instead.
Paths in strings are not supported by `lib.fileset`, use `lib.sources` or derivations instead.''
else
throw ''
lib.fileset.toSource: `root` is of type ${typeOf root}, but it should be a path instead.''
# Currently all Nix paths have the same filesystem root, but this could change in the future.
# See also ../path/README.md
else if ! fileset._internalIsEmptyWithoutBase && rootFilesystemRoot != filesetFilesystemRoot then
throw ''
lib.fileset.toSource: Filesystem roots are not the same for `fileset` and `root` ("${toString root}"):
`root`: root "${toString rootFilesystemRoot}"
`fileset`: root "${toString filesetFilesystemRoot}"
Different roots are not supported.''
else if ! pathExists root then
throw ''
lib.fileset.toSource: `root` (${toString root}) does not exist.''
else if pathType root != "directory" then
throw ''
lib.fileset.toSource: `root` (${toString root}) is a file, but it should be a directory instead. Potential solutions:
- If you want to import the file into the store _without_ a containing directory, use string interpolation or `builtins.path` instead of this function.
- If you want to import the file into the store _with_ a containing directory, set `root` to the containing directory, such as ${toString (dirOf root)}, and set `fileset` to the file path.''
else if ! fileset._internalIsEmptyWithoutBase && ! hasPrefix root fileset._internalBase then
throw ''
lib.fileset.toSource: `fileset` could contain files in ${toString fileset._internalBase}, which is not under the `root` (${toString root}). Potential solutions:
- Set `root` to ${toString fileset._internalBase} or any directory higher up. This changes the layout of the resulting store path.
- Set `fileset` to a file set that cannot contain files outside the `root` (${toString root}). This could change the files included in the result.''
else
builtins.seq sourceFilter
cleanSourceWith {
name = "source";
src = root;
filter = sourceFilter;
};
/*
The file set containing all files that are in either of two given file sets.
This is the same as [`unions`](#function-library-lib.fileset.unions),
but takes just two file sets instead of a list.
See also [Union (set theory)](https://en.wikipedia.org/wiki/Union_(set_theory)).
The given file sets are evaluated as lazily as possible,
with the first argument being evaluated first if needed.
Type:
union :: FileSet -> FileSet -> FileSet
Example:
# Create a file set containing the file `Makefile`
# and all files recursively in the `src` directory
union ./Makefile ./src
# Create a file set containing the file `Makefile`
# and the LICENSE file from the parent directory
union ./Makefile ../LICENSE
*/
union =
# The first file set.
# This argument can also be a path,
# which gets [implicitly coerced to a file set](#sec-fileset-path-coercion).
fileset1:
# The second file set.
# This argument can also be a path,
# which gets [implicitly coerced to a file set](#sec-fileset-path-coercion).
fileset2:
_unionMany
(_coerceMany "lib.fileset.union" [
{
context = "first argument";
value = fileset1;
}
{
context = "second argument";
value = fileset2;
}
]);
/*
The file set containing all files that are in any of the given file sets.
This is the same as [`union`](#function-library-lib.fileset.union),
but takes a list of file sets instead of just two.
See also [Union (set theory)](https://en.wikipedia.org/wiki/Union_(set_theory)).
The given file sets are evaluated as lazily as possible,
with earlier elements being evaluated first if needed.
Type:
unions :: [ FileSet ] -> FileSet
Example:
# Create a file set containing selected files
unions [
# Include the single file `Makefile` in the current directory
# This errors if the file doesn't exist
./Makefile
# Recursively include all files in the `src/code` directory
# If this directory is empty this has no effect
./src/code
# Include the files `run.sh` and `unit.c` from the `tests` directory
./tests/run.sh
./tests/unit.c
# Include the `LICENSE` file from the parent directory
../LICENSE
]
*/
unions =
# A list of file sets.
# The elements can also be paths,
# which get [implicitly coerced to file sets](#sec-fileset-path-coercion).
filesets:
if ! isList filesets then
throw "lib.fileset.unions: Expected argument to be a list, but got a ${typeOf filesets}."
else
pipe filesets [
# Annotate the elements with context, used by _coerceMany for better errors
(imap0 (i: el: {
context = "element ${toString i}";
value = el;
}))
(_coerceMany "lib.fileset.unions")
_unionMany
];
/*
Incrementally evaluate and trace a file set in a pretty way.
This function is only intended for debugging purposes.
The exact tracing format is unspecified and may change.
This function takes a final argument to return.
In comparison, [`traceVal`](#function-library-lib.fileset.traceVal) returns
the given file set argument.
This variant is useful for tracing file sets in the Nix repl.
Type:
trace :: FileSet -> Any -> Any
Example:
trace (unions [ ./Makefile ./src ./tests/run.sh ]) null
=>
trace: /home/user/src/myProject
trace: - Makefile (regular)
trace: - src (all files in directory)
trace: - tests
trace: - run.sh (regular)
null
*/
trace =
/*
The file set to trace.
This argument can also be a path,
which gets [implicitly coerced to a file set](#sec-fileset-path-coercion).
*/
fileset:
let
# "fileset" would be a better name, but that would clash with the argument name,
# and we cannot change that because of https://github.com/nix-community/nixdoc/issues/76
actualFileset = _coerce "lib.fileset.trace: argument" fileset;
in
seq
(_printFileset actualFileset)
(x: x);
/*
Incrementally evaluate and trace a file set in a pretty way.
This function is only intended for debugging purposes.
The exact tracing format is unspecified and may change.
This function returns the given file set.
In comparison, [`trace`](#function-library-lib.fileset.trace) takes another argument to return.
This variant is useful for tracing file sets passed as arguments to other functions.
Type:
traceVal :: FileSet -> FileSet
Example:
toSource {
root = ./.;
fileset = traceVal (unions [
./Makefile
./src
./tests/run.sh
]);
}
=>
trace: /home/user/src/myProject
trace: - Makefile (regular)
trace: - src (all files in directory)
trace: - tests
trace: - run.sh (regular)
"/nix/store/...-source"
*/
traceVal =
/*
The file set to trace and return.
This argument can also be a path,
which gets [implicitly coerced to a file set](#sec-fileset-path-coercion).
*/
fileset:
let
# "fileset" would be a better name, but that would clash with the argument name,
# and we cannot change that because of https://github.com/nix-community/nixdoc/issues/76
actualFileset = _coerce "lib.fileset.traceVal: argument" fileset;
in
seq
(_printFileset actualFileset)
# We could also return the original fileset argument here,
# but that would then duplicate work for consumers of the fileset, because then they have to coerce it again
actualFileset;
}


@ -0,0 +1,536 @@
{ lib ? import ../. }:
let
inherit (builtins)
isAttrs
isPath
isString
pathExists
readDir
seq
split
trace
typeOf
;
inherit (lib.attrsets)
attrNames
attrValues
mapAttrs
setAttrByPath
zipAttrsWith
;
inherit (lib.filesystem)
pathType
;
inherit (lib.lists)
all
commonPrefix
drop
elemAt
filter
findFirst
findFirstIndex
foldl'
head
length
sublist
tail
;
inherit (lib.path)
append
splitRoot
;
inherit (lib.path.subpath)
components
join
;
inherit (lib.strings)
isStringLike
concatStringsSep
substring
stringLength
;
in
# Rare case of justified usage of rec:
# - This file is internal, so the return value doesn't matter, no need to make things overridable
# - The functions depend on each other
# - We want to expose all of these functions for easy testing
rec {
# If you change the internal representation, make sure to:
# - Increment this version
# - Add an additional migration function below
# - Update the description of the internal representation in ./README.md
_currentVersion = 3;
# Migrations between versions. The 0th element converts from v0 to v1, and so on
migrations = [
# Convert v0 into v1: Add the _internalBase{Root,Components} attributes
(
filesetV0:
let
parts = splitRoot filesetV0._internalBase;
in
filesetV0 // {
_internalVersion = 1;
_internalBaseRoot = parts.root;
_internalBaseComponents = components parts.subpath;
}
)
# Convert v1 into v2: filesetTree's can now also omit attributes to signal paths not being included
(
filesetV1:
# This change is backwards compatible (but not forwards compatible, so we still need a new version)
filesetV1 // {
_internalVersion = 2;
}
)
# Convert v2 into v3: filesetTree's now have a representation for an empty file set without a base path
(
filesetV2:
filesetV2 // {
# All v2 file sets are not the new empty file set
_internalIsEmptyWithoutBase = false;
_internalVersion = 3;
}
)
];
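# Illustration (hypothetical value): coercing a v1 file set applies
# migrations 1 and 2 in order, equivalent to
#   foldl' (fs: migration: migration fs) filesetV1 (sublist 1 2 migrations)
# which yields a v3 value with _internalIsEmptyWithoutBase = false.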
_noEvalMessage = ''
lib.fileset: Directly evaluating a file set is not supported.
To turn it into a usable source, use `lib.fileset.toSource`.
To pretty-print the contents, use `lib.fileset.trace` or `lib.fileset.traceVal`.'';
# The empty file set without a base path
_emptyWithoutBase = {
_type = "fileset";
_internalVersion = _currentVersion;
# The one and only!
_internalIsEmptyWithoutBase = true;
# Due to alphabetical ordering, this is evaluated last,
# which makes the nix repl output nicer than if it would be ordered first.
# It also allows evaluating it strictly up to this error, which could be useful
_noEval = throw _noEvalMessage;
};
# Create a fileset, see ./README.md#fileset
# Type: path -> filesetTree -> fileset
_create = base: tree:
let
# Decompose the base into its components
# See ../path/README.md for why we're not just using `toString`
parts = splitRoot base;
in
{
_type = "fileset";
_internalVersion = _currentVersion;
_internalIsEmptyWithoutBase = false;
_internalBase = base;
_internalBaseRoot = parts.root;
_internalBaseComponents = components parts.subpath;
_internalTree = tree;
# Due to alphabetical ordering, this is evaluated last,
# which makes the nix repl output nicer than if it would be ordered first.
# It also allows evaluating it strictly up to this error, which could be useful
_noEval = throw _noEvalMessage;
};
# Coerce a value to a fileset, erroring when the value cannot be coerced.
# The string gives the context for error messages.
# Type: String -> (fileset | Path) -> fileset
_coerce = context: value:
if value._type or "" == "fileset" then
if value._internalVersion > _currentVersion then
throw ''
${context} is a file set created from a future version of the file set library with a different internal representation:
- Internal version of the file set: ${toString value._internalVersion}
- Internal version of the library: ${toString _currentVersion}
Make sure to update your Nixpkgs to have a newer version of `lib.fileset`.''
else if value._internalVersion < _currentVersion then
let
# Get all the migration functions necessary to convert from the old to the current version
migrationsToApply = sublist value._internalVersion (_currentVersion - value._internalVersion) migrations;
in
foldl' (value: migration: migration value) value migrationsToApply
else
value
else if ! isPath value then
if isStringLike value then
throw ''
${context} ("${toString value}") is a string-like value, but it should be a path instead.
Paths represented as strings are not supported by `lib.fileset`, use `lib.sources` or derivations instead.''
else
throw ''
${context} is of type ${typeOf value}, but it should be a path instead.''
else if ! pathExists value then
throw ''
${context} (${toString value}) does not exist.''
else
_singleton value;
# Coerce many values to filesets, erroring when any value cannot be coerced,
# or if the filesystem root of the values doesn't match.
# Type: String -> [ { context :: String, value :: fileset | Path } ] -> [ fileset ]
_coerceMany = functionContext: list:
let
filesets = map ({ context, value }:
_coerce "${functionContext}: ${context}" value
) list;
# Find the first value with a base, there may be none!
firstWithBase = findFirst (fileset: ! fileset._internalIsEmptyWithoutBase) null filesets;
# This value is only accessed if first != null
firstBaseRoot = firstWithBase._internalBaseRoot;
# Finds the first element with a filesystem root different than the first element, if any
differentIndex = findFirstIndex (fileset:
# The empty value without a base doesn't have a base path
! fileset._internalIsEmptyWithoutBase
&& firstBaseRoot != fileset._internalBaseRoot
) null filesets;
in
# Only evaluates `differentIndex` if there are any elements with a base
if firstWithBase != null && differentIndex != null then
throw ''
${functionContext}: Filesystem roots are not the same:
${(head list).context}: root "${toString firstBaseRoot}"
${(elemAt list differentIndex).context}: root "${toString (elemAt filesets differentIndex)._internalBaseRoot}"
Different roots are not supported.''
else
filesets;
# Create a file set from a path.
# Type: Path -> fileset
_singleton = path:
let
type = pathType path;
in
if type == "directory" then
_create path type
else
# This turns a file path ./default.nix into a fileset with
# - _internalBase: ./.
# - _internalTree: {
# "default.nix" = <type>;
# }
# See ./README.md#single-files
_create (dirOf path)
{
${baseNameOf path} = type;
};
# Expand a directory representation to an equivalent one in attribute set form.
# All directory entries are included in the result.
# Type: Path -> filesetTree -> { <name> = filesetTree; }
_directoryEntries = path: value:
if value == "directory" then
readDir path
else
# Set all entries not present to null
mapAttrs (name: value: null) (readDir path)
// value;
/*
A normalisation of a filesetTree suitable for filtering with `builtins.path`:
- Replace all directories that have no files with `null`.
This removes directories that would be empty
- Replace all directories with all files with `"directory"`.
This speeds up the source filter function
Note that this function is strict, it evaluates the entire tree
Type: Path -> filesetTree -> filesetTree
*/
_normaliseTreeFilter = path: tree:
if tree == "directory" || isAttrs tree then
let
entries = _directoryEntries path tree;
normalisedSubtrees = mapAttrs (name: _normaliseTreeFilter (path + "/${name}")) entries;
subtreeValues = attrValues normalisedSubtrees;
in
# This triggers either when all files in a directory are filtered out
# Or when the directory doesn't contain any files at all
if all isNull subtreeValues then
null
# Triggers when we have the same as a `readDir path`, so we can turn it back into an equivalent "directory".
else if all isString subtreeValues then
"directory"
else
normalisedSubtrees
else
tree;
/*
A minimal normalisation of a filesetTree, intended for pretty-printing:
- If all children of a path are recursively included or empty directories, the path itself is also recursively included
- If all children of a path are fully excluded or empty directories, the path itself is an empty directory
- Other empty directories are represented with the special "emptyDir" string
While these could be replaced with `null`, that would take another mapAttrs
Note that this function is partially lazy.
Type: Path -> filesetTree -> filesetTree (with "emptyDir"'s)
*/
_normaliseTreeMinimal = path: tree:
if tree == "directory" || isAttrs tree then
let
entries = _directoryEntries path tree;
normalisedSubtrees = mapAttrs (name: _normaliseTreeMinimal (path + "/${name}")) entries;
subtreeValues = attrValues normalisedSubtrees;
in
# If there are no entries, or all entries are empty directories, return "emptyDir".
# After this branch we know that there's at least one file
if all (value: value == "emptyDir") subtreeValues then
"emptyDir"
# If all subtrees are fully included or empty directories
# (both of which are coincidentally represented as strings), return "directory".
# This takes advantage of the fact that empty directories can be represented as included directories.
# Note that the tree == "directory" check allows avoiding recursion
else if tree == "directory" || all (value: isString value) subtreeValues then
"directory"
# If all subtrees are fully excluded or empty directories, return null.
# This takes advantage of the fact that empty directories can be represented as excluded directories
else if all (value: isNull value || value == "emptyDir") subtreeValues then
null
# Mix of included and excluded entries
else
normalisedSubtrees
else
tree;
# Trace a filesetTree in a pretty way when the resulting value is evaluated.
# This can handle both normal filesetTree's, and ones returned from _normaliseTreeMinimal
# Type: Path -> filesetTree (with "emptyDir"'s) -> Null
_printMinimalTree = base: tree:
let
treeSuffix = tree:
if isAttrs tree then
""
else if tree == "directory" then
" (all files in directory)"
else
# This does "leak" the file type strings of the internal representation,
# but this is the main reason these file type strings even are in the representation!
# TODO: Consider removing that information from the internal representation for performance.
# The file types can still be printed by querying them only during tracing
" (${tree})";
# Only for attribute set trees
traceTreeAttrs = prevLine: indent: tree:
foldl' (prevLine: name:
let
subtree = tree.${name};
# Evaluating this prints the line for this subtree
thisLine =
trace "${indent}- ${name}${treeSuffix subtree}" prevLine;
in
if subtree == null || subtree == "emptyDir" then
# Don't print anything at all if this subtree is empty
prevLine
else if isAttrs subtree then
# A directory with explicit entries
# Do print this node, but also recurse
traceTreeAttrs thisLine "${indent} " subtree
else
# Either a file, or a recursively included directory
# Do print this node but no further recursion needed
thisLine
) prevLine (attrNames tree);
# Evaluating this will print the first line
firstLine =
if tree == null || tree == "emptyDir" then
trace "(empty)" null
else
trace "${toString base}${treeSuffix tree}" null;
in
if isAttrs tree then
traceTreeAttrs firstLine "" tree
else
firstLine;
# Pretty-print a file set in a pretty way when the resulting value is evaluated
# Type: fileset -> Null
_printFileset = fileset:
if fileset._internalIsEmptyWithoutBase then
trace "(empty)" null
else
_printMinimalTree fileset._internalBase
(_normaliseTreeMinimal fileset._internalBase fileset._internalTree);
# Turn a fileset into a source filter function suitable for `builtins.path`
# Only directories recursively containing at least one file are recursed into
# Type: fileset -> (String -> String -> Bool)
_toSourceFilter = fileset:
let
# Simplify the tree, necessary to make sure all empty directories are null
# which has the effect that they aren't included in the result
tree = _normaliseTreeFilter fileset._internalBase fileset._internalTree;
# The base path as a string with a single trailing slash
baseString =
if fileset._internalBaseComponents == [] then
# Need to handle the filesystem root specially
"/"
else
"/" + concatStringsSep "/" fileset._internalBaseComponents + "/";
baseLength = stringLength baseString;
# Check whether a list of path components under the base path exists in the tree.
# This function is called often, so it should be fast.
# Type: [ String ] -> Bool
inTree = components:
let
recurse = index: localTree:
if isAttrs localTree then
# We have an attribute set, meaning this is a directory with at least one file
if index >= length components then
# The path may have no more components though, meaning the filter is running on the directory itself,
# so we always include it, again because there's at least one file in it.
true
else
# If we do have more components, the filter runs on some entry inside this directory, so we need to recurse
# We do +2 because builtins.split is an interleaved list of the inbetweens and the matches
recurse (index + 2) localTree.${elemAt components index}
else
# If it's not an attribute set it can only be either null (in which case it's not included)
# or a string ("directory" or "regular", etc.) in which case it's included
localTree != null;
in recurse 0 tree;
# Filter suited when there's no files
empty = _: _: false;
# Filter suited when there's some files
# This can't be used for when there's no files, because the base directory is always included
nonEmpty =
path: _:
let
# Add a slash to the path string, turning "/foo" to "/foo/",
# making sure to not have any false prefix matches below.
# Note that this would produce "//" for "/",
# but builtins.path doesn't call the filter function on the `path` argument itself,
# meaning this function can never receive "/" as an argument
pathSlash = path + "/";
in
# Same as `hasPrefix pathSlash baseString`, but more efficient.
# With base /foo/bar we need to include /foo:
# hasPrefix "/foo/" "/foo/bar/"
if substring 0 (stringLength pathSlash) baseString == pathSlash then
true
# Same as `! hasPrefix baseString pathSlash`, but more efficient.
# With base /foo/bar we need to exclude /baz
# ! hasPrefix "/baz/" "/foo/bar/"
else if substring 0 baseLength pathSlash != baseString then
false
else
# Same as `removePrefix baseString path`, but more efficient.
# From the above code we know that hasPrefix baseString pathSlash holds, so this is safe.
# We don't use pathSlash here because we only needed the trailing slash for the prefix matching.
# With base /foo and path /foo/bar/baz this gives
# inTree (split "/" (removePrefix "/foo/" "/foo/bar/baz"))
# == inTree (split "/" "bar/baz")
# == inTree [ "bar" "baz" ]
inTree (split "/" (substring baseLength (-1) path));
in
# Special case because the code below assumes that the _internalBase is always included in the result
# which shouldn't be done when we have no files at all in the base
# This also forces the tree before returning the filter, leads to earlier error messages
if fileset._internalIsEmptyWithoutBase || tree == null then
empty
else
nonEmpty;
# Computes the union of a list of filesets.
# The filesets must already be coerced and validated to be in the same filesystem root
# Type: [ Fileset ] -> Fileset
_unionMany = filesets:
let
# All filesets that have a base, aka not the ones that are the empty value without a base
filesetsWithBase = filter (fileset: ! fileset._internalIsEmptyWithoutBase) filesets;
# The first fileset that has a base.
# This value is only accessed if there are any filesets with a base at all.
firstWithBase = head filesetsWithBase;
# To be able to union filesetTree's together, they need to have the same base path.
# Base paths can be unioned by taking their common prefix,
# e.g. such that `union /foo/bar /foo/baz` has the base path `/foo`
# A list of path components common to all base paths.
# Note that commonPrefix can only be fully evaluated,
# so this cannot cause a stack overflow due to a build-up of unevaluated thunks.
commonBaseComponents = foldl'
(components: el: commonPrefix components el._internalBaseComponents)
firstWithBase._internalBaseComponents
# We could also not do the `tail` here to avoid a list allocation,
# but then we'd have to pay for a potentially expensive
# but unnecessary `commonPrefix` call
(tail filesetsWithBase);
# The common base path assembled from a filesystem root and the common components
commonBase = append firstWithBase._internalBaseRoot (join commonBaseComponents);
# A list of filesetTree's that all have the same base path
# This is achieved by nesting the trees into the components they have over the common base path
# E.g. `union /foo/bar /foo/baz` has the base path /foo
# So the tree under `/foo/bar` gets nested under `{ bar = ...; ... }`,
# while the tree under `/foo/baz` gets nested under `{ baz = ...; ... }`
# Therefore allowing combined operations over them.
trees = map (fileset:
setAttrByPath
(drop (length commonBaseComponents) fileset._internalBaseComponents)
fileset._internalTree
) filesetsWithBase;
# Folds all trees together into a single one using _unionTree
# We do not use a fold here because it would cause a thunk build-up
# which could cause a stack overflow for a large number of trees
resultTree = _unionTrees trees;
in
# If there's no values with a base, we have no files
if filesetsWithBase == [ ] then
_emptyWithoutBase
else
_create commonBase resultTree;
# The union of multiple filesetTree's with the same base path.
# Later elements are only evaluated if necessary.
# Type: [ filesetTree ] -> filesetTree
_unionTrees = trees:
let
stringIndex = findFirstIndex isString null trees;
withoutNull = filter (tree: tree != null) trees;
in
if stringIndex != null then
# If there's a string, it's always a fully included tree (dir or file),
# no need to look at other elements
elemAt trees stringIndex
else if withoutNull == [ ] then
# If all trees are null, then the resulting tree is also null
null
else
# The non-null elements have to be attribute sets representing partial trees
# We need to recurse into those
zipAttrsWith (name: _unionTrees) withoutNull;
}


@ -0,0 +1,26 @@
# This overlay implements mocking of the lib.path.splitRoot function
# It pretends that the last component named "mock-root" is the root:
#
# splitRoot /foo/mock-root/bar/mock-root/baz
# => {
# root = /foo/mock-root/bar/mock-root;
# subpath = "./baz";
# }
self: super: {
path = super.path // {
splitRoot = path:
let
parts = super.path.splitRoot path;
components = self.path.subpath.components parts.subpath;
count = self.length components;
rootIndex = count - self.lists.findFirstIndex
(component: component == "mock-root")
(self.length components)
(self.reverseList components);
root = self.path.append parts.root (self.path.subpath.join (self.take rootIndex components));
subpath = self.path.subpath.join (self.drop rootIndex components);
in {
inherit root subpath;
};
};
}

third_party/nixpkgs/lib/fileset/tests.sh vendored Executable file

@ -0,0 +1,733 @@
#!/usr/bin/env bash
# shellcheck disable=SC2016
# Tests lib.fileset
# Run:
# [nixpkgs]$ lib/fileset/tests.sh
# or:
# [nixpkgs]$ nix-build lib/tests/release.nix
set -euo pipefail
shopt -s inherit_errexit dotglob
die() {
# The second to last entry contains the line number of the top-level caller
lineIndex=$(( ${#BASH_LINENO[@]} - 2 ))
echo >&2 -e "test case at ${BASH_SOURCE[0]}:${BASH_LINENO[$lineIndex]} failed:" "$@"
exit 1
}
if test -n "${TEST_LIB:-}"; then
NIX_PATH=nixpkgs="$(dirname "$TEST_LIB")"
else
NIX_PATH=nixpkgs="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.."; pwd)"
fi
export NIX_PATH
tmp="$(mktemp -d)"
clean_up() {
rm -rf "$tmp"
}
trap clean_up EXIT SIGINT SIGTERM
work="$tmp/work"
mkdir "$work"
cd "$work"
# Crudely unquotes a JSON string by just taking everything between the first and the second quote.
# We're only using this for resulting /nix/store paths, which can't contain " anyways,
# nor can they contain any other characters that would need to be escaped specially in JSON
# This way we don't need to add a dependency on e.g. jq
crudeUnquoteJSON() {
cut -d \" -f2
}
prefixExpression='let
lib = import <nixpkgs/lib>;
internal = import <nixpkgs/lib/fileset/internal.nix> {
inherit lib;
};
in
with lib;
with internal;
with lib.fileset;'
# Check that two Nix expressions successfully evaluate to the same value.
# The expressions have `lib.fileset` in scope.
# Usage: expectEqual NIX NIX
expectEqual() {
local actualExpr=$1
local expectedExpr=$2
if actualResult=$(nix-instantiate --eval --strict --show-trace 2>"$tmp"/actualStderr \
--expr "$prefixExpression ($actualExpr)"); then
actualExitCode=$?
else
actualExitCode=$?
fi
actualStderr=$(< "$tmp"/actualStderr)
if expectedResult=$(nix-instantiate --eval --strict --show-trace 2>"$tmp"/expectedStderr \
--expr "$prefixExpression ($expectedExpr)"); then
expectedExitCode=$?
else
expectedExitCode=$?
fi
expectedStderr=$(< "$tmp"/expectedStderr)
if [[ "$actualExitCode" != "$expectedExitCode" ]]; then
echo "$actualStderr" >&2
echo "$actualResult" >&2
die "$actualExpr should have exited with $expectedExitCode, but it exited with $actualExitCode"
fi
if [[ "$actualResult" != "$expectedResult" ]]; then
die "$actualExpr should have evaluated to $expectedExpr:\n$expectedResult\n\nbut it evaluated to\n$actualResult"
fi
if [[ "$actualStderr" != "$expectedStderr" ]]; then
die "$actualExpr should have had this on stderr:\n$expectedStderr\n\nbut it was\n$actualStderr"
fi
}
# Check that a nix expression evaluates successfully to a store path and returns it (without quotes).
# The expression has `lib.fileset` in scope.
# Usage: expectStorePath NIX
expectStorePath() {
local expr=$1
if ! result=$(nix-instantiate --eval --strict --json --read-write-mode --show-trace \
--expr "$prefixExpression ($expr)"); then
die "$expr failed to evaluate, but it was expected to succeed"
fi
# This is safe because we assume to get back a store path in a string
crudeUnquoteJSON <<< "$result"
}
# Check that a nix expression fails to evaluate (strictly, read-write-mode).
# And check the received stderr against a regex
# The expression has `lib.fileset` in scope.
# Usage: expectFailure NIX REGEX
expectFailure() {
local expr=$1
local expectedErrorRegex=$2
if result=$(nix-instantiate --eval --strict --read-write-mode --show-trace 2>"$tmp/stderr" \
--expr "$prefixExpression $expr"); then
die "$expr evaluated successfully to $result, but it was expected to fail"
fi
stderr=$(<"$tmp/stderr")
if [[ ! "$stderr" =~ $expectedErrorRegex ]]; then
die "$expr should have errored with this regex pattern:\n\n$expectedErrorRegex\n\nbut this was the actual error:\n\n$stderr"
fi
}
# Check that the traces of a Nix expression are as expected when evaluated.
# The expression has `lib.fileset` in scope.
# Usage: expectTrace NIX STR
expectTrace() {
local expr=$1
local expectedTrace=$2
nix-instantiate --eval --show-trace >/dev/null 2>"$tmp"/stderrTrace \
--expr "$prefixExpression trace ($expr)" || true
actualTrace=$(sed -n 's/^trace: //p' "$tmp/stderrTrace")
nix-instantiate --eval --show-trace >/dev/null 2>"$tmp"/stderrTraceVal \
--expr "$prefixExpression traceVal ($expr)" || true
actualTraceVal=$(sed -n 's/^trace: //p' "$tmp/stderrTraceVal")
# Test that traceVal returns the same trace as trace
if [[ "$actualTrace" != "$actualTraceVal" ]]; then
cat "$tmp"/stderrTrace >&2
die "$expr traced this for lib.fileset.trace:\n\n$actualTrace\n\nand something different for lib.fileset.traceVal:\n\n$actualTraceVal"
fi
if [[ "$actualTrace" != "$expectedTrace" ]]; then
cat "$tmp"/stderrTrace >&2
die "$expr should have traced this:\n\n$expectedTrace\n\nbut this was actually traced:\n\n$actualTrace"
fi
}
# We conditionally use inotifywait in withFileMonitor.
# Check early whether it's available
# TODO: Darwin support, though not crucial since we have Linux CI
if type inotifywait 2>/dev/null >/dev/null; then
canMonitor=1
else
echo "Warning: Cannot check for paths not getting read since the inotifywait command (from the inotify-tools package) is not available" >&2
canMonitor=
fi
# Run a function while monitoring that it doesn't read certain paths
# Usage: withFileMonitor FUNNAME PATH...
# - FUNNAME should be a bash function that:
# - Performs some operation that should not read some paths
# - Deletes the paths it shouldn't read, without triggering any open events
# - PATH... are the paths that should not get read
#
# This function outputs the same as FUNNAME
withFileMonitor() {
local funName=$1
shift
# If we can't monitor files or have none to monitor, just run the function directly
if [[ -z "$canMonitor" ]] || (( "$#" == 0 )); then
"$funName"
else
# Use a subshell to start the coprocess in and use a trap to kill it when exiting the subshell
(
# Assigned by coproc, makes shellcheck happy
local watcher watcher_PID
# Start inotifywait in the background to monitor all excluded paths
coproc watcher {
# inotifywait outputs a string on stderr when ready
# Redirect it to stdout so we can access it from the coproc's stdout fd
# exec so that the coprocess is inotify itself, making the kill below work correctly
# See below why we listen to both open and delete_self events
exec inotifywait --format='%e %w' --event open,delete_self --monitor "$@" 2>&1
}
# This will trigger when this subshell exits, no matter if successful or not
# After exiting the subshell, the parent shell will continue executing
trap 'kill "${watcher_PID}"' exit
# Synchronously wait until inotifywait is ready
while read -r -u "${watcher[0]}" line && [[ "$line" != "Watches established." ]]; do
:
done
# Call the function that should not read the given paths and delete them afterwards
"$funName"
# Get the first event
read -r -u "${watcher[0]}" event file
# With funName potentially reading files first before deleting them,
# there's only these two possible event timelines:
# - open*, ..., open*, delete_self, ..., delete_self: If some excluded paths were read
# - delete_self, ..., delete_self: If no excluded paths were read
# So by looking at the first event we can figure out which one it is!
# This also means we don't have to wait to collect all events.
case "$event" in
OPEN*)
die "$funName opened excluded file $file when it shouldn't have"
;;
DELETE_SELF)
# Expected events
;;
*)
die "During $funName, Unexpected event type '$event' on file $file that should be excluded"
;;
esac
)
fi
}
# Check whether a file set includes/excludes declared paths as expected, usage:
#
# tree=(
# [a/b]=1 # Declare that file a/b should exist and expect it to be included in the store path
# [c/a]=0 # Declare that file c/a should exist and expect it to be excluded from the store path
# [c/d/]=0 # Declare that directory c/d/ should exist and expect it to be excluded from the store path
# )
# checkFileset './a' # Pass the fileset as the argument
declare -A tree
checkFileset() {
local fileset=$1
# Process the tree into separate arrays for included paths, excluded paths and excluded files.
local -a included=()
local -a excluded=()
local -a excludedFiles=()
# Track which paths need to be created
local -a dirsToCreate=()
local -a filesToCreate=()
for p in "${!tree[@]}"; do
# If keys end with a `/` we treat them as directories, otherwise files
if [[ "$p" =~ /$ ]]; then
dirsToCreate+=("$p")
isFile=
else
filesToCreate+=("$p")
isFile=1
fi
case "${tree[$p]}" in
1)
included+=("$p")
;;
0)
excluded+=("$p")
if [[ -n "$isFile" ]]; then
excludedFiles+=("$p")
fi
;;
*)
die "Unsupported tree value: ${tree[$p]}"
esac
done
# Create all the necessary paths.
# This is done with only a fixed number of processes,
# in order to not be too slow,
# though it does mean we're somewhat limited in how many files can be created
if (( ${#dirsToCreate[@]} != 0 )); then
mkdir -p "${dirsToCreate[@]}"
fi
if (( ${#filesToCreate[@]} != 0 )); then
readarray -d '' -t parentsToCreate < <(dirname -z "${filesToCreate[@]}")
mkdir -p "${parentsToCreate[@]}"
touch "${filesToCreate[@]}"
fi
expression="toSource { root = ./.; fileset = $fileset; }"
# We don't have lambdas in bash unfortunately,
# so we just define a function instead and then pass its name
# shellcheck disable=SC2317
run() {
# Call toSource with the fileset, triggering open events for all files that are added to the store
expectStorePath "$expression"
if (( ${#excludedFiles[@]} != 0 )); then
rm "${excludedFiles[@]}"
fi
}
# Runs the function while checking that the given excluded files aren't read
storePath=$(withFileMonitor run "${excludedFiles[@]}")
# For each path that should be included, make sure it does occur in the resulting store path
for p in "${included[@]}"; do
if [[ ! -e "$storePath/$p" ]]; then
die "$expression doesn't include path $p when it should have"
fi
done
# For each path that should be excluded, make sure it doesn't occur in the resulting store path
for p in "${excluded[@]}"; do
if [[ -e "$storePath/$p" ]]; then
die "$expression included path $p when it shouldn't have"
fi
done
rm -rf -- *
}
#### Error messages #####
# Absolute paths in strings cannot be passed as `root`
expectFailure 'toSource { root = "/nix/store/foobar"; fileset = ./.; }' 'lib.fileset.toSource: `root` \("/nix/store/foobar"\) is a string-like value, but it should be a path instead.
\s*Paths in strings are not supported by `lib.fileset`, use `lib.sources` or derivations instead.'
# Only paths are accepted as `root`
expectFailure 'toSource { root = 10; fileset = ./.; }' 'lib.fileset.toSource: `root` is of type int, but it should be a path instead.'
# Different filesystem roots in root and fileset are not supported
mkdir -p {foo,bar}/mock-root
expectFailure 'with ((import <nixpkgs/lib>).extend (import <nixpkgs/lib/fileset/mock-splitRoot.nix>)).fileset;
toSource { root = ./foo/mock-root; fileset = ./bar/mock-root; }
' 'lib.fileset.toSource: Filesystem roots are not the same for `fileset` and `root` \("'"$work"'/foo/mock-root"\):
\s*`root`: root "'"$work"'/foo/mock-root"
\s*`fileset`: root "'"$work"'/bar/mock-root"
\s*Different roots are not supported.'
rm -rf -- *
# `root` needs to exist
expectFailure 'toSource { root = ./a; fileset = ./.; }' 'lib.fileset.toSource: `root` \('"$work"'/a\) does not exist.'
# `root` needs to be a file
touch a
expectFailure 'toSource { root = ./a; fileset = ./a; }' 'lib.fileset.toSource: `root` \('"$work"'/a\) is a file, but it should be a directory instead. Potential solutions:
\s*- If you want to import the file into the store _without_ a containing directory, use string interpolation or `builtins.path` instead of this function.
\s*- If you want to import the file into the store _with_ a containing directory, set `root` to the containing directory, such as '"$work"', and set `fileset` to the file path.'
rm -rf -- *
# The fileset argument should be evaluated, even if the directory is empty
expectFailure 'toSource { root = ./.; fileset = abort "This should be evaluated"; }' 'evaluation aborted with the following error message: '\''This should be evaluated'\'
# Only paths under `root` should be able to influence the result
mkdir a
expectFailure 'toSource { root = ./a; fileset = ./.; }' 'lib.fileset.toSource: `fileset` could contain files in '"$work"', which is not under the `root` \('"$work"'/a\). Potential solutions:
\s*- Set `root` to '"$work"' or any directory higher up. This changes the layout of the resulting store path.
\s*- Set `fileset` to a file set that cannot contain files outside the `root` \('"$work"'/a\). This could change the files included in the result.'
rm -rf -- *
# Path coercion only works for paths
expectFailure 'toSource { root = ./.; fileset = 10; }' 'lib.fileset.toSource: `fileset` is of type int, but it should be a path instead.'
expectFailure 'toSource { root = ./.; fileset = "/some/path"; }' 'lib.fileset.toSource: `fileset` \("/some/path"\) is a string-like value, but it should be a path instead.
\s*Paths represented as strings are not supported by `lib.fileset`, use `lib.sources` or derivations instead.'
# Path coercion errors for non-existent paths
expectFailure 'toSource { root = ./.; fileset = ./a; }' 'lib.fileset.toSource: `fileset` \('"$work"'/a\) does not exist.'
# File sets cannot be evaluated directly
expectFailure 'union ./. ./.' 'lib.fileset: Directly evaluating a file set is not supported.
\s*To turn it into a usable source, use `lib.fileset.toSource`.
\s*To pretty-print the contents, use `lib.fileset.trace` or `lib.fileset.traceVal`.'
expectFailure '_emptyWithoutBase' 'lib.fileset: Directly evaluating a file set is not supported.
\s*To turn it into a usable source, use `lib.fileset.toSource`.
\s*To pretty-print the contents, use `lib.fileset.trace` or `lib.fileset.traceVal`.'
# Past versions of the internal representation are supported
expectEqual '_coerce "<tests>: value" { _type = "fileset"; _internalVersion = 0; _internalBase = ./.; }' \
'{ _internalBase = ./.; _internalBaseComponents = path.subpath.components (path.splitRoot ./.).subpath; _internalBaseRoot = /.; _internalIsEmptyWithoutBase = false; _internalVersion = 3; _type = "fileset"; }'
expectEqual '_coerce "<tests>: value" { _type = "fileset"; _internalVersion = 1; }' \
'{ _type = "fileset"; _internalIsEmptyWithoutBase = false; _internalVersion = 3; }'
expectEqual '_coerce "<tests>: value" { _type = "fileset"; _internalVersion = 2; }' \
'{ _type = "fileset"; _internalIsEmptyWithoutBase = false; _internalVersion = 3; }'
# Future versions of the internal representation are unsupported
expectFailure '_coerce "<tests>: value" { _type = "fileset"; _internalVersion = 4; }' '<tests>: value is a file set created from a future version of the file set library with a different internal representation:
\s*- Internal version of the file set: 4
\s*- Internal version of the library: 3
\s*Make sure to update your Nixpkgs to have a newer version of `lib.fileset`.'
# _create followed by _coerce should give the inputs back without any validation
expectEqual '{
inherit (_coerce "<test>" (_create ./. "directory"))
_internalVersion _internalBase _internalTree;
}' '{ _internalBase = ./.; _internalTree = "directory"; _internalVersion = 3; }'
#### Resulting store path ####
# The store path name should be "source"
expectEqual 'toSource { root = ./.; fileset = ./.; }' 'sources.cleanSourceWith { name = "source"; src = ./.; }'
# We should be able to import an empty directory and end up with an empty result
tree=(
)
checkFileset './.'
# The empty value without a base should also result in an empty result
tree=(
[a]=0
)
checkFileset '_emptyWithoutBase'
# Directories recursively containing no files are not included
tree=(
[e/]=0
[d/e/]=0
[d/d/e/]=0
[d/d/f]=1
[d/f]=1
[f]=1
)
checkFileset './.'
# Check trees that could cause a naïve string prefix checking implementation to fail
tree=(
[a]=0
[ab/x]=0
[ab/xy]=1
[ab/xyz]=0
[abc]=0
)
checkFileset './ab/xy'
# Check path coercion examples in ../../doc/functions/fileset.section.md
tree=(
[a/x]=1
[a/b/y]=1
[c/]=0
[c/d/]=0
)
checkFileset './.'
tree=(
[a/x]=1
[a/b/y]=1
[c/]=0
[c/d/]=0
)
checkFileset './a'
tree=(
[a/x]=1
[a/b/y]=0
[c/]=0
[c/d/]=0
)
checkFileset './a/x'
tree=(
[a/x]=0
[a/b/y]=1
[c/]=0
[c/d/]=0
)
checkFileset './a/b'
tree=(
[a/x]=0
[a/b/y]=0
[c/]=0
[c/d/]=0
)
checkFileset './c'
# Test the source filter for the somewhat special case of files in the filesystem root
# We can't easily test this with the above functions, because we can't write to the filesystem root and we don't want to make any assumptions about which files exist in the sandbox
expectEqual '_toSourceFilter (_create /. null) "/foo" ""' 'false'
expectEqual '_toSourceFilter (_create /. { foo = "regular"; }) "/foo" ""' 'true'
expectEqual '_toSourceFilter (_create /. { foo = null; }) "/foo" ""' 'false'
## lib.fileset.union, lib.fileset.unions
# Different filesystem roots in root and fileset are not supported
mkdir -p {foo,bar}/mock-root
expectFailure 'with ((import <nixpkgs/lib>).extend (import <nixpkgs/lib/fileset/mock-splitRoot.nix>)).fileset;
toSource { root = ./.; fileset = union ./foo/mock-root ./bar/mock-root; }
' 'lib.fileset.union: Filesystem roots are not the same:
\s*first argument: root "'"$work"'/foo/mock-root"
\s*second argument: root "'"$work"'/bar/mock-root"
\s*Different roots are not supported.'
expectFailure 'with ((import <nixpkgs/lib>).extend (import <nixpkgs/lib/fileset/mock-splitRoot.nix>)).fileset;
toSource { root = ./.; fileset = unions [ ./foo/mock-root ./bar/mock-root ]; }
' 'lib.fileset.unions: Filesystem roots are not the same:
\s*element 0: root "'"$work"'/foo/mock-root"
\s*element 1: root "'"$work"'/bar/mock-root"
\s*Different roots are not supported.'
rm -rf -- *
# Coercion errors show the correct context
expectFailure 'toSource { root = ./.; fileset = union ./a ./.; }' 'lib.fileset.union: first argument \('"$work"'/a\) does not exist.'
expectFailure 'toSource { root = ./.; fileset = union ./. ./b; }' 'lib.fileset.union: second argument \('"$work"'/b\) does not exist.'
expectFailure 'toSource { root = ./.; fileset = unions [ ./a ./. ]; }' 'lib.fileset.unions: element 0 \('"$work"'/a\) does not exist.'
expectFailure 'toSource { root = ./.; fileset = unions [ ./. ./b ]; }' 'lib.fileset.unions: element 1 \('"$work"'/b\) does not exist.'
# unions needs a list
expectFailure 'toSource { root = ./.; fileset = unions null; }' 'lib.fileset.unions: Expected argument to be a list, but got a null.'
# The tree of later arguments should not be evaluated if a former argument already includes all files
tree=()
checkFileset 'union ./. (_create ./. (abort "This should not be used!"))'
checkFileset 'unions [ ./. (_create ./. (abort "This should not be used!")) ]'
# unions doesn't include any files for an empty list or only empty values without a base
tree=(
[x]=0
[y/z]=0
)
checkFileset 'unions [ ]'
checkFileset 'unions [ _emptyWithoutBase ]'
checkFileset 'unions [ _emptyWithoutBase _emptyWithoutBase ]'
checkFileset 'union _emptyWithoutBase _emptyWithoutBase'
# The empty value without a base is the left and right identity of union
tree=(
[x]=1
[y/z]=0
)
checkFileset 'union ./x _emptyWithoutBase'
checkFileset 'union _emptyWithoutBase ./x'
# union doesn't include files that weren't specified
tree=(
[x]=1
[y]=1
[z]=0
)
checkFileset 'union ./x ./y'
checkFileset 'unions [ ./x ./y ]'
# Also for directories
tree=(
[x/a]=1
[x/b]=1
[y/a]=1
[y/b]=1
[z/a]=0
[z/b]=0
)
checkFileset 'union ./x ./y'
checkFileset 'unions [ ./x ./y ]'
# And for very specific paths
tree=(
[x/a]=1
[x/b]=0
[y/a]=0
[y/b]=1
[z/a]=0
[z/b]=0
)
checkFileset 'union ./x/a ./y/b'
checkFileset 'unions [ ./x/a ./y/b ]'
# unions or chained union calls can include more paths
tree=(
[x/a]=1
[x/b]=1
[y/a]=1
[y/b]=0
[z/a]=0
[z/b]=1
)
checkFileset 'unions [ ./x/a ./x/b ./y/a ./z/b ]'
checkFileset 'union (union ./x/a ./x/b) (union ./y/a ./z/b)'
checkFileset 'union (union (union ./x/a ./x/b) ./y/a) ./z/b'
# unions should not stack overflow, even if many elements are passed
tree=()
for i in $(seq 1000); do
tree[$i/a]=1
tree[$i/b]=0
done
# This is actually really hard to test:
# A lot of files would be needed to cause a stack overflow.
# And while we could limit the maximum stack size using `ulimit -s`,
# that turns out to not be very deterministic: https://github.com/NixOS/nixpkgs/pull/256417#discussion_r1339396686.
# Meanwhile, the test infra here is not the fastest; creating 10000 files would be too slow.
# So, just using 1000 files for now.
checkFileset 'unions (mapAttrsToList (name: _: ./. + "/${name}/a") (builtins.readDir ./.))'
## Tracing
# The second trace argument is returned
expectEqual 'trace ./. "some value"' 'builtins.trace "(empty)" "some value"'
# traceVal returns the fileset argument
expectEqual 'traceVal ./.' 'builtins.trace "(empty)" (_create ./. "directory")'
# The tracing happens before the final argument is needed
expectEqual 'trace ./.' 'builtins.trace "(empty)" (x: x)'
# Tracing an empty directory shows it as such
expectTrace './.' '(empty)'
# This also works if there are directories, but all recursively without files
mkdir -p a/b/c
expectTrace './.' '(empty)'
rm -rf -- *
# The empty file set without a base also prints as empty
expectTrace '_emptyWithoutBase' '(empty)'
expectTrace 'unions [ ]' '(empty)'
# If a directory is fully included, print it as such
touch a
expectTrace './.' "$work"' (all files in directory)'
rm -rf -- *
# If a directory is not fully included, recurse
mkdir a b
touch a/{x,y} b/{x,y}
expectTrace 'union ./a/x ./b' "$work"'
- a
- x (regular)
- b (all files in directory)'
rm -rf -- *
# If an included path is a file, print its type
touch a x
ln -s a b
mkfifo c
expectTrace 'unions [ ./a ./b ./c ]' "$work"'
- a (regular)
- b (symlink)
- c (unknown)'
rm -rf -- *
# Do not print directories without any files recursively
mkdir -p a/b/c
touch b x
expectTrace 'unions [ ./a ./b ]' "$work"'
- b (regular)'
rm -rf -- *
# If all children are either fully included or empty directories,
# the parent should be printed as fully included
touch a
mkdir b
expectTrace 'union ./a ./b' "$work"' (all files in directory)'
rm -rf -- *
mkdir -p x/b x/c
touch x/a
touch a
# If all children are either fully excluded or empty directories,
# the parent should be shown (or rather not shown) as fully excluded
expectTrace 'unions [ ./a ./x/b ./x/c ]' "$work"'
- a (regular)'
rm -rf -- *
# Completely filtered out directories also print as empty
touch a
expectTrace '_create ./. {}' '(empty)'
rm -rf -- *
# A general test to make sure the resulting format makes sense
# Such as indentation and ordering
mkdir -p bar/{qux,someDir}
touch bar/{baz,qux,someDir/a} foo
touch bar/qux/x
ln -s x bar/qux/a
mkfifo bar/qux/b
expectTrace 'unions [
./bar/baz
./bar/qux/a
./bar/qux/b
./bar/someDir/a
./foo
]' "$work"'
- bar
- baz (regular)
- qux
- a (symlink)
- b (unknown)
- someDir (all files in directory)
- foo (regular)'
rm -rf -- *
# For recursively included directories,
# `(all files in directory)` should only be used if there's at least one file (otherwise it would be `(empty)`)
# and this should be determined without doing a full search
#
# a is intentionally ordered first here in order to allow triggering the short-circuit behavior
# We then check that b is not read
# In a more realistic scenario, some directories might need to be recursed into,
# but a file would be quickly found to trigger the short-circuit.
touch a
mkdir b
# We don't have lambdas in bash unfortunately,
# so we just define a function instead and then pass its name
# shellcheck disable=SC2317
run() {
# This shouldn't read b/
expectTrace './.' "$work"' (all files in directory)'
# Remove all files immediately after, triggering delete_self events for all of them
rmdir b
}
# Runs the function while checking that b isn't read
withFileMonitor run b
rm -rf -- *
# Partially included directories trace entries as they are evaluated
touch a b c
expectTrace '_create ./. { a = null; b = "regular"; c = throw "b"; }' "$work"'
- b (regular)'
# Except entries that need to be evaluated to even figure out if it's only partially included:
# Here the directory could be fully excluded or included just from seeing a and b,
# so c needs to be evaluated before anything can be traced
expectTrace '_create ./. { a = null; b = null; c = throw "c"; }' ''
expectTrace '_create ./. { a = "regular"; b = "regular"; c = throw "c"; }' ''
rm -rf -- *
# We can trace large directories (10000 here) without any problems
filesToCreate=({0..9}{0..9}{0..9}{0..9})
expectedTrace=$work$'\n'$(printf -- '- %s (regular)\n' "${filesToCreate[@]}")
# We need an excluded file so it doesn't print as `(all files in directory)`
touch 0 "${filesToCreate[@]}"
expectTrace 'unions (mapAttrsToList (n: _: ./. + "/${n}") (removeAttrs (builtins.readDir ./.) [ "0" ]))' "$expectedTrace"
rm -rf -- *
# TODO: Once we have combinators and a property testing library, derive property tests from https://en.wikipedia.org/wiki/Algebra_of_sets
echo >&2 tests ok

View file

@ -189,10 +189,10 @@ rec {
* }
*
*> [url "ssh://git@github.com/"]
*> insteadOf = https://github.com/
*> insteadOf = "https://github.com"
*>
*> [user]
*> name = edolstra
*> name = "edolstra"
*/
toGitINI = attrs:
with builtins;
@ -209,9 +209,17 @@ rec {
else
''${section} "${subsection}"'';
mkValueString = v:
let
escapedV = ''
"${
replaceStrings [ "\n" " " ''"'' "\\" ] [ "\\n" "\\t" ''\"'' "\\\\" ] v
}"'';
in mkValueStringDefault { } (if isString v then escapedV else v);
# generation for multiple ini values
mkKeyValue = k: v:
let mkKeyValue = mkKeyValueDefault { } " = " k;
let mkKeyValue = mkKeyValueDefault { inherit mkValueString; } " = " k;
in concatStringsSep "\n" (map (kv: "\t" + mkKeyValue kv) (lib.toList v));
# converts { a.b.c = 5; } to { "a.b".c = 5; } for toINI
@ -230,6 +238,14 @@ rec {
in
toINI_ (gitFlattenAttrs attrs);
# mkKeyValueDefault wrapper that handles dconf INI quirks.
# The main difference of this format is that it requires strings to be quoted.
mkDconfKeyValue = mkKeyValueDefault { mkValueString = v: toString (lib.gvariant.mkValue v); } "=";
# Generates INI in dconf keyfile style. See https://help.gnome.org/admin/system-admin-guide/stable/dconf-keyfiles.html.en
# for details.
toDconfINI = toINI { mkKeyValue = mkDconfKeyValue; };
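# For instance (a sketch, not a test from this file):
#   toDconfINI { "org/gnome/desktop" = { picture-uri = "file:///tmp/bg.png"; }; }
# runs the value through lib.gvariant.mkValue and yields
#   [org/gnome/desktop]
#   picture-uri='file:///tmp/bg.png'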
/* Generates JSON from an arbitrary (non-function) value.
* For more information see the documentation of the builtin.
*/

third_party/nixpkgs/lib/gvariant.nix vendored Normal file
View file

@ -0,0 +1,290 @@
# This file is based on https://github.com/nix-community/home-manager
# Copyright (c) 2017-2022 Home Manager contributors
#
{ lib }:
/* A partial and basic implementation of GVariant formatted strings.
See https://docs.gtk.org/glib/gvariant-format-strings.html for details.
Note, this API is not considered fully stable and it might therefore
change in backwards-incompatible ways without prior notice.
*/
let
inherit (lib)
concatMapStringsSep concatStrings escape head replaceStrings;
mkPrimitive = t: v: {
_type = "gvariant";
type = t;
value = v;
__toString = self: "@${self.type} ${toString self.value}"; # https://docs.gtk.org/glib/gvariant-text.html
};
type = {
arrayOf = t: "a${t}";
maybeOf = t: "m${t}";
tupleOf = ts: "(${concatStrings ts})";
dictionaryEntryOf = nameType: valueType: "{${nameType}${valueType}}";
string = "s";
boolean = "b";
uchar = "y";
int16 = "n";
uint16 = "q";
int32 = "i";
uint32 = "u";
int64 = "x";
uint64 = "t";
double = "d";
variant = "v";
};
/* Check if a value is a GVariant value
Type:
isGVariant :: Any -> Bool
*/
isGVariant = v: v._type or "" == "gvariant";
in
rec {
inherit type isGVariant;
/* Returns the GVariant value that most closely matches the given Nix value.
If no GVariant value can be found unambiguously then an error is thrown.
Type:
mkValue :: Any -> gvariant
*/
mkValue = v:
if builtins.isBool v then
mkBoolean v
else if builtins.isFloat v then
mkDouble v
else if builtins.isString v then
mkString v
else if builtins.isList v then
mkArray v
else if isGVariant v then
v
else
throw "The GVariant type of ${v} can't be inferred.";
/* Returns the GVariant array from the given type of the elements and a Nix list.
Type:
mkArray :: [Any] -> gvariant
Example:
# Creating a string array
lib.gvariant.mkArray [ "a" "b" "c" ]
*/
mkArray = elems:
let
vs = map mkValue (lib.throwIf (elems == [ ]) "Please create an empty array with mkEmptyArray." elems);
elemType = lib.throwIfNot (lib.all (t: (head vs).type == t) (map (v: v.type) vs))
"Elements in a list should have the same type."
(head vs).type;
in
mkPrimitive (type.arrayOf elemType) vs // {
__toString = self:
"@${self.type} [${concatMapStringsSep "," toString self.value}]";
};
/* Returns the GVariant array from the given empty Nix list.
Type:
mkEmptyArray :: gvariant.type -> gvariant
Example:
# Creating an empty string array
lib.gvariant.mkEmptyArray (lib.gvariant.type.string)
*/
mkEmptyArray = elemType: mkPrimitive (type.arrayOf elemType) [ ] // {
__toString = self: "@${self.type} []";
};
/* Returns the GVariant variant from the given Nix value. Variants are
containers that can hold values of any other GVariant type.
Type:
mkVariant :: Any -> gvariant
Example:
lib.gvariant.mkArray [
(lib.gvariant.mkVariant "a string")
(lib.gvariant.mkVariant (lib.gvariant.mkInt32 1))
]
*/
mkVariant = elem:
let gvarElem = mkValue elem;
in mkPrimitive type.variant gvarElem // {
__toString = self: "<${toString self.value}>";
};
/* Returns the GVariant dictionary entry from the given key and value.
Type:
mkDictionaryEntry :: String -> Any -> gvariant
Example:
# A dictionary describing an Epiphany search provider
[
(lib.gvariant.mkDictionaryEntry "url" (lib.gvariant.mkVariant "https://duckduckgo.com/?q=%s&t=epiphany"))
(lib.gvariant.mkDictionaryEntry "bang" (lib.gvariant.mkVariant "!d"))
(lib.gvariant.mkDictionaryEntry "name" (lib.gvariant.mkVariant "DuckDuckGo"))
]
*/
mkDictionaryEntry =
# The key of the entry
name:
# The value of the entry
value:
let
name' = mkValue name;
value' = mkValue value;
dictionaryType = type.dictionaryEntryOf name'.type value'.type;
in
mkPrimitive dictionaryType { inherit name value; } // {
__toString = self: "@${self.type} {${name'},${value'}}";
};
/* Returns the GVariant maybe from the given element type.
Type:
mkMaybe :: gvariant.type -> Any -> gvariant
*/
mkMaybe = elemType: elem:
mkPrimitive (type.maybeOf elemType) elem // {
__toString = self:
if self.value == null then
"@${self.type} nothing"
else
"just ${toString self.value}";
};
/* Returns the GVariant nothing from the given element type.
Type:
mkNothing :: gvariant.type -> gvariant
*/
mkNothing = elemType: mkMaybe elemType null;
/* Returns the GVariant just from the given Nix value.
Type:
mkJust :: Any -> gvariant
*/
mkJust = elem: let gvarElem = mkValue elem; in mkMaybe gvarElem.type gvarElem;
/* Returns the GVariant tuple from the given Nix list.
Type:
mkTuple :: [Any] -> gvariant
*/
mkTuple = elems:
let
gvarElems = map mkValue elems;
tupleType = type.tupleOf (map (e: e.type) gvarElems);
in
mkPrimitive tupleType gvarElems // {
__toString = self:
"@${self.type} (${concatMapStringsSep "," toString self.value})";
};
/* Returns the GVariant boolean from the given Nix bool value.
Type:
mkBoolean :: Bool -> gvariant
*/
mkBoolean = v:
mkPrimitive type.boolean v // {
__toString = self: if self.value then "true" else "false";
};
/* Returns the GVariant string from the given Nix string value.
Type:
mkString :: String -> gvariant
*/
mkString = v:
let sanitize = s: replaceStrings [ "\n" ] [ "\\n" ] (escape [ "'" "\\" ] s);
in mkPrimitive type.string v // {
__toString = self: "'${sanitize self.value}'";
};
/* Returns the GVariant object path from the given Nix string value.
Type:
mkObjectpath :: String -> gvariant
*/
mkObjectpath = v:
mkPrimitive type.string v // {
__toString = self: "objectpath '${escape [ "'" ] self.value}'";
};
/* Returns the GVariant uchar from the given Nix int value.
Type:
mkUchar :: Int -> gvariant
*/
mkUchar = mkPrimitive type.uchar;
/* Returns the GVariant int16 from the given Nix int value.
Type:
mkInt16 :: Int -> gvariant
*/
mkInt16 = mkPrimitive type.int16;
/* Returns the GVariant uint16 from the given Nix int value.
Type:
mkUint16 :: Int -> gvariant
*/
mkUint16 = mkPrimitive type.uint16;
/* Returns the GVariant int32 from the given Nix int value.
Type:
mkInt32 :: Int -> gvariant
*/
mkInt32 = v:
mkPrimitive type.int32 v // {
__toString = self: toString self.value;
};
/* Returns the GVariant uint32 from the given Nix int value.
Type:
mkUint32 :: Int -> gvariant
*/
mkUint32 = mkPrimitive type.uint32;
/* Returns the GVariant int64 from the given Nix int value.
Type:
mkInt64 :: Int -> gvariant
*/
mkInt64 = mkPrimitive type.int64;
/* Returns the GVariant uint64 from the given Nix int value.
Type:
mkUint64 :: Int -> gvariant
*/
mkUint64 = mkPrimitive type.uint64;
/* Returns the GVariant double from the given Nix float value.
Type:
mkDouble :: Float -> gvariant
*/
mkDouble = v:
mkPrimitive type.double v // {
__toString = self: toString self.value;
};
}

View file

@ -30,6 +30,14 @@ in mkLicense lset) ({
fullName = "Abstyles License";
};
acsl14 = {
fullName = "Anti-Capitalist Software License v1.4";
url = "https://anticapitalist.software/";
/* restrictions on corporations apply for both use and redistribution */
free = false;
redistributable = false;
};
afl20 = {
spdxId = "AFL-2.0";
fullName = "Academic Free License v2.0";
@ -413,9 +421,9 @@ in mkLicense lset) ({
fullName = "Eiffel Forum License v2.0";
};
elastic = {
fullName = "ELASTIC LICENSE";
url = "https://github.com/elastic/elasticsearch/blob/master/licenses/ELASTIC-LICENSE.txt";
elastic20 = {
fullName = "Elastic License 2.0";
url = "https://github.com/elastic/elasticsearch/blob/main/licenses/ELASTIC-LICENSE-2.0.txt";
free = false;
};
@ -481,6 +489,11 @@ in mkLicense lset) ({
free = false;
};
fraunhofer-fdk = {
fullName = "Fraunhofer FDK AAC Codec Library";
spdxId = "FDK-AAC";
};
free = {
fullName = "Unspecified free software license";
};
@ -610,11 +623,17 @@ in mkLicense lset) ({
};
inria-icesl = {
fullName = "INRIA Non-Commercial License Agreement for IceSL";
fullName = "End User License Agreement for IceSL Software";
url = "https://icesl.loria.fr/assets/pdf/EULA_IceSL_binary.pdf";
free = false;
};
inria-zelus = {
fullName = "INRIA Non-Commercial License Agreement for the Zélus compiler";
url = "https://github.com/INRIA/zelus/raw/829f2b97cba93b0543a9ca0272269e6b8fdad356/LICENSE";
free = false;
};
ipa = {
spdxId = "IPA";
fullName = "IPA Font License";
@ -840,6 +859,14 @@ in mkLicense lset) ({
fullName = "University of Illinois/NCSA Open Source License";
};
ncul1 = {
spdxId = "NCUL1";
fullName = "Netdata Cloud UI License v1.0";
free = false;
redistributable = true; # Only if used in Netdata products.
url = "https://raw.githubusercontent.com/netdata/netdata/master/web/gui/v2/LICENSE.md";
};
nlpl = {
spdxId = "NLPL";
fullName = "No Limit Public License";
@ -856,6 +883,11 @@ in mkLicense lset) ({
free = false;
};
ocamlLgplLinkingException = {
spdxId = "OCaml-LGPL-linking-exception";
fullName = "OCaml LGPL Linking Exception";
};
ocamlpro_nc = {
fullName = "OCamlPro Non Commercial license version 1";
url = "https://alt-ergo.ocamlpro.com/http/alt-ergo-2.2.0/OCamlPro-Non-Commercial-License.pdf";

View file

@ -86,15 +86,63 @@ rec {
else op (foldl' (n - 1)) (elemAt list n);
in foldl' (length list - 1);
/* Strict version of `foldl`.
/*
Reduce a list by applying a binary operator from left to right,
starting with an initial accumulator.
The difference is that evaluation is forced upon access. Usually used
with small whole results (in contrast with lazily-generated list or large
lists where only a part is consumed.)
Before each application of the operator, the accumulator value is evaluated.
This behavior makes this function stricter than [`foldl`](#function-library-lib.lists.foldl).
Type: foldl' :: (b -> a -> b) -> b -> [a] -> b
Unlike [`builtins.foldl'`](https://nixos.org/manual/nix/unstable/language/builtins.html#builtins-foldl'),
the initial accumulator argument is evaluated before the first iteration.
A call like
```nix
foldl' op acc₀ [ x₀ x₁ x₂ ... xₙ₋₁ xₙ ]
```
is (denotationally) equivalent to the following,
but with the added benefit that `foldl'` itself will never overflow the stack.
```nix
let
acc₁ = builtins.seq acc₀ (op acc₀ x₀);
acc₂ = builtins.seq acc₁ (op acc₁ x₁);
acc₃ = builtins.seq acc₂ (op acc₂ x₂);
...
accₙ = builtins.seq accₙ₋₁ (op accₙ₋₁ xₙ₋₁);
accₙ₊₁ = builtins.seq accₙ (op accₙ xₙ);
in
accₙ₊₁
# Or ignoring builtins.seq
op (op (... (op (op (op acc₀ x₀) x₁) x₂) ...) xₙ₋₁) xₙ
```
Type: foldl' :: (acc -> x -> acc) -> acc -> [x] -> acc
Example:
foldl' (acc: x: acc + x) 0 [1 2 3]
=> 6
*/
foldl' = builtins.foldl' or foldl;
foldl' =
/* The binary operation to run, where the two arguments are:
1. `acc`: The current accumulator value: Either the initial one for the first iteration, or the result of the previous iteration
2. `x`: The corresponding list element for this iteration
*/
op:
# The initial accumulator value
acc:
# The list to fold
list:
# The builtin `foldl'` is a bit lazier than one might expect.
# See https://github.com/NixOS/nix/pull/7158.
# In particular, the initial accumulator value is not forced before the first iteration starts.
builtins.seq acc
(builtins.foldl' op acc list);
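# As a sketch of the difference (not part of the test suite):
#   builtins.foldl' (_: x: x) (throw "never forced") [ 1 ]  evaluates to 1,
#   lib.foldl'      (_: x: x) (throw "never forced") [ 1 ]  throws,
# because only the latter seqs the initial accumulator before folding.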
/* Map with index starting from 0

View file

@ -537,7 +537,7 @@ let
mergeModules' prefix modules
(concatMap (m: map (config: { file = m._file; inherit config; }) (pushDownProperties m.config)) modules);
mergeModules' = prefix: options: configs:
mergeModules' = prefix: modules: configs:
let
# an attrset 'name' => list of submodules that declare name.
declsByName =
@ -554,11 +554,11 @@ let
else
mapAttrs
(n: option:
[{ inherit (module) _file; options = option; }]
[{ inherit (module) _file; pos = builtins.unsafeGetAttrPos n subtree; options = option; }]
)
subtree
)
options);
modules);
# The root of any module definition must be an attrset.
checkedConfigs =
@ -762,9 +762,16 @@ let
else res.options;
in opt.options // res //
{ declarations = res.declarations ++ [opt._file];
# In the case of modules that are generated dynamically, we won't
# have exact declaration lines; fall back to just the file being
# evaluated.
declarationPositions = res.declarationPositions
++ (if opt.pos != null
then [opt.pos]
else [{ file = opt._file; line = null; column = null; }]);
options = submodules;
} // typeSet
) { inherit loc; declarations = []; options = []; } opts;
) { inherit loc; declarations = []; declarationPositions = []; options = []; } opts;
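# As a sketch, an option declared in a regular module file ends up with e.g.
#   declarationPositions = [ { column = 3; file = "/path/to/module.nix"; line = 10; } ];
# (the attrset shape returned by builtins.unsafeGetAttrPos), while dynamically
# generated options get the fallback entry with line and column set to null.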
/* Merge all the definitions of an option to produce the final
config value. */

View file

@ -741,6 +741,64 @@ rec {
name = head (splitString sep filename);
in assert name != filename; name;
/* Create a "-D<feature>:<type>=<value>" string that can be passed to typical
CMake invocations.
Type: cmakeOptionType :: string -> string -> string -> string
@param feature The feature to be set
@param type The type of the feature to be set, as described in
https://cmake.org/cmake/help/latest/command/set.html
the possible values (case insensitive) are:
BOOL FILEPATH PATH STRING INTERNAL
@param value The desired value
Example:
cmakeOptionType "string" "ENGINE" "sdl2"
=> "-DENGINE:STRING=sdl2"
*/
cmakeOptionType = type: feature: value:
assert (lib.elem (lib.toUpper type)
[ "BOOL" "FILEPATH" "PATH" "STRING" "INTERNAL" ]);
assert (lib.isString feature);
assert (lib.isString value);
"-D${feature}:${lib.toUpper type}=${value}";
/* Create a -D<condition>={TRUE,FALSE} string that can be passed to typical
CMake invocations.
Type: cmakeBool :: string -> bool -> string
@param condition The condition to be made true or false
@param flag The controlling flag of the condition
Example:
cmakeBool "ENABLE_STATIC_LIBS" false
=> "-DENABLESTATIC_LIBS:BOOL=FALSE"
*/
cmakeBool = condition: flag:
assert (lib.isString condition);
assert (lib.isBool flag);
cmakeOptionType "bool" condition (lib.toUpper (lib.boolToString flag));
/* Create a -D<feature>:STRING=<value> string that can be passed to typical
CMake invocations.
This is the most typical usage, so it deserves a special case.
Type: cmakeFeature :: string -> string -> string
@param feature The feature to be set
@param value The desired value of the feature
Example:
cmakeFeature "MODULES" "badblock"
=> "-DMODULES:STRING=badblock"
*/
cmakeFeature = feature: value:
assert (lib.isString feature);
assert (lib.isString value);
cmakeOptionType "string" feature value;
/* Create a -D<feature>=<value> string that can be passed to typical Meson
invocations.

View file

@ -178,11 +178,18 @@ rec {
else if final.isLoongArch64 then "loongarch"
else final.parsed.cpu.name;
# https://source.denx.de/u-boot/u-boot/-/blob/9bfb567e5f1bfe7de8eb41f8c6d00f49d2b9a426/common/image.c#L81-106
ubootArch =
if final.isx86_32 then "x86" # not i386
else if final.isMips64 then "mips64" # uboot *does* distinguish between mips32/mips64
else final.linuxArch; # other cases appear to agree with linuxArch
qemuArch =
if final.isAarch32 then "arm"
else if final.isS390 && !final.isS390x then null
else if final.isx86_64 then "x86_64"
else if final.isx86 then "i386"
else if final.isMips64n32 then "mipsn32${lib.optionalString final.isLittleEndian "el"}"
else if final.isMips64 then "mips64${lib.optionalString final.isLittleEndian "el"}"
else final.uname.processor;
@ -224,6 +231,7 @@ rec {
gtkSupport = false;
sdlSupport = false;
pulseSupport = false;
pipewireSupport = false;
smbdSupport = false;
seccompSupport = false;
enableDocs = false;

View file

@ -206,6 +206,7 @@ rec {
aarch64-embedded = {
config = "aarch64-none-elf";
libc = "newlib";
rustc.config = "aarch64-unknown-none";
};
aarch64be-embedded = {
@ -313,6 +314,11 @@ rec {
libc = "msvcrt"; # This distinguishes the mingw (non posix) toolchain
};
ucrt64 = {
config = "x86_64-w64-mingw32";
libc = "ucrt"; # This distinguishes the mingw (non posix) toolchain
};
# BSDs
x86_64-freebsd = {

View file

@ -505,6 +505,38 @@ runTests {
};
};
testFoldl'Empty = {
expr = foldl' (acc: el: abort "operation not called") 0 [ ];
expected = 0;
};
testFoldl'IntegerAdding = {
expr = foldl' (acc: el: acc + el) 0 [ 1 2 3 ];
expected = 6;
};
# The accumulator isn't forced deeply
testFoldl'NonDeep = {
expr = take 3 (foldl'
(acc: el: [ el ] ++ acc)
[ (abort "unevaluated list entry") ]
[ 1 2 3 ]);
expected = [ 3 2 1 ];
};
# Compared to builtins.foldl', lib.foldl' evaluates the initial accumulator strictly too
testFoldl'StrictInitial = {
expr = (builtins.tryEval (foldl' (acc: el: el) (throw "hello") [])).success;
expected = false;
};
# Make sure we don't get a stack overflow for large lists
# This many elements would notably cause a stack overflow if foldl' were implemented without the `foldl'` builtin
testFoldl'Large = {
expr = foldl' (acc: el: acc + el) 0 (range 0 100000);
expected = 5000050000;
};
testTake = testAllTrue [
([] == (take 0 [ 1 2 3 ]))
([1] == (take 1 [ 1 2 3 ]))
@ -708,7 +740,7 @@ runTests {
# should just return the initial value
emptySet = foldlAttrs (throw "function not needed") 123 { };
# should just return the accumulator, never forcing the attribute values
accNotNeeded = foldlAttrs (_acc: _name: v: v) (throw "accumulator not needed") { z = 3; a = 2; };
valuesNotNeeded = foldlAttrs (acc: _name: _v: acc) 3 { z = throw "value z not needed"; a = throw "value a not needed"; };
# the accumulator doesn't have to be an attrset; it can be as trivial as a number or string
trivialAcc = foldlAttrs (acc: _name: v: acc * 10 + v) 1 { z = 1; a = 2; };
};
@ -718,7 +750,7 @@ runTests {
names = [ "bar" "foo" ];
};
emptySet = 123;
accNotNeeded = 3;
valuesNotNeeded = 3;
trivialAcc = 121;
};
};
@ -948,6 +980,51 @@ runTests {
'';
};
testToGitINI = {
expr = generators.toGitINI {
user = {
email = "user@example.org";
name = "John Doe";
signingKey = "00112233445566778899AABBCCDDEEFF";
};
gpg.program = "path-to-gpg";
tag.gpgSign = true;
include.path = "~/path/to/config.inc";
includeIf."gitdif:~/src/dir".path = "~/path/to/conditional.inc";
extra = {
boolean = true;
integer = 38;
name = "value";
subsection.value = "test";
};};
expected = ''
[extra]
${"\t"}boolean = true
${"\t"}integer = 38
${"\t"}name = "value"
[extra "subsection"]
${"\t"}value = "test"
[gpg]
${"\t"}program = "path-to-gpg"
[include]
${"\t"}path = "~/path/to/config.inc"
[includeIf "gitdif:~/src/dir"]
${"\t"}path = "~/path/to/conditional.inc"
[tag]
${"\t"}gpgSign = true
[user]
${"\t"}email = "user@example.org"
${"\t"}name = "John Doe"
${"\t"}signingKey = "00112233445566778899AABBCCDDEEFF"
'';
};
/* right now only invocation check */
testToJSONSimple =
let val = {

View file

@ -39,7 +39,7 @@ reportFailure() {
checkConfigOutput() {
local outputContains=$1
shift
if evalConfig "$@" 2>/dev/null | grep --silent "$outputContains" ; then
if evalConfig "$@" 2>/dev/null | grep -E --silent "$outputContains" ; then
((++pass))
else
echo >&2 "error: Expected result matching '$outputContains', while evaluating"
@ -91,6 +91,9 @@ checkConfigOutput '^true$' config.result ./test-mergeAttrDefinitionsWithPrio.nix
# is the option.
checkConfigOutput '^true$' config.result ./module-argument-default.nix
# gvariant
checkConfigOutput '^true$' config.assertion ./gvariant.nix
# types.pathInStore
checkConfigOutput '".*/store/0lz9p8xhf89kb1c1kk6jxrzskaiygnlh-bash-5.2-p15.drv"' config.pathInStore.ok1 ./types.nix
checkConfigOutput '".*/store/0fb3ykw9r5hpayd05sr0cizwadzq1d8q-bash-5.2-p15"' config.pathInStore.ok2 ./types.nix
@ -444,6 +447,24 @@ checkConfigOutput '^"The option `a\.b. defined in `.*/doRename-warnings\.nix. ha
checkConfigOutput '^"pear"$' config.once.raw ./merge-module-with-key.nix
checkConfigOutput '^"pear\\npear"$' config.twice.raw ./merge-module-with-key.nix
# Declaration positions
# Line should be present for direct options
checkConfigOutput '^10$' options.imported.line10.declarationPositions.0.line ./declaration-positions.nix
checkConfigOutput '/declaration-positions.nix"$' options.imported.line10.declarationPositions.0.file ./declaration-positions.nix
# Generated options may not have line numbers but they will at least get the
# right file
checkConfigOutput '/declaration-positions.nix"$' options.generated.line18.declarationPositions.0.file ./declaration-positions.nix
checkConfigOutput '^null$' options.generated.line18.declarationPositions.0.line ./declaration-positions.nix
# Submodules don't break it
checkConfigOutput '^39$' config.submoduleLine34.submodDeclLine39.0.line ./declaration-positions.nix
checkConfigOutput '/declaration-positions.nix"$' config.submoduleLine34.submodDeclLine39.0.file ./declaration-positions.nix
# New options under freeform submodules get collected into the parent submodule
# (consistent with .declarations behaviour, but weird; notably appears in system.build)
checkConfigOutput '^(34|23)$' options.submoduleLine34.declarationPositions.0.line ./declaration-positions.nix
checkConfigOutput '^(34|23)$' options.submoduleLine34.declarationPositions.1.line ./declaration-positions.nix
# nested options work
checkConfigOutput '^30$' options.nested.nestedLine30.declarationPositions.0.line ./declaration-positions.nix
cat <<EOF
====== module tests ======
$pass Pass

View file

@ -0,0 +1,49 @@
{ lib, options, ... }:
let discardPositions = lib.mapAttrs (k: v: v);
in
# unsafeGetAttrPos is unspecified best-effort behavior, so we only want to consider this test on an evaluator that satisfies some basic assumptions about this function.
assert builtins.unsafeGetAttrPos "a" { a = true; } != null;
assert builtins.unsafeGetAttrPos "a" (discardPositions { a = true; }) == null;
{
imports = [
{
options.imported.line10 = lib.mkOption {
type = lib.types.int;
};
# Simulates various patterns of generating modules such as
# programs.firefox.nativeMessagingHosts.ff2mpv. We don't expect to get
# line numbers for these, but we can fall back on knowing the file.
options.generated = discardPositions {
line18 = lib.mkOption {
type = lib.types.int;
};
};
options.submoduleLine34.extraOptLine23 = lib.mkOption {
default = 1;
type = lib.types.int;
};
}
];
options.nested.nestedLine30 = lib.mkOption {
type = lib.types.int;
};
options.submoduleLine34 = lib.mkOption {
default = { };
type = lib.types.submoduleWith {
modules = [
({ options, ... }: {
options.submodDeclLine39 = lib.mkOption { };
})
{ freeformType = with lib.types; lazyAttrsOf (uniq unspecified); }
];
};
};
config = {
submoduleLine34.submodDeclLine39 = (options.submoduleLine34.type.getSubOptions [ ]).submodDeclLine39.declarationPositions;
};
}

View file

@ -0,0 +1,61 @@
{ config, lib, ... }:
{
options = {
examples = lib.mkOption { type = lib.types.attrs; };
assertion = lib.mkOption { type = lib.types.bool; };
};
config = {
examples = with lib.gvariant; {
bool = true;
float = 3.14;
int32 = mkInt32 (- 42);
uint32 = mkUint32 42;
int16 = mkInt16 (-42);
uint16 = mkUint16 42;
int64 = mkInt64 (-42);
uint64 = mkUint64 42;
array1 = [ "one" ];
array2 = mkArray [ (mkInt32 1) ];
array3 = mkArray [ (mkUint32 2) ];
emptyArray = mkEmptyArray type.uint32;
string = "foo";
escapedString = ''
'\
'';
tuple = mkTuple [ (mkInt32 1) [ "foo" ] ];
maybe1 = mkNothing type.string;
maybe2 = mkJust (mkUint32 4);
variant = mkVariant "foo";
dictionaryEntry = mkDictionaryEntry (mkInt32 1) [ "foo" ];
};
assertion =
let
mkLine = n: v: "${n} = ${toString (lib.gvariant.mkValue v)}";
result = lib.concatStringsSep "\n" (lib.mapAttrsToList mkLine config.examples);
in
(result + "\n") == ''
array1 = @as ['one']
array2 = @ai [1]
array3 = @au [@u 2]
bool = true
dictionaryEntry = @{ias} {1,@as ['foo']}
emptyArray = @au []
escapedString = '\'\\\n'
float = 3.140000
int16 = @n -42
int32 = -42
int64 = @x -42
maybe1 = @ms nothing
maybe2 = just @u 4
string = 'foo'
tuple = @(ias) (1,@as ['foo'])
uint16 = @q 42
uint32 = @u 42
uint64 = @t 42
variant = <'foo'>
'';
};
}

View file

@ -6,6 +6,7 @@
}:
let
lib = import ../.;
testWithNix = nix:
pkgs.runCommand "nixpkgs-lib-tests-nix-${nix.version}" {
buildInputs = [
@ -24,7 +25,7 @@ let
];
nativeBuildInputs = [
nix
];
] ++ lib.optional pkgs.stdenv.isLinux pkgs.inotify-tools;
strictDeps = true;
} ''
datadir="${nix}/share"
@ -50,6 +51,9 @@ let
echo "Running lib/tests/sources.sh"
TEST_LIB=$PWD/lib bash lib/tests/sources.sh
echo "Running lib/fileset/tests.sh"
TEST_LIB=$PWD/lib bash lib/fileset/tests.sh
echo "Running lib/tests/systems.nix"
[[ $(nix-instantiate --eval --strict lib/tests/systems.nix | tee /dev/stderr) == '[ ]' ]];

View file

@ -1,10 +1,63 @@
# Nixpkgs Maintainers
The *Nixpkgs maintainers* are people who have assigned themselves to
maintain specific individual packages. We encourage people who care
about a package to assign themselves as a maintainer. When a pull
request is made against a package, OfBorg will notify the appropriate
maintainer(s).
Unlike other packaging ecosystems, the maintainer doesn't have exclusive
control over the packages and modules they maintain. This more fluid approach
is one reason why we scale to so many packages.
## Definition and role of the maintainer
The main responsibility of a maintainer is to keep the packages they maintain
in a functioning state, and keep up with updates. In order to do that, they
are empowered to make decisions over the packages they maintain.
That being said, the maintainer is not the only one who can propose changes
to the packages. Anybody (both bots and humans) can send PRs to bump or
tweak a package.
We also allow other non-maintainer committers to merge changes to the package,
provided enough time and priority has been given to the maintainer.
For most packages, we expect committers to wait at least a week before merging
changes not endorsed by a package maintainer (which may be themselves). This should leave enough time
for the maintainers to provide feedback.
For critical packages, this convention needs to be negotiated with the
maintainer. A critical package is one that causes a mass rebuild, or one
with an author listed in the [`CODEOWNERS`](../.github/CODEOWNERS) file.
In case of critical security updates, the [security team](https://nixos.org/community/teams/security) might override these
heuristics in order to get the fixes in as fast as possible.
In case of conflict, the maintainer takes priority and is allowed to revert
the changes. This can happen for example if the maintainer was on holiday.
### How to become a maintainer
We encourage people who care about a package to assign themselves as a
maintainer. Commit access to the Nixpkgs repository is not required for that.
In order to do so, add yourself to the
[`maintainer-list.nix`](./maintainer-list.nix), and then to the desired
package's `meta.maintainers` list, and send a PR with the changes.
### How to lose maintainer status
Maintainers who have become inactive on a given package can be removed. This
helps us keep an accurate view of the state of maintenance in Nixpkgs.
The inactivity measure is currently not strictly enforced. We would typically
look at it if we notice that the maintainer hasn't reacted to package-related
notifications for more than 3 months.
Removing the maintainer happens by making a PR on the package, adding that
person as a reviewer, and then waiting a week for a reaction.
The maintainer is welcome to come back at any time.
### Tools for maintainers
When a pull request is made against a package, OfBorg will notify the
appropriate maintainer(s).
## Reviewing contributions

File diff suppressed because it is too large

View file

@ -12,5 +12,5 @@ import ../../pkgs/top-level/release.nix
scrubJobs = false;
# No need to evaluate on i686.
supportedSystems = [ "x86_64-linux" ];
limitedSupportedSystems = [];
bootstrapConfigs = [];
}

View file

@ -30,7 +30,7 @@ Because step 1) is quite expensive and takes roughly ~5 minutes the result is ca
{-# OPTIONS_GHC -Wall #-}
{-# LANGUAGE DataKinds #-}
import Control.Monad (forM_, (<=<))
import Control.Monad (forM_, forM, (<=<))
import Control.Monad.Trans (MonadIO (liftIO))
import Data.Aeson (
FromJSON,
@ -108,6 +108,7 @@ newtype JobsetEvalInputs = JobsetEvalInputs {nixpkgs :: Nixpkgs}
data Eval = Eval
{ id :: Int
, jobsetevalinputs :: JobsetEvalInputs
, builds :: Seq Int
}
deriving (Generic, ToJSON, FromJSON, Show)
@ -151,15 +152,20 @@ data Build = Build
}
deriving (Generic, ToJSON, FromJSON, Show)
data HydraSlownessWorkaroundFlag = HydraSlownessWorkaround | NoHydraSlownessWorkaround
data RequestLogsFlag = RequestLogs | NoRequestLogs
main :: IO ()
main = do
args <- getArgs
case args of
["get-report"] -> getBuildReports
["get-report", "--slow"] -> getBuildReports HydraSlownessWorkaround
["get-report"] -> getBuildReports NoHydraSlownessWorkaround
["ping-maintainers"] -> printMaintainerPing
["mark-broken-list"] -> printMarkBrokenList
["mark-broken-list", "--no-request-logs"] -> printMarkBrokenList NoRequestLogs
["mark-broken-list"] -> printMarkBrokenList RequestLogs
["eval-info"] -> printEvalInfo
_ -> putStrLn "Usage: get-report | ping-maintainers | mark-broken-list | eval-info"
_ -> putStrLn "Usage: get-report [--slow] | ping-maintainers | mark-broken-list [--no-request-logs] | eval-info"
reportFileName :: IO FilePath
reportFileName = getXdgDirectory XdgCache "haskell-updates-build-report.json"
@ -167,18 +173,27 @@ reportFileName = getXdgDirectory XdgCache "haskell-updates-build-report.json"
showT :: Show a => a -> Text
showT = Text.pack . show
getBuildReports :: IO ()
getBuildReports = runReq defaultHttpConfig do
getBuildReports :: HydraSlownessWorkaroundFlag -> IO ()
getBuildReports opt = runReq defaultHttpConfig do
evalMay <- Seq.lookup 0 . evals <$> hydraJSONQuery mempty ["jobset", "nixpkgs", "haskell-updates", "evals"]
eval@Eval{id} <- maybe (liftIO $ fail "No Evalution found") pure evalMay
eval@Eval{id} <- maybe (liftIO $ fail "No Evaluation found") pure evalMay
liftIO . putStrLn $ "Fetching evaluation " <> show id <> " from Hydra. This might take a few minutes..."
buildReports :: Seq Build <- hydraJSONQuery (responseTimeout 600000000) ["eval", showT id, "builds"]
buildReports <- getEvalBuilds opt id
liftIO do
fileName <- reportFileName
putStrLn $ "Finished fetching all builds from Hydra, saving report as " <> fileName
now <- getCurrentTime
encodeFile fileName (eval, now, buildReports)
getEvalBuilds :: HydraSlownessWorkaroundFlag -> Int -> Req (Seq Build)
getEvalBuilds NoHydraSlownessWorkaround id =
hydraJSONQuery (responseTimeout 900000000) ["eval", showT id, "builds"]
getEvalBuilds HydraSlownessWorkaround id = do
Eval{builds} <- hydraJSONQuery mempty [ "eval", showT id ]
forM builds $ \buildId -> do
liftIO $ putStrLn $ "Querying build " <> show buildId
hydraJSONQuery mempty [ "build", showT buildId ]
hydraQuery :: HttpResponse a => Proxy a -> Option 'Https -> [Text] -> Req (HttpResponseBody a)
hydraQuery responseType option query =
responseBody
@ -187,7 +202,7 @@ hydraQuery responseType option query =
(foldl' (/:) (https "hydra.nixos.org") query)
NoReqBody
responseType
(header "User-Agent" "hydra-report.hs/v1 (nixpkgs;maintainers/scripts/haskell)" <> option)
(header "User-Agent" "hydra-report.hs/v1 (nixpkgs;maintainers/scripts/haskell) pls fix https://github.com/NixOS/nixos-org-configurations/issues/270" <> option)
hydraJSONQuery :: FromJSON a => Option 'Https -> [Text] -> Req a
hydraJSONQuery = hydraQuery jsonResponse
@ -775,16 +790,20 @@ printMaintainerPing = do
textBuildSummary = printBuildSummary eval fetchTime buildSum topBrokenRdeps
Text.putStrLn textBuildSummary
printMarkBrokenList :: IO ()
printMarkBrokenList = do
printMarkBrokenList :: RequestLogsFlag -> IO ()
printMarkBrokenList reqLogs = do
(_, fetchTime, buildReport) <- readBuildReports
runReq defaultHttpConfig $ forM_ buildReport \build@Build{job, id} ->
case (getBuildState build, Text.splitOn "." $ unJobName job) of
(Failed, ["haskellPackages", name, "x86_64-linux"]) -> do
-- Fetch build log from hydra to figure out the cause of the error.
build_log <- ByteString.lines <$> hydraPlainQuery ["build", showT id, "nixlog", "1", "raw"]
-- We use the last probable error cause found in the build log file.
let error_message = fromMaybe " failure " $ safeLast $ mapMaybe probableErrorCause build_log
error_message <- fromMaybe "failure" <$>
case reqLogs of
NoRequestLogs -> pure Nothing
RequestLogs -> do
-- Fetch build log from hydra to figure out the cause of the error.
build_log <- ByteString.lines <$> hydraPlainQuery ["build", showT id, "nixlog", "1", "raw"]
pure $ safeLast $ mapMaybe probableErrorCause build_log
liftIO $ putStrLn $ " - " <> Text.unpack name <> " # " <> error_message <> " in job https://hydra.nixos.org/build/" <> show id <> " at " <> formatTime defaultTimeLocale "%Y-%m-%d" fetchTime
_ -> pure ()

View file

@ -10,6 +10,24 @@
set -euo pipefail
do_commit=false
mark_broken_list_flags=""
for arg in "$@"; do
case "$arg" in
--do-commit)
do_commit=true
;;
--no-request-logs)
mark_broken_list_flags="$mark_broken_list_flags $arg"
;;
*)
echo "$0: unknown flag: $arg"
exit 100
;;
esac
done
broken_config="pkgs/development/haskell-modules/configuration-hackage2nix/broken.yaml"
tmpfile=$(mktemp)
@ -17,7 +35,7 @@ trap "rm ${tmpfile}" 0
echo "Remember that you need to manually run 'maintainers/scripts/haskell/hydra-report.hs get-report' sometime before running this script."
echo "Generating a list of broken builds and displaying for manual confirmation ..."
maintainers/scripts/haskell/hydra-report.hs mark-broken-list | sort -i > "$tmpfile"
maintainers/scripts/haskell/hydra-report.hs mark-broken-list $mark_broken_list_flags | sort -i > "$tmpfile"
$EDITOR "$tmpfile"
@ -34,7 +52,7 @@ clear="env -u HOME -u NIXPKGS_CONFIG"
$clear maintainers/scripts/haskell/regenerate-hackage-packages.sh
evalline=$(maintainers/scripts/haskell/hydra-report.hs eval-info)
if [[ "${1:-}" == "--do-commit" ]]; then
if $do_commit; then
git add $broken_config
git add pkgs/development/haskell-modules/configuration-hackage2nix/transitive-broken.yaml
git add pkgs/development/haskell-modules/hackage-packages.nix

View file

@ -89,6 +89,7 @@ lyaml,,,,,,lblasc
magick,,,,,,donovanglover
markdown,,,,,,
mediator_lua,,,,,,
middleclass,,,,,,
mpack,,,,,,
moonscript,https://github.com/leafo/moonscript.git,dev-1,,,,arobyn
nvim-client,https://github.com/neovim/lua-client.git,,,,,


View file

@ -321,8 +321,14 @@ def load_plugins_from_csv(
return plugins
def run_nix_expr(expr):
with CleanEnvironment() as nix_path:
def run_nix_expr(expr, nixpkgs: str):
'''
:param expr nix expression to fetch current plugins
:param nixpkgs Path towards a nixpkgs checkout
'''
# local_pkgs = str(Path(__file__).parent.parent.parent)
with CleanEnvironment(nixpkgs) as nix_path:
cmd = [
"nix",
"eval",
@ -396,9 +402,9 @@ class Editor:
"""CSV spec"""
print("the update member function should be overriden in subclasses")
def get_current_plugins(self) -> List[Plugin]:
def get_current_plugins(self, nixpkgs) -> List[Plugin]:
"""To fill the cache"""
data = run_nix_expr(self.get_plugins)
data = run_nix_expr(self.get_plugins, nixpkgs)
plugins = []
for name, attr in data.items():
p = Plugin(name, attr["rev"], attr["submodules"], attr["sha256"])
@ -414,7 +420,7 @@ class Editor:
raise NotImplementedError()
def get_update(self, input_file: str, outfile: str, config: FetchConfig):
cache: Cache = Cache(self.get_current_plugins(), self.cache_file)
cache: Cache = Cache(self.get_current_plugins(self.nixpkgs), self.cache_file)
_prefetch = functools.partial(prefetch, cache=cache)
def update() -> dict:
@ -453,6 +459,12 @@ class Editor:
By default from {self.default_in} to {self.default_out}"""
),
)
common.add_argument(
"--nixpkgs",
type=str,
default=os.getcwd(),
help="Adjust log level",
)
common.add_argument(
"--input-names",
"-i",
@ -541,22 +553,27 @@ class Editor:
command = args.command or "update"
log.setLevel(LOG_LEVELS[args.debug])
log.info("Chose to run command: %s", command)
self.nixpkgs = args.nixpkgs
if not args.no_commit:
self.nixpkgs_repo = git.Repo(self.root, search_parent_directories=True)
self.nixpkgs_repo = git.Repo(args.nixpkgs, search_parent_directories=True)
getattr(self, command)(args)
class CleanEnvironment(object):
def __init__(self, nixpkgs):
self.local_pkgs = nixpkgs
def __enter__(self) -> str:
self.old_environ = os.environ.copy()
"""
local_pkgs = str(Path(__file__).parent.parent.parent)
"""
self.old_environ = os.environ.copy()
self.empty_config = NamedTemporaryFile()
self.empty_config.write(b"{}")
self.empty_config.flush()
os.environ["NIXPKGS_CONFIG"] = self.empty_config.name
return f"localpkgs={local_pkgs}"
# os.environ["NIXPKGS_CONFIG"] = self.empty_config.name
return f"localpkgs={self.local_pkgs}"
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
os.environ.update(self.old_environ)
@ -758,7 +775,8 @@ def commit(repo: git.Repo, message: str, files: List[Path]) -> None:
def update_plugins(editor: Editor, args):
"""The main entry function of this module. All input arguments are grouped in the `Editor`."""
"""The main entry function of this module.
All input arguments are grouped in the `Editor`."""
log.info("Start updating plugins")
fetch_config = FetchConfig(args.proc, args.github_token)

View file

@ -102,12 +102,13 @@ def convert_to_throw(date_older_list: list[str]) -> list[tuple[str, str]]:
alias = before_equal
alias_unquoted = before_equal.strip('"')
after_equal_list = [x.strip(";:") for x in after_equal.split()]
replacement = next(x.strip(";:") for x in after_equal.split())
replacement = replacement.removeprefix("pkgs.")
converted = (
f"{indent}{alias} = throw \"'{alias_unquoted}' has been renamed to/replaced by"
f" '{after_equal_list.pop(0)}'\";"
f' # Converted to throw {datetime.today().strftime("%Y-%m-%d")}'
f"{indent}{alias} = throw \"'{alias_unquoted}' has been"
f" renamed to/replaced by '{replacement}'\";"
f" # Converted to throw {datetime.today().strftime('%Y-%m-%d')}"
)
converted_list.append((line, converted))
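# For example (sketch): given the alias line
#   foo = pkgs.bar;
# the loop above appends a pair whose converted form reads
#   foo = throw "'foo' has been renamed to/replaced by 'bar'"; # Converted to throw <date of the run>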

View file

@ -0,0 +1,228 @@
#!/usr/bin/env nix-shell
#! nix-shell -i "python3 -I" -p "python3.withPackages(p: with p; [ rich structlog ])"
from abc import ABC, abstractmethod
from contextlib import contextmanager
from pathlib import Path
from structlog.contextvars import bound_contextvars as log_context
from typing import ClassVar, List, Tuple
import hashlib, re, structlog
logger = structlog.getLogger("sha-to-SRI")
class Encoding(ABC):
alphabet: ClassVar[str]
@classmethod
@property
def name(cls) -> str:
return cls.__name__.lower()
def toSRI(self, s: str) -> str:
digest = self.decode(s)
assert len(digest) == self.n
from base64 import b64encode
return f"{self.hashName}-{b64encode(digest).decode()}"
@classmethod
def all(cls, h) -> 'List[Encoding]':
return [ c(h) for c in cls.__subclasses__() ]
def __init__(self, h):
self.n = h.digest_size
self.hashName = h.name
@property
@abstractmethod
def length(self) -> int:
...
@property
def regex(self) -> str:
return f"[{self.alphabet}]{{{self.length}}}"
@abstractmethod
def decode(self, s: str) -> bytes:
...
class Nix32(Encoding):
alphabet = "0123456789abcdfghijklmnpqrsvwxyz"
inverted = { c: i for i, c in enumerate(alphabet) }
@property
def length(self):
return 1 + (8 * self.n) // 5
def decode(self, s: str):
assert len(s) == self.length
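# Nix32 is little-endian: the last character of the string encodes the lowest 5 bits of the digest, hence the reversed iteration below.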
out = [ 0 for _ in range(self.n) ]
# TODO: Do better than a list of byte-sized ints
for n, c in enumerate(reversed(s)):
digit = self.inverted[c]
i, j = divmod(5 * n, 8)
out[i] = out[i] | (digit << j) & 0xff
rem = digit >> (8 - j)
if rem == 0:
continue
elif i < self.n - 1:
out[i+1] = rem
else:
raise ValueError(f"Invalid nix32 hash: '{s}'")
return bytes(out)
class Hex(Encoding):
alphabet = "0-9A-Fa-f"
@property
def length(self):
return 2 * self.n
def decode(self, s: str):
from binascii import unhexlify
return unhexlify(s)
class Base64(Encoding):
alphabet = "A-Za-z0-9+/"
@property
def format(self) -> Tuple[int, int]:
"""Number of characters in data and padding."""
i, k = divmod(self.n, 3)
return 4 * i + (0 if k == 0 else k + 1), (3 - k) % 3
@property
def length(self):
return sum(self.format)
@property
def regex(self):
data, padding = self.format
return f"[{self.alphabet}]{{{data}}}={{{padding}}}"
def decode(self, s):
from base64 import b64decode
return b64decode(s, validate = True)
_HASHES = (hashlib.new(n) for n in ('SHA-256', 'SHA-512'))
ENCODINGS = {
h.name: Encoding.all(h)
for h in _HASHES
}
RE = {
h: "|".join(
(f"({h}-)?" if e.name == 'base64' else '') +
f"(?P<{h}_{e.name}>{e.regex})"
for e in encodings
) for h, encodings in ENCODINGS.items()
}
_DEF_RE = re.compile("|".join(
f"(?P<{h}>{h} = (?P<{h}_quote>['\"])({re})(?P={h}_quote);)"
for h, re in RE.items()
))
def defToSRI(s: str) -> str:
def f(m: re.Match[str]) -> str:
try:
for h, encodings in ENCODINGS.items():
if m.group(h) is None:
continue
for e in encodings:
s = m.group(f"{h}_{e.name}")
if s is not None:
return f'hash = "{e.toSRI(s)}";'
raise ValueError(f"Match with '{h}' but no subgroup")
raise ValueError("Match with no hash")
except ValueError as exn:
logger.error(
"Skipping",
exc_info = exn,
)
return m.group()
return _DEF_RE.sub(f, s)
@contextmanager
def atomicFileUpdate(target: Path):
'''Atomically replace the contents of a file.
Guarantees that no temporary files are left behind, and `target` is either
left untouched, or overwritten with new content if no exception was raised.
Yields a pair `(original, new)` of open files.
`original` is the pre-existing file at `target`, open for reading;
`new` is an empty, temporary file in the same folder, open for writing.
Upon exiting the context, the files are closed; if no exception was
raised, `new` (atomically) replaces the `target`, otherwise it is deleted.
'''
# That's mostly copied from noto-emoji.py, should DRY it out
from tempfile import mkstemp
fd, _p = mkstemp(
dir = target.parent,
prefix = target.name,
)
tmpPath = Path(_p)
try:
with target.open() as original:
with tmpPath.open('w') as new:
yield (original, new)
tmpPath.replace(target)
except Exception:
tmpPath.unlink(missing_ok = True)
raise
def fileToSRI(p: Path):
with atomicFileUpdate(p) as (og, new):
for i, line in enumerate(og):
with log_context(line=i):
new.write(defToSRI(line))
_SKIP_RE = re.compile(
"(generated by)|(do not edit)",
re.IGNORECASE
)
if __name__ == "__main__":
from sys import argv, stderr
logger.info("Starting!")
for arg in argv[1:]:
p = Path(arg)
with log_context(path=str(p)):
try:
if p.name == "yarn.nix" or p.name.find("generated") != -1:
logger.warning("File looks autogenerated, skipping!")
continue
with p.open() as f:
for line in f:
if line.strip():
break
if _SKIP_RE.search(line):
logger.warning("File looks autogenerated, skipping!")
continue
fileToSRI(p)
except Exception as exn:
logger.error(
"Unhandled exception, skipping file!",
exc_info = exn,
)
else:
logger.info("Finished processing file")

View file

@ -2,11 +2,11 @@
#!nix-shell update-luarocks-shell.nix -i python3
# format:
# $ nix run nixpkgs.python3Packages.black -c black update.py
# $ nix run nixpkgs#python3Packages.black -- update.py
# type-check:
# $ nix run nixpkgs.python3Packages.mypy -c mypy update.py
# $ nix run nixpkgs#python3Packages.mypy -- update.py
# linted:
# $ nix run nixpkgs.python3Packages.flake8 -c flake8 --ignore E501,E265,E402 update.py
# $ nix run nixpkgs#python3Packages.flake8 -- --ignore E501,E265,E402 update.py
import inspect
import os
@ -25,14 +25,14 @@ from pathlib import Path
log = logging.getLogger()
log.addHandler(logging.StreamHandler())
ROOT = Path(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))).parent.parent # type: ignore
ROOT = Path(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))).parent.parent # type: ignore
import pluginupdate
from pluginupdate import update_plugins, FetchConfig, CleanEnvironment
PKG_LIST="maintainers/scripts/luarocks-packages.csv"
TMP_FILE="$(mktemp)"
GENERATED_NIXFILE="pkgs/development/lua-modules/generated-packages.nix"
LUAROCKS_CONFIG="maintainers/scripts/luarocks-config.lua"
PKG_LIST = "maintainers/scripts/luarocks-packages.csv"
TMP_FILE = "$(mktemp)"
GENERATED_NIXFILE = "pkgs/development/lua-modules/generated-packages.nix"
LUAROCKS_CONFIG = "maintainers/scripts/luarocks-config.lua"
HEADER = """/* {GENERATED_NIXFILE} is an auto-generated file -- DO NOT EDIT!
Regenerate it with:
@ -40,36 +40,40 @@ nixpkgs$ ./maintainers/scripts/update-luarocks-packages
You can customize the generated packages in pkgs/development/lua-modules/overrides.nix
*/
""".format(GENERATED_NIXFILE=GENERATED_NIXFILE)
""".format(
GENERATED_NIXFILE=GENERATED_NIXFILE
)
FOOTER="""
FOOTER = """
}
/* GENERATED - do not edit this file */
"""
@dataclass
class LuaPlugin:
name: str
'''Name of the plugin, as seen on luarocks.org'''
"""Name of the plugin, as seen on luarocks.org"""
src: str
'''address to the git repository'''
"""address to the git repository"""
ref: Optional[str]
'''git reference (branch name/tag)'''
"""git reference (branch name/tag)"""
version: Optional[str]
'''Set it to pin a package '''
"""Set it to pin a package """
server: Optional[str]
'''luarocks.org registers packages under different manifests.
"""luarocks.org registers packages under different manifests.
Its value can be 'http://luarocks.org/dev'
'''
"""
luaversion: Optional[str]
'''Attribue of the lua interpreter if a package is available only for a specific lua version'''
"""Attribue of the lua interpreter if a package is available only for a specific lua version"""
maintainers: Optional[str]
''' Optional string listing maintainers separated by spaces'''
""" Optional string listing maintainers separated by spaces"""
@property
def normalized_name(self) -> str:
return self.name.replace(".", "-")
# rename Editor to LangUpdate/ EcosystemUpdater
class LuaEditor(pluginupdate.Editor):
def get_current_plugins(self):
@ -77,11 +81,13 @@ class LuaEditor(pluginupdate.Editor):
def load_plugin_spec(self, input_file) -> List[LuaPlugin]:
luaPackages = []
csvfilename=input_file
csvfilename = input_file
log.info("Loading package descriptions from %s", csvfilename)
with open(csvfilename, newline='') as csvfile:
reader = csv.DictReader(csvfile,)
with open(csvfilename, newline="") as csvfile:
reader = csv.DictReader(
csvfile,
)
for row in reader:
# name,server,version,luaversion,maintainers
plugin = LuaPlugin(**row)
@ -91,23 +97,19 @@ class LuaEditor(pluginupdate.Editor):
def update(self, args):
update_plugins(self, args)
def generate_nix(
self,
results: List[Tuple[LuaPlugin, str]],
outfilename: str
):
def generate_nix(self, results: List[Tuple[LuaPlugin, str]], outfilename: str):
with tempfile.NamedTemporaryFile("w+") as f:
f.write(HEADER)
header2 = textwrap.dedent(
# header2 = inspect.cleandoc(
"""
# header2 = inspect.cleandoc(
"""
{ self, stdenv, lib, fetchurl, fetchgit, callPackage, ... } @ args:
final: prev:
{
""")
"""
)
f.write(header2)
for (plugin, nix_expr) in results:
for plugin, nix_expr in results:
f.write(f"{plugin.normalized_name} = {nix_expr}")
f.write(FOOTER)
f.flush()
@ -156,19 +158,20 @@ class LuaEditor(pluginupdate.Editor):
# luaPackages.append(plugin)
pass
def generate_pkg_nix(plug: LuaPlugin):
'''
"""
Generate nix expression for a luarocks package
Our cache key associates "p.name-p.version" with its rockspec
'''
"""
log.debug("Generating nix expression for %s", plug.name)
custom_env = os.environ.copy()
custom_env['LUAROCKS_CONFIG'] = LUAROCKS_CONFIG
custom_env["LUAROCKS_CONFIG"] = LUAROCKS_CONFIG
# we add --dev, else luarocks won't find all the "scm" (=dev) versions of the
# packages
# , "--dev"
cmd = [ "luarocks", "nix" ]
# , "--dev"
cmd = ["luarocks", "nix"]
if plug.maintainers:
cmd.append(f"--maintainers={plug.maintainers}")
@ -176,7 +179,10 @@ def generate_pkg_nix(plug: LuaPlugin):
# if plug.server == "src":
if plug.src != "":
if plug.src is None:
msg = "src must be set when 'version' is set to \"src\" for package %s" % plug.name
msg = (
"src must be set when 'version' is set to \"src\" for package %s"
% plug.name
)
log.error(msg)
raise RuntimeError(msg)
log.debug("Updating from source %s", plug.src)
@ -185,7 +191,6 @@ def generate_pkg_nix(plug: LuaPlugin):
else:
cmd.append(plug.name)
if plug.version and plug.version != "src":
cmd.append(plug.version)
if plug.server != "src" and plug.server:
@ -194,23 +199,26 @@ def generate_pkg_nix(plug: LuaPlugin):
if plug.luaversion:
cmd.append(f"--lua-version={plug.luaversion}")
log.debug("running %s", ' '.join(cmd))
log.debug("running %s", " ".join(cmd))
output = subprocess.check_output(cmd, env=custom_env, text=True)
output = "callPackage(" + output.strip() + ") {};\n\n"
return (plug, output)
def main():
editor = LuaEditor("lua", ROOT, '',
default_in = ROOT.joinpath(PKG_LIST),
default_out = ROOT.joinpath(GENERATED_NIXFILE)
)
def main():
editor = LuaEditor(
"lua",
ROOT,
"",
default_in=ROOT.joinpath(PKG_LIST),
default_out=ROOT.joinpath(GENERATED_NIXFILE),
)
editor.run()
if __name__ == "__main__":
if __name__ == "__main__":
main()
# vim: set ft=python noet fdm=manual fenc=utf-8 ff=unix sts=0 sw=4 ts=4 :

View file

@ -287,13 +287,24 @@ with lib.maintainers; {
};
flutter = {
members = [ gilice mkg20001 RossComputerGuy FlafyDev hacker1024 ];
members = [ mkg20001 RossComputerGuy FlafyDev hacker1024 ];
scope = "Maintain Flutter and Dart-related packages and build tools";
shortName = "flutter";
enableFeatureFreezePing = false;
githubTeams = [ "flutter" ];
};
flyingcircus = {
# Verify additions by approval of an already existing member of the team.
members = [
theuni
dpausp
leona
];
scope = "Team for Flying Circus employees who collectively maintain packages.";
shortName = "Flying Circus employees";
};
freedesktop = {
members = [ jtojnar ];
scope = "Maintain Freedesktop.org packages for graphical desktop.";
@ -354,7 +365,7 @@ with lib.maintainers; {
hedning
jtojnar
dasj19
maxeaubrey
amaxine
];
githubTeams = [
"gnome"
@ -638,15 +649,13 @@ with lib.maintainers; {
enableFeatureFreezePing = true;
};
nixos-modules = {
module-system = {
members = [
ericson2314
infinisil
qyliss
roberth
];
scope = "Maintain nixpkgs module system internals.";
shortName = "NixOS Modules / internals";
scope = "Maintain the Nixpkgs module system.";
shortName = "Module system";
enableFeatureFreezePing = true;
};
@ -681,6 +690,17 @@ with lib.maintainers; {
shortName = "OpenStack";
};
ororatech = {
# email: nixdevs@ororatech.com
shortName = "OroraTech GmbH. employees";
scope = "Team for packages maintained by employees of OroraTech GmbH.";
# Edits to this list should only be done by an already existing member.
members = [
kip93
victormeriqui
];
};
pantheon = {
members = [
davidak
@ -896,6 +916,18 @@ with lib.maintainers; {
shortName = "Vim/Neovim";
};
wdz = {
members = [
n0emis
netali
vidister
johannwagner
yuka
];
scope = "Group registration for WDZ GmbH team members who collectively maintain packages.";
shortName = "WDZ GmbH";
};
xfce = {
members = [
bobby285271

View file

@ -44,7 +44,7 @@ environment.systemPackages =
name = "hello-2.8";
src = fetchurl {
url = "mirror://gnu/hello/${name}.tar.gz";
sha256 = "0wqd8sjmxfskrflaxywc7gqw7sfawrfvdxd9skxawzfgyy0pzdz6";
hash = "sha256-5rd/gffPfa761Kn1tl3myunD8TuM+66oy1O7XqVGDXM=";
};
};
in
@ -67,7 +67,7 @@ stdenv.mkDerivation rec {
name = "hello-2.8";
src = fetchurl {
url = "mirror://gnu/hello/${name}.tar.gz";
sha256 = "0wqd8sjmxfskrflaxywc7gqw7sfawrfvdxd9skxawzfgyy0pzdz6";
hash = "sha256-5rd/gffPfa761Kn1tl3myunD8TuM+66oy1O7XqVGDXM=";
};
}
```

View file

@ -1,11 +1,7 @@
# Customising Packages {#sec-customising-packages}
Some packages in Nixpkgs have options to enable or disable optional
functionality or change other aspects of the package. For instance, the
Firefox wrapper package (which provides Firefox with a set of plugins
such as the Adobe Flash player) has an option to enable the Google Talk
plugin. It can be set in `configuration.nix` as follows:
`nixpkgs.config.firefox.enableGoogleTalkPlugin = true;`
functionality or change other aspects of the package.
::: {.warning}
Unfortunately, Nixpkgs currently lacks a way to query available
@ -13,7 +9,7 @@ configuration options.
:::
::: {.note}
Alternatively, many packages come with extensions one might add.
For example, many packages come with extensions one might add.
Examples include:
- [`passExtensions.pass-otp`](https://search.nixos.org/packages/query=passExtensions.pass-otp)
- [`python310Packages.requests`](https://search.nixos.org/packages/query=python310Packages.requests)
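For instance, a minimal sketch (using the `pass-otp` extension from the list above; this snippet is illustrative, not the only approach) in `configuration.nix`:

```nix
{ pkgs, ... }:
{
  environment.systemPackages = [
    # Sketch: pass built with the pass-otp extension.
    (pkgs.pass.withExtensions (exts: [ exts.pass-otp ]))
  ];
}
```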

View file

@ -25,8 +25,11 @@ checks:
since changes in their values are applied by systemd when systemd is
reloaded.
- `.mount` units are **reload**ed. These mostly come from the `/etc/fstab`
parser.
- `.mount` units are **reload**ed if only their `Options` changed. If anything
else changed (like `What`), they are **restart**ed, unless they are the mount
unit for `/` or `/nix`, in which case they are reloaded to prevent the system
from crashing (see the sketch after this list). Note that this applies to
`.mount` units themselves, not to mounts from `/etc/fstab`; those are explained in [](#sec-switching-systems).
- `.socket` units are currently ignored. This is to be fixed at a later
point.
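A minimal sketch of a `fileSystems` entry whose generated `.mount` unit would be reloaded when only its options change (the mount point, label, and options are hypothetical):

```nix
{
  fileSystems."/data" = {
    device = "/dev/disk/by-label/data"; # hypothetical device
    fsType = "ext4";
    # Changing only `options` causes the generated data.mount unit to be
    # reloaded; changing `device` would cause a restart instead.
    options = [ "noatime" ];
  };
}
```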

View file

@ -21,8 +21,9 @@ If the action is `switch` or `test`, the currently running system is inspected
and the actions to switch to the new system are calculated. This process takes
two data sources into account: `/etc/fstab` and the current systemd status.
Mounts and swaps are read from `/etc/fstab` and the corresponding actions are
generated. If a new mount is added, for example, the proper `.mount` unit is
marked to be started. The current systemd state is inspected, the difference
generated. If the options of a mount are modified, for example, the proper `.mount`
unit is reloaded (or restarted if anything else changed and it's neither the root
mount nor the Nix store). The current systemd state is inspected, the difference
between the current system and the desired configuration is calculated and
actions are generated to get to this state. There are a lot of nuances that can
be controlled by the units which are explained here.

View file

@ -353,7 +353,7 @@ When upgrading from a previous release, please be aware of the following incompa
Another benefit of the refactoring is that we can now issue reloads via either `pkill -HUP unbound` or `systemctl reload unbound` to reload the running configuration without taking the daemon offline. A prerequisite of this was that the unbound configuration is available at a well-known path on the file system. We are using the path `/etc/unbound/unbound.conf`, as that is the default in the CLI tooling, which in turn enables us to use `unbound-control` without passing a custom configuration location.
The module has also been reworked to be [RFC 0042](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md) compliant. As such, `sevices.unbound.extraConfig` has been removed and replaced by [services.unbound.settings](options.html#opt-services.unbound.settings). `services.unbound.interfaces` has been renamed to `services.unbound.settings.server.interface`.
The module has also been reworked to be [RFC 0042](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md) compliant. As such, `services.unbound.extraConfig` has been removed and replaced by [services.unbound.settings](options.html#opt-services.unbound.settings). `services.unbound.interfaces` has been renamed to `services.unbound.settings.server.interface`.
`services.unbound.forwardAddresses` and `services.unbound.allowedAccess` have also been changed to use the new settings interface. You can follow the instructions when executing `nixos-rebuild` to upgrade your configuration to use the new interface.
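For illustration, a minimal sketch of the new settings interface (the listen address and upstream resolver below are assumptions, not defaults):

```nix
{
  services.unbound = {
    enable = true;
    settings = {
      server.interface = [ "127.0.0.1" ];
      forward-zone = [
        {
          name = ".";
          forward-addr = "9.9.9.9"; # hypothetical upstream resolver
        }
      ];
    };
  };
}
```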

View file

@ -935,8 +935,7 @@ In addition to numerous new and upgraded packages, this release has the followin
using the `pomerium-cli` command, you should now install the `pomerium-cli`
package.
- The option
[services.networking.networkmanager.enableFccUnlock](#opt-networking.networkmanager.enableFccUnlock)
- The option `services.networking.networkmanager.enableFccUnlock`
was added to support FCC unlock procedures. Since release 1.18.4, the ModemManager
daemon no longer automatically performs the FCC unlock procedure by default. See
[the docs](https://modemmanager.org/docs/modemmanager/fcc-unlock/) for more details.

View file

@ -87,7 +87,7 @@ In addition to numerous new and updated packages, this release has the following
- [gmediarender](https://github.com/hzeller/gmrender-resurrect), a simple, headless UPnP/DLNA renderer. Available as [services.gmediarender](options.html#opt-services.gmediarender.enable).
- [go2rtc](https://github.com/AlexxIT/go2rtc), a camera streaming appliation with support for RTSP, WebRTC, HomeKit, FFMPEG, RTMP and other protocols. Available as [services.go2rtc](options.html#opt-services.go2rtc.enable).
- [go2rtc](https://github.com/AlexxIT/go2rtc), a camera streaming application with support for RTSP, WebRTC, HomeKit, FFMPEG, RTMP and other protocols. Available as [services.go2rtc](options.html#opt-services.go2rtc.enable).
- [goeland](https://github.com/slurdge/goeland), an alternative to rss2email written in Golang with many filters. Available as [services.goeland](#opt-services.goeland.enable).
@ -203,7 +203,7 @@ In addition to numerous new and updated packages, this release has the following
- `graylog` has been updated to version 5, which cannot be updated directly from the previously packaged version 3.3. If you had installed the previously packaged version 3.3, please follow the [upgrade path](https://go2docs.graylog.org/5-0/upgrading_graylog/upgrade_path.htm) from 3.3 to 4.0 to 4.3 to 5.0.
- `buildFHSUserEnv` is now called `buildFHSEnv` and uses FlatPak's Bubblewrap sandboxing tool rather than Nixpkgs' own chrootenv. The old chrootenv-based implemenation is still available via `buildFHSEnvChroot` but is considered deprecated and will be removed when the remaining uses inside Nixpkgs have been migrated. If your FHSEnv-wrapped application misbehaves when using the new bubblewrap implementation, please create an issue in Nixpkgs.
- `buildFHSUserEnv` is now called `buildFHSEnv` and uses FlatPak's Bubblewrap sandboxing tool rather than Nixpkgs' own chrootenv. The old chrootenv-based implementation is still available via `buildFHSEnvChroot` but is considered deprecated and will be removed when the remaining uses inside Nixpkgs have been migrated. If your FHSEnv-wrapped application misbehaves when using the new bubblewrap implementation, please create an issue in Nixpkgs.
- `nushell` has been updated to at least version 0.77.0, which includes potential breaking changes in aliases. The old aliases are now available as `old-alias` but it is recommended you migrate to the new format. See [Reworked aliases](https://www.nushell.sh/blog/2023-03-14-nushell_0_77.html#reworked-aliases-breaking-changes-kubouch).
@ -555,7 +555,7 @@ In addition to numerous new and updated packages, this release has the following
- `buildDunePackage` now defaults to `strictDeps = true` which means that any library should go into `buildInputs` or `checkInputs`. Any executable that is run on the building machine should go into `nativeBuildInputs` or `nativeCheckInputs` respectively. Example of executables are `ocaml`, `findlib` and `menhir`. PPXs are libraries which are built by dune and should therefore not go into `nativeBuildInputs`.
- `buildFHSUserEnv` is now called `buildFHSEnv` and uses FlatPak's Bubblewrap sandboxing tool rather than Nixpkgs' own chrootenv. The old chrootenv-based implemenation is still available via `buildFHSEnvChroot` but is considered deprecated and will be removed when the remaining uses inside Nixpkgs have been migrated. If your FHSEnv-wrapped application misbehaves when using the new bubblewrap implementation, please create an issue in Nixpkgs.
- `buildFHSUserEnv` is now called `buildFHSEnv` and uses FlatPak's Bubblewrap sandboxing tool rather than Nixpkgs' own chrootenv. The old chrootenv-based implementation is still available via `buildFHSEnvChroot` but is considered deprecated and will be removed when the remaining uses inside Nixpkgs have been migrated. If your FHSEnv-wrapped application misbehaves when using the new bubblewrap implementation, please create an issue in Nixpkgs.
- Top-level `buildPlatform`, `hostPlatform`, `targetPlatform` have been deprecated, use `stdenv.X` instead.

View file

@ -6,12 +6,34 @@
- Support for WiFi6 (IEEE 802.11ax) and WPA3-SAE-PK was enabled in the `hostapd` package, along with a significant rework of the hostapd module.
- LXD now supports virtual machine instances to complement the existing container support
- The `nixos-rebuild` command has been given a `list-generations` subcommand. See `man nixos-rebuild` for more details.
- [systemd](https://systemd.io) has been updated from v253 to v254, see [the release notes](https://github.com/systemd/systemd/blob/v254/NEWS#L3-L659) for more information on the changes.
- `boot.resumeDevice` **must be specified** when hibernating if not in EFI mode.
- systemd may warn about the permissions of your ESP partition (often `/boot`); this warning can be ignored for now while we look
into a satisfactory solution for this problem.
- Updating with `nixos-rebuild boot` and rebooting is recommended, since in some rare cases the `nixos-rebuild switch` into the new generation on a live system might fail due to missing mount units.
- [`sudo-rs`], a reimplementation of `sudo` in Rust, is now supported.
An experimental new module `security.sudo-rs` was added.
Switching to it (via `security.sudo.enable = false; security.sudo-rs.enable = true;`) introduces
slight changes in sudo behaviour, due to `sudo-rs`' current limitations:
- terminfo-related environment variables aren't preserved for `root` and `wheel`;
- `root` and `wheel` are not given the ability to set (or preserve)
arbitrary environment variables.
[`sudo-rs`]: https://github.com/memorysafety/sudo-rs/
## New Services {#sec-release-23.11-new-services}
- [MCHPRS](https://github.com/MCHPR/MCHPRS), a multithreaded Minecraft server built for redstone. Available as [services.mchprs](#opt-services.mchprs.enable).
- [acme-dns](https://github.com/joohoi/acme-dns), a limited DNS server to handle ACME DNS challenges easily and securely. Available as [services.acme-dns](#opt-services.acme-dns.enable).
- [frp](https://github.com/fatedier/frp), a fast reverse proxy to help you expose a local server behind a NAT or firewall to the Internet. Available as [services.frp](#opt-services.frp.enable).
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
- [river](https://github.com/riverwm/river), a dynamic tiling Wayland compositor. Available as [programs.river](#opt-programs.river.enable).
@ -20,17 +42,27 @@
- [mautrix-whatsapp](https://docs.mau.fi/bridges/go/whatsapp/index.html), a Matrix-WhatsApp puppeting bridge.
- [hddfancontrol](https://github.com/desbma/hddfancontrol), a service to regulate fan speeds based on hard drive temperature. Available as [services.hddfancontrol](#opt-services.hddfancontrol.enable).
- [GoToSocial](https://gotosocial.org/), an ActivityPub social network server, written in Golang. Available as [services.gotosocial](#opt-services.gotosocial.enable).
- [Castopod](https://castopod.org/), an open-source hosting platform made for podcasters who want to engage and interact with their audience. Available as [services.castopod](#opt-services.castopod.enable).
- [Typesense](https://github.com/typesense/typesense), a fast, typo-tolerant search engine for building delightful search experiences. Available as [services.typesense](#opt-services.typesense.enable).
- [NS-USBLoader](https://github.com/developersu/ns-usbloader/), an all-in-one tool for managing Nintendo Switch homebrew. Available as [programs.ns-usbloader](#opt-programs.ns-usbloader.enable).
- [Mobilizon](https://joinmobilizon.org/), a Fediverse platform for publishing events.
- [Anuko Time Tracker](https://github.com/anuko/timetracker), a simple, easy to use, open source time tracking system. Available as [services.anuko-time-tracker](#opt-services.anuko-time-tracker.enable).
- [Prometheus MySQL exporter](https://github.com/prometheus/mysqld_exporter), a MySQL server exporter for Prometheus. Available as [services.prometheus.exporters.mysqld](#opt-services.prometheus.exporters.mysqld.enable).
- [sitespeed-io](https://sitespeed.io), a tool that can generate metrics (timings, diagnostics) for websites. Available as [services.sitespeed-io](#opt-services.sitespeed-io.enable).
- [Jool](https://nicmx.github.io/Jool/en/index.html), an Open Source implementation of IPv4/IPv6 translation on Linux. Available as [networking.jool.enable](#opt-networking.jool.enable).
- [stalwart-mail](https://stalw.art), an all-in-one email server (SMTP, IMAP, JMAP). Available as [services.stalwart-mail](#opt-services.stalwart-mail.enable).
- [Jool](https://nicmx.github.io/Jool/en/index.html), a kernelspace NAT64 and SIIT implementation, providing translation between IPv4 and IPv6. Available as [networking.jool.enable](#opt-networking.jool.enable).
- [Apache Guacamole](https://guacamole.apache.org/), a cross-platform, clientless remote desktop gateway. Available as [services.guacamole-server](#opt-services.guacamole-server.enable) and [services.guacamole-client](#opt-services.guacamole-client.enable) services.
@ -46,8 +78,26 @@
- [eris-server](https://codeberg.org/eris/eris-go). [ERIS](https://eris.codeberg.page/) is an encoding for immutable storage and this server provides block exchange as well as content decoding over HTTP and through a FUSE file-system. Available as [services.eris-server](#opt-services.eris-server.enable).
- hardware/infiniband.nix adds infiniband subnet manager support using an [opensm](https://github.com/linux-rdma/opensm) systemd-template service, instantiated on card guids. The module also adds kernel modules and cli tooling to help administrators debug and measure performance. Available as [hardware.infiniband.enable](#opt-hardware.infiniband.enable).
- [Honk](https://humungus.tedunangst.com/r/honk), a complete ActivityPub server with minimal setup and support costs.
Available as [services.honk](#opt-services.honk.enable).
- [NNCP](http://www.nncpgo.org/). Added nncp-daemon and nncp-caller services. Configuration is set with [programs.nncp.settings](#opt-programs.nncp.settings) and the daemons are enabled at [services.nncp](#opt-services.nncp.caller.enable).
- [tuxedo-rs](https://github.com/AaronErhardt/tuxedo-rs), Rust utilities for interacting with hardware from TUXEDO Computers.
- [audiobookshelf](https://github.com/advplyr/audiobookshelf/), a self-hosted audiobook and podcast server. Available as [services.audiobookshelf](#opt-services.audiobookshelf.enable).
- [ZITADEL](https://zitadel.com), a turnkey identity and access management platform. Available as [services.zitadel](#opt-services.zitadel.enable).
- [netclient](https://github.com/gravitl/netclient), an automated WireGuard® Management Client. Available as [services.netclient](#opt-services.netclient.enable).
## Backward Incompatibilities {#sec-release-23.11-incompatibilities}
- `network-online.target` has been fixed to no longer time out for systems with `networking.useDHCP = true` and `networking.useNetworkd = true`.
Workarounds for this can be removed.
- The `boot.loader.raspberryPi` options have been marked deprecated, with intent for removal in NixOS 24.11. They had a limited use-case and do not work as people expect. They required either very old installs ([before mid-2019](https://github.com/NixOS/nixpkgs/pull/62462)) or customized builds out of scope of the standard and generic AArch64 support. That option set never supported the Raspberry Pi 4 family of devices.
- `python3.pkgs.sequoia` was removed in favor of `python3.pkgs.pysequoia`. The latter package is based on upstream's dedicated repository for sequoia's Python bindings, where the Python bindings from [gitlab:sequoia-pgp/sequoia](https://gitlab.com/sequoia-pgp/sequoia) were removed long ago.
@ -64,12 +114,25 @@
- `python3.pkgs.fetchPypi` (and `python3Packages.fetchPypi`) has been deprecated in favor of top-level `fetchPypi`.
- `pass` no longer contains `password-store.el`. Users should get `password-store.el` from the Emacs lisp package set `emacs.pkgs.password-store`.
- `services.knot` now supports `.settings` from RFC42. The previous `.extraConfig` still works the same, but it now displays a warning.
- `mu` no longer installs `mu4e` files by default. Users should get `mu4e` from the Emacs lisp package set `emacs.pkgs.mu4e`.
- `mariadb` now defaults to `mariadb_1011` instead of `mariadb_106`, meaning the default version was upgraded from 10.6.x to 10.11.x. See the [upgrade notes](https://mariadb.com/kb/en/upgrading-from-mariadb-10-6-to-mariadb-10-11/) for potential issues.
- `getent` has been moved from `glibc`'s `bin` output to its own dedicated output, reducing closure size for many dependents. Dependents using the `getent` alias should not be affected; others should move from using `glibc.bin` or `getBin glibc` to `getent` (which also improves compatibility with non-glibc platforms).
- The `users.users.<name>.passwordFile` has been renamed to `users.users.<name>.hashedPasswordFile` to avoid possible confusions. The option is in fact the file-based version of `hashedPassword`, not `password`, and expects a file containing the {manpage}`crypt(3)` hash of the user password.
- The `services.ananicy.extraRules` option now has the type of `listOf attrs` instead of `string`.
- `buildVimPluginFrom2Nix` has been renamed to `buildVimPlugin`, which
now skips `configurePhase` and `buildPhase`.
- JACK tools (`jack_*` except `jack_control`) have moved from the `jack2` package to `jack-example-tools`.
- The `matrix-synapse` package & module have undergone some significant internal changes, for most setups no intervention is needed, though:
- The option [`services.matrix-synapse.package`](#opt-services.matrix-synapse.package) is now read-only. For modifying the package, use an overlay which modifies `matrix-synapse-unwrapped` instead. More on that below.
- The `enableSystemd` & `enableRedis` arguments have been removed and `matrix-synapse` has been renamed to `matrix-synapse-unwrapped`. Also, several optional dependencies (such as `psycopg2` or `authlib`) have been removed.
@ -77,12 +140,18 @@
- A list of all extras (and the extras enabled by default) can be found at the [option's reference for `services.matrix-synapse.extras`](#opt-services.matrix-synapse.extras).
- In some cases (e.g. for running synapse workers) it was necessary to re-use the `PYTHONPATH` of `matrix-synapse.service`'s environment to have all plugins available. This isn't necessary anymore, instead `config.services.matrix-synapse.package` can be used as it points to the wrapper with properly configured `extras` and also all plugins defined via [`services.matrix-synapse.plugins`](#opt-services.matrix-synapse.plugins) available. This is also the reason for why the option is read-only now, it's supposed to be set by the module only.
- `netbox` was updated to 3.6. NixOS' `services.netbox.package` still defaults to 3.5 if `stateVersion` is earlier than 23.11. Please review upstream's breaking changes [for 3.6.0](https://github.com/netbox-community/netbox/releases/tag/v3.6.0) and upgrade NetBox by changing `services.netbox.package`. Database migrations will be run automatically.
- `etcd` has been updated to 3.5, you will want to read the [3.3 to 3.4](https://etcd.io/docs/v3.5/upgrades/upgrade_3_4/) and [3.4 to 3.5](https://etcd.io/docs/v3.5/upgrades/upgrade_3_5/) upgrade guides
- `gitlab` installations created or updated between versions \[15.11.0, 15.11.2] have an incorrect database schema. This will become a problem when upgrading to `gitlab` >=16.2.0. A workaround for affected users can be found in the [GitLab docs](https://docs.gitlab.com/ee/update/versions/gitlab_16_changes.html#undefined-column-error-upgrading-to-162-or-later).
- `consul` has been updated to `1.16.0`. See the [release note](https://github.com/hashicorp/consul/releases/tag/v1.16.0) for more details. Once a new Consul version has started and upgraded its data directory, it generally cannot be downgraded to the previous version.
- `himalaya` has been updated to `0.8.0`, which drops the native TLS support (in favor of Rustls) and add OAuth 2.0 support. See the [release note](https://github.com/soywod/himalaya/releases/tag/v0.8.0) for more details.
- `nix-prefetch-git` now ignores global and user git config, to improve reproducibility.
- The [services.caddy.acmeCA](#opt-services.caddy.acmeCA) option now defaults to `null` instead of `"https://acme-v02.api.letsencrypt.org/directory"`, to use all of Caddy's default ACME CAs and enable Caddy's automatic issuer fallback feature by default, as recommended by upstream.
- The default priorities of [`services.nextcloud.phpOptions`](#opt-services.nextcloud.phpOptions) have changed. This means that e.g.
@ -105,12 +174,23 @@
- PHP now defaults to PHP 8.2, updated from 8.1.
- GraalVM has been updated to the latest version, and this brings significant changes. Upstream no longer releases multiple versions targeting different JVMs, so now we only have one GraalVM derivation (`graalvm-ce`). While at first glance the version may seem a downgrade (22.3.1 -> 21.0.0), the major version now follows the JVM it targets (so this latest version targets JVM 21). Also, some products like `llvm-installable-svm` and `native-image-svm` were incorporated into the main GraalVM derivation, so they're included by default.
- GraalPy (`graalCEPackages.graalpy`), TruffleRuby (`graalCEPackages.truffleruby`), GraalJS (`graalCEPackages.graaljs`) and GraalNodeJS (`graalCEPackages.graalnodejs`) are now independent from the main GraalVM derivation.
- The ISC DHCP package and corresponding module have been removed, because they are end of life upstream. See https://www.isc.org/blogs/isc-dhcp-eol/ for details and switch to a different DHCP implementation like kea or dnsmasq.
- `prometheus-unbound-exporter` has been replaced by the Let's Encrypt maintained version, since the previous version was archived. This requires some changes to the module configuration; most notably, `controlInterface` needs to be migrated
to `unbound.host` and requires either the `tcp://` or `unix://` URI scheme.
- `odoo` now defaults to 16, updated from 15.
- `util-linux` is now supported on Darwin and is no longer an alias to `unixtools`. Use the `unixtools.util-linux` package for access to the Apple variants of the utilities.
- `services.keyd` changed its API. Now you can create multiple configuration files.
- `baloo`, the file indexer/search engine used by KDE, now has a patch to prevent files from constantly being reindexed when the device IDs of their underlying storage change. This happens frequently when using btrfs or LVM. The patch has not yet been accepted upstream but it provides a significantly improved experience. When upgrading, reset baloo to get a clean index: `balooctl disable ; balooctl purge ; balooctl enable`.
- `services.ddclient` has been removed on the request of the upstream maintainer because it is unmaintained and has bugs. Please switch to a different software like `inadyn` or `knsupdate`.
- The `vlock` program from the `kbd` package has been moved into its own package output and should now be referenced explicitly as `kbd.vlock` or replaced with an alternative such as the standalone `vlock` package or `physlock`.
@ -129,6 +209,10 @@
- `spamassassin` no longer supports the `Hashcash` module. The module needs to be removed from the `loadplugin` list if it was copied over from the default `initPreConf` option.
- `nano` was removed from `environment.defaultPackages`. To avoid leaving systems without an editor, `programs.nano.enable` is now enabled by default.
- `programs.nano.nanorc` and `programs.nano.syntaxHighlight` no longer have an effect unless `programs.nano.enable` is set to true which is the default.
- `services.outline.sequelizeArguments` has been removed, as `outline` no longer executes database migrations via the `sequelize` cli.
- The binary of the package `cloud-sql-proxy` has changed from `cloud_sql_proxy` to `cloud-sql-proxy`.
@ -137,11 +221,13 @@
- The Caddy module gained a new option named `services.caddy.enableReload` which is enabled by default. It allows reloading the service instead of restarting it, if only a config file has changed. This option must be disabled if you have turned off the [Caddy admin API](https://caddyserver.com/docs/caddyfile/options#admin). If you keep this option enabled, you should consider setting [`grace_period`](https://caddyserver.com/docs/caddyfile/options#grace-period) to a non-infinite value to prevent Caddy from delaying the reload indefinitely.
- mdraid support is now optional. This reduces initramfs size and prevents the potentially undesired automatic detection and activation of software RAID pools. It is disabled by default in new configurations (determined by `stateVersion`), but the appropriate settings will be generated by `nixos-generate-config` when installing to a software RAID device, so the standard installation procedure should be unaffected. If you have custom configs relying on mdraid, ensure that you use `stateVersion` correctly or set `boot.swraid.enable` manually.
- mdraid support is now optional. This reduces initramfs size and prevents the potentially undesired automatic detection and activation of software RAID pools. It is disabled by default in new configurations (determined by `stateVersion`), but the appropriate settings will be generated by `nixos-generate-config` when installing to a software RAID device, so the standard installation procedure should be unaffected. If you have custom configs relying on mdraid, ensure that you use `stateVersion` correctly or set `boot.swraid.enable` manually. On systems with an updated `stateVersion` we now also emit warnings if `mdadm.conf` does not contain the minimum required configuration necessary to run the dynamically enabled monitoring daemons.
- The `go-ethereum` package has been updated to v1.12.0. This drops support for proof-of-work. Its GraphQL API now encodes all numeric values as hex strings and the GraphQL UI is updated to version 2.0. The default database has changed from `leveldb` to `pebble` but `leveldb` can be forced with the --db.engine=leveldb flag. The `checkpoint-admin` command was [removed along with trusted checkpoints](https://github.com/ethereum/go-ethereum/pull/27147).
- The default `kops` version is now 1.27.0 and support for 1.24 and older has been dropped.
- The `aseprite-unfree` package has been upgraded from 1.2.16.3 to 1.2.40. The free version of aseprite has been dropped because it is EOL and the package attribute now points to the unfree version. A maintained fork of the last free version of Aseprite, named 'LibreSprite', is available in the `libresprite` package.
- The default `kops` version is now 1.28.0 and support for 1.25 and older has been dropped.
- `pharo` has been updated to the latest stable (PharoVM 10.0.5), which is compatible with the latest stable and oldstable images (Pharo 10 and 11). The VM in question is the 64bit Spur. The 32bit version has been dropped due to lack of maintenance. The Cog VM has been deleted because it is severely outdated. Finally, the `pharo-launcher` package has been deleted because it was not compatible with the newer VM, and due to lack of maintenance.
@ -149,8 +235,37 @@
- Emacs macport version 29 was introduced.
- The option `services.networking.networkmanager.enableFccUnlock` was removed in favor of `networking.networkmanager.fccUnlockScripts`, which allows specifying unlock scripts explicitly. The previous option simply did enable all unlock scripts bundled with ModemManager, which is risky, and didn't allow using vendor-provided unlock scripts at all.
- The `html-proofer` package has been updated from major version 3 to major version 5, which includes [breaking changes](https://github.com/gjtorikian/html-proofer/blob/v5.0.8/UPGRADING.md).
- `kratos` has been updated from 0.10.1 to the first stable version 1.0.0, please read the [0.10.1 to 0.11.0](https://github.com/ory/kratos/releases/tag/v0.11.0), [0.11.0 to 0.11.1](https://github.com/ory/kratos/releases/tag/v0.11.1), [0.11.1 to 0.13.0](https://github.com/ory/kratos/releases/tag/v0.13.0) and [0.13.0 to 1.0.0](https://github.com/ory/kratos/releases/tag/v1.0.0) upgrade guides. The most notable breaking change is the introduction of one-time passwords (`code`) and update of the default recovery strategy from `link` to `code`.
- The `hail` NixOS module was removed, as `hail` was unmaintained since 2017.
- Package `noto-fonts-emoji` was renamed to `noto-fonts-color-emoji`;
see [#221181](https://github.com/NixOS/nixpkgs/issues/221181).
- Package `pash` was removed due to being archived upstream. Use `powershell` as an alternative.
- `security.sudo.extraRules` now includes `root`'s default rule, with ordering
priority 400. This is functionally identical for users not specifying rule
order, or relying on `mkBefore` and `mkAfter`, but may impact users calling
`mkOrder n` with n ≤ 400.
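A minimal sketch of a rule explicitly ordered after the default `root` rule (the user and command are hypothetical):

```nix
{ lib, ... }:
{
  security.sudo.extraRules = lib.mkOrder 500 [
    {
      users = [ "alice" ]; # hypothetical user
      commands = [
        {
          command = "/run/current-system/sw/bin/systemctl";
          options = [ "NOPASSWD" ];
        }
      ];
    }
  ];
}
```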
- `networking.networkmanager.firewallBackend` was removed, as NixOS now uses iptables-nftables-compat even when using iptables; therefore, NetworkManager now uses the nftables backend unconditionally.
- [`lib.lists.foldl'`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.foldl-prime) now always evaluates the initial accumulator argument first.
If you depend on the lazier behavior, consider using [`lib.lists.foldl`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.foldl) or [`builtins.foldl'`](https://nixos.org/manual/nix/stable/language/builtins.html#builtins-foldl') instead.
- [`lib.attrsets.foldlAttrs`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.attrsets.foldlAttrs) now always evaluates the initial accumulator argument first.
- `rome` was removed because it is no longer maintained and is succeeded by `biome`.
- The `services.mtr-exporter.target` has been removed in favor of `services.mtr-exporter.jobs` which allows specifying multiple targets.
- Setting `nixpkgs.config` options while providing an external `pkgs` instance will now raise an error instead of silently ignoring the options. NixOS modules no longer set `nixpkgs.config` to accommodate this. This specifically affects the `services.locate`, `services.xserver.displayManager.lightdm.greeters.tiny` and `programs.firefox` NixOS modules. No manual intervention should be required in most cases; however, configurations relying on those modules affecting packages outside the system environment should switch to explicit overlays.
## Other Notable Changes {#sec-release-23.11-notable-changes}
- The Cinnamon module now enables XDG desktop integration by default. If you are experiencing collisions related to xdg-desktop-portal-gtk you can safely remove `xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];` from your NixOS configuration.
@ -175,6 +290,8 @@
- New options were added to `services.searx` for better SearXNG support, including options for the built-in rate limiter and bot protection and automatically configuring a local redis server.
- `jq` was updated to 1.7, its [first release in 5 years](https://github.com/jqlang/jq/releases/tag/jq-1.7).
- A new option was added to the virtualisation module that enables specifying explicitly named network interfaces in QEMU VMs. The existing `virtualisation.vlans` is still supported for cases where the name of the network interface is irrelevant.
- DocBook option documentation is no longer supported, all module documentation now uses markdown.
@ -191,22 +308,83 @@
Unfortunately, all servers supporting new clients (newer versions of anki-sync-server, Anki's built-in sync server and this new Rust package) do not support the older sync protocol that was used in the old server, so such old clients will also need updating; in particular, the anki package in nixpkgs is also being updated in this release.
The module update takes care of the new config syntax and the data itself (user login and cards) are compatible, so users of the module will be able to just log in again after updating both client and server without any extra action.
- `services.matrix-synapse` has new options to configure worker processes for matrix-synapse using [`services.matrix-synapse.workers`](#opt-services.matrix-synapse.workers). It's also now possible to configure a local redis server using [`services.matrix-synapse.configureRedisLocally`](#opt-services.matrix-synapse.configureRedisLocally).
- `services.nginx` gained a `defaultListen` option at server-level with support for PROXY protocol listeners, also `proxyProtocol` is now exposed in `services.nginx.virtualHosts.<name>.listen` option. It is now possible to run PROXY listeners and non-PROXY listeners at a server-level, see [#213510](https://github.com/NixOS/nixpkgs/pull/213510/) for more details.
- `services.restic.backups` now adds wrapper scripts to your system path, which set the same environment variables as the service, so restic operations can easily be run from the command line. This behavior can be disabled by setting `createWrapper` to `false`, per backup configuration.
- `services.prometheus.exporters` has a new exporter to monitor electrical power consumption based on PowercapRAPL sensor called [Scaphandre](https://github.com/hubblo-org/scaphandre), see [#239803](https://github.com/NixOS/nixpkgs/pull/239803) for more details.
- The MariaDB C client library was upgraded from 3.2.x to 3.3.x. It is recommended to review the [upstream release notes](https://mariadb.com/kb/en/mariadb-connector-c-33-release-notes/).
- The module `services.calibre-server` has new options to configure the `host`, `port`, `auth.enable`, `auth.mode` and `auth.userDb` path, see [#216497](https://github.com/NixOS/nixpkgs/pull/216497/) for more details.
- Mattermost has been upgraded to extended support version 8.1 as the previously
packaged extended support version 7.8 is [reaching end of life](https://docs.mattermost.com/upgrade/extended-support-release.html).
Migration may take some time, see the [changelog](https://docs.mattermost.com/install/self-managed-changelog.html#release-v8-1-extended-support-release)
and [important upgrade notes](https://docs.mattermost.com/upgrade/important-upgrade-notes.html).
- `services.prometheus.exporters` has a new [exporter](https://github.com/hipages/php-fpm_exporter) to monitor PHP-FPM processes, see [#240394](https://github.com/NixOS/nixpkgs/pull/240394) for more details.
- `services.github-runner` / `services.github-runners.<name>` gained the option `nodeRuntimes`. The option defaults to `[ "node20" ]`, i.e., the service supports Node.js 20 GitHub Actions only. The list of Node.js versions accepted by `nodeRuntimes` tracks the versions the upstream GitHub Actions runner supports. See [#249103](https://github.com/NixOS/nixpkgs/pull/249103) for details.
- `programs.gnupg.agent.pinentryFlavor` is now set in `/etc/gnupg/gpg-agent.conf`, and will no longer take precedence over a `pinentry-program` set in `~/.gnupg/gpg-agent.conf`.
- `programs.gnupg` now has the option `agent.settings` to set verbatim config values in `/etc/gnupg/gpg-agent.conf`.
- `dockerTools.buildImage`, `dockerTools.buildLayeredImage` and `dockerTools.streamLayeredImage` now use `lib.makeOverridable` to allow `dockerTools`-based images to be customized more efficiently at the nix-level.
- `services.influxdb2` now supports doing an automatic initial setup and provisioning of users, organizations, buckets and authentication tokens, see [#249502](https://github.com/NixOS/nixpkgs/pull/249502) for more details.
- `wrapHelm` now exposes `passthru.pluginsDir` which can be passed to `helmfile`. For convenience, a top-level package `helmfile-wrapped` has been added, which inherits `passthru.pluginsDir` from `kubernetes-helm-wrapped`. See [#217768](https://github.com/NixOS/nixpkgs/issues/217768) for details.
- `boot.initrd.network.udhcp.enable` allows control over DHCP during stage 1, regardless of what `networking.useDHCP` is set to.
- Suricata was upgraded from 6.0 to 7.0 and no longer considers HTTP/2 support as experimental, see [upstream release notes](https://forum.suricata.io/t/suricata-7-0-0-released/3715) for more details.
- `networking.nftables` now has the option `networking.nftables.table.<table>` to create tables
and have them be updated atomically, instead of flushing the ruleset.
- `networking.nftables` is no longer flushing all rulesets on every reload.
Use `networking.nftables.flushRuleset = true;` to get back the old behaviour.
- The `cawbird` package is dropped from nixpkgs, as it got broken by the Twitter API closing down and has been abandoned upstream.
- `hardware.nvidia` gained `datacenter` options for enabling NVIDIA Data Center drivers and configuration of NVLink/NVSwitch topologies through `nv-fabricmanager`.
- Certificate generation via the `security.acme` now limits the concurrent number of running certificate renewals and generation jobs, to avoid spiking resource usage when processing many certificates at once. The limit defaults to *5* and can be adjusted via `maxConcurrentRenewals`. Setting it to *0* disables the limits altogether.
- New `boot.bcache.enable` (default enabled) allows completely removing `bcache` mount support.
- The module `services.mbpfan` now has the option `aggressive` enabled by default for better heat moderation. You can disable it to restore the upstream defaults.
- `security.sudo` now provides two extra options that do not change the
module's default behaviour:
- `defaultOptions` controls the options used for the default rules;
- `keepTerminfo` controls whether `TERMINFO` and `TERMINFO_DIRS` are preserved
for `root` and the `wheel` group.
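A minimal sketch using both options (the values shown are illustrative):

```nix
{
  security.sudo = {
    defaultOptions = [ "SETENV" ]; # options applied to the default rules
    keepTerminfo = true;           # preserve TERMINFO / TERMINFO_DIRS
  };
}
```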
- CoreDNS can now be built with external plugins by overriding `externalPlugins` and `vendorHash` arguments like this:
```
services.coredns = {
enable = true;
package = pkgs.coredns.override {
externalPlugins = [
{name = "fanout"; repo = "github.com/networkservicemesh/fanout"; version = "v1.9.1";}
];
vendorHash = "<SRI hash>";
};
};
```
To get the necessary SRI hash, set `vendorHash = "";`. The build will fail and produce the correct `vendorHash` in the error message.
If you use this feature, updates to CoreDNS may require updating `vendorHash` by following these steps again.
- `fusuma` now enables the following plugins: [appmatcher](https://github.com/iberianpig/fusuma-plugin-appmatcher), [keypress](https://github.com/iberianpig/fusuma-plugin-keypress), [sendkey](https://github.com/iberianpig/fusuma-plugin-sendkey), [tap](https://github.com/iberianpig/fusuma-plugin-tap) and [wmctrl](https://github.com/iberianpig/fusuma-plugin-wmctrl).
## Nixpkgs internals {#sec-release-23.11-nixpkgs-internals}
- The use of `sourceRoot = "source";`, `sourceRoot = "source/subdir";`, and similar lines in package derivations using the default `unpackPhase` is deprecated as it requires `unpackPhase` to always produce a directory named "source". Use `sourceRoot = src.name`, `sourceRoot = "${src.name}/subdir";`, or `setSourceRoot = "sourceRoot=$(echo */subdir)";` or similar instead.
@ -225,7 +403,7 @@ The module update takes care of the new config syntax and the data itself (user
- The `qemu-vm.nix` module by default now identifies block devices via
persistent names available in `/dev/disk/by-*`. Because the rootDevice is
identfied by its filesystem label, it needs to be formatted before the VM is
identified by its filesystem label, it needs to be formatted before the VM is
started. The functionality of automatically formatting the rootDevice in the
initrd is removed from the QEMU module. However, for tests that depend on
this functionality, a test utility for the scripted initrd is added
@ -234,3 +412,16 @@ The module update takes care of the new config syntax and the data itself (user
./common/auto-format-root-device.nix ];` When you use the systemd initrd, you
can automatically format the root device by setting
`virtualisation.fileSystems."/".autoFormat = true;`.
- `python3.pkgs.flitBuildHook` has been removed. Use `flit-core` and `format = "pyproject"` instead.
- The `qemu-vm.nix` module now supports disabling overriding `fileSystems` with
`virtualisation.fileSystems`. This enables the user to boot VMs from
"external" disk images not created by the qemu-vm module. You can stop the
qemu-vm module from overriding `fileSystems` by setting
`virtualisation.fileSystems = lib.mkForce { };`.
- The `electron` packages now place their application files in `$out/libexec/electron` instead of `$out/lib/electron`. Packages using electron-builder will fail to build and need to be adjusted by changing `lib` to `libexec`.
- `teleport` has been upgraded from major version 12 to major version 14. Please see upstream [upgrade instructions](https://goteleport.com/docs/management/operations/upgrading/) and release notes for versions [13](https://goteleport.com/docs/changelog/#1300-050823) and [14](https://goteleport.com/docs/changelog/#1400-092023). Note that Teleport does not officially support upgrades across more than one major version at a time. If you're running Teleport server components, it is recommended to first upgrade to an intermediate 13.x version by setting `services.teleport.package = pkgs.teleport_13`. Afterwards, this option can be removed to upgrade to the default version (14).

View file

@ -0,0 +1,65 @@
# Builds a btrfs image containing a populated /nix/store with the closure
# of store paths passed in the storePaths parameter, in addition to the
# contents of a directory that can be populated with commands. The
# generated image is sized to only fit its contents, with the expectation
# that a script resizes the filesystem at boot time.
{ pkgs
, lib
# List of derivations to be included
, storePaths
# Whether or not to compress the resulting image with zstd
, compressImage ? false, zstd
# Shell commands to populate the ./files directory.
# All files in that directory are copied to the root of the FS.
, populateImageCommands ? ""
, volumeLabel
, uuid ? "44444444-4444-4444-8888-888888888888"
, btrfs-progs
}:
let
sdClosureInfo = pkgs.buildPackages.closureInfo { rootPaths = storePaths; };
in
pkgs.stdenv.mkDerivation {
name = "btrfs-fs.img${lib.optionalString compressImage ".zst"}";
nativeBuildInputs = [ btrfs-progs ] ++ lib.optional compressImage zstd;
buildCommand =
''
${if compressImage then "img=temp.img" else "img=$out"}
set -x
(
mkdir -p ./files
${populateImageCommands}
)
mkdir -p ./rootImage/nix/store
xargs -I % cp -a --reflink=auto % -t ./rootImage/nix/store/ < ${sdClosureInfo}/store-paths
(
GLOBIGNORE=".:.."
shopt -u dotglob
for f in ./files/*; do
cp -a --reflink=auto -t ./rootImage/ "$f"
done
)
cp ${sdClosureInfo}/registration ./rootImage/nix-path-registration
touch $img
mkfs.btrfs -L ${volumeLabel} -U ${uuid} -r ./rootImage --shrink $img
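# -r populates the filesystem from ./rootImage; --shrink sizes the image to just fit its contents.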
if ! btrfs check $img; then
echo "--- 'btrfs check' failed for BTRFS image ---"
exit 1
fi
if [ ${builtins.toString compressImage} ]; then
echo "Compressing image"
zstd -v --no-progress ./$img -o $out
fi
'';
}
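
How one might call this builder (the file name `make-btrfs-fs.nix` and the `callPackage` wiring are assumptions; `lib`, `zstd`, and `btrfs-progs` are filled in automatically):

```nix
{ pkgs ? import <nixpkgs> { } }:

pkgs.callPackage ./make-btrfs-fs.nix {
  storePaths = [ pkgs.hello ];
  volumeLabel = "nixos";
  compressImage = true;  # produces btrfs-fs.img.zst
  populateImageCommands = ''
    mkdir -p ./files/etc
    echo example > ./files/etc/example
  '';
}
```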

View file

@ -80,6 +80,10 @@ in rec {
optional (attr ? ${name} && !elem attr.${name} values)
"Systemd ${group} field `${name}' cannot have value `${toString attr.${name}}'.";
assertValuesSomeOfOr = name: values: default: group: attr:
optional (attr ? ${name} && !(all (x: elem x values) (splitString " " attr.${name}) || attr.${name} == default))
"Systemd ${group} field `${name}' cannot have value `${toString attr.${name}}'.";
assertHasField = name: group: attr:
optional (!(attr ? ${name}))
"Systemd ${group} field `${name}' must exist.";
@ -274,7 +278,7 @@ in rec {
});
in "${out}/bin/${scriptName}";
unitConfig = { config, options, ... }: {
unitConfig = { config, name, options, ... }: {
config = {
unitConfig =
optionalAttrs (config.requires != [])
@ -294,9 +298,9 @@ in rec {
// optionalAttrs (config.requisite != [])
{ Requisite = toString config.requisite; }
// optionalAttrs (config ? restartTriggers && config.restartTriggers != [])
{ X-Restart-Triggers = "${pkgs.writeText "X-Restart-Triggers" (toString config.restartTriggers)}"; }
{ X-Restart-Triggers = "${pkgs.writeText "X-Restart-Triggers-${name}" (toString config.restartTriggers)}"; }
// optionalAttrs (config ? reloadTriggers && config.reloadTriggers != [])
{ X-Reload-Triggers = "${pkgs.writeText "X-Reload-Triggers" (toString config.reloadTriggers)}"; }
{ X-Reload-Triggers = "${pkgs.writeText "X-Reload-Triggers-${name}" (toString config.reloadTriggers)}"; }
// optionalAttrs (config.description != "") {
Description = config.description; }
// optionalAttrs (config.documentation != []) {
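
The hunk above threads the unit's `name` into the `writeText` derivation names, so each unit gets its own `X-Restart-Triggers-<name>` file instead of sharing one store path. Module code uses the triggers as before; a minimal sketch (the service name and config file are hypothetical):

```nix
{ config, pkgs, ... }: {
  environment.etc."my-daemon.conf".text = "key=value";

  systemd.services.my-daemon = {
    wantedBy = [ "multi-user.target" ];
    serviceConfig.ExecStart = "${pkgs.hello}/bin/hello";
    # any change here produces a new X-Restart-Triggers-my-daemon
    # store path, which restarts the unit on activation
    restartTriggers = [ config.environment.etc."my-daemon.conf".source ];
  };
}
```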

View file

@ -4,19 +4,20 @@
, qemu_pkg ? qemu_test
, coreutils
, imagemagick_light
, libtiff
, netpbm
, qemu_test
, socat
, ruff
, tesseract4
, vde2
, extraPythonPackages ? (_ : [])
}:
python3Packages.buildPythonApplication rec {
python3Packages.buildPythonApplication {
pname = "nixos-test-driver";
version = "1.1";
src = ./.;
format = "pyproject";
propagatedBuildInputs = [
coreutils
@ -31,14 +32,13 @@ python3Packages.buildPythonApplication rec {
++ extraPythonPackages python3Packages;
doCheck = true;
nativeCheckInputs = with python3Packages; [ mypy pylint black ];
nativeCheckInputs = with python3Packages; [ mypy ruff black ];
checkPhase = ''
mypy --disallow-untyped-defs \
--no-implicit-optional \
--pretty \
--no-color-output \
--ignore-missing-imports ${src}/test_driver
pylint --errors-only --enable=unused-import ${src}/test_driver
black --check --diff ${src}/test_driver
echo -e "\x1b[32m## run mypy\x1b[0m"
mypy test_driver extract-docstrings.py
echo -e "\x1b[32m## run ruff\x1b[0m"
ruff .
echo -e "\x1b[32m## run black\x1b[0m"
black --check --diff .
'';
}

View file

@ -1,5 +1,6 @@
import ast
import sys
from pathlib import Path
"""
This program takes all the Machine class methods and prints its methods in
@ -40,27 +41,34 @@ some_function(param1, param2)
"""
assert len(sys.argv) == 2
with open(sys.argv[1], "r") as f:
module = ast.parse(f.read())
def main() -> None:
if len(sys.argv) != 2:
print(f"Usage: {sys.argv[0]} <path-to-test-driver>")
sys.exit(1)
class_definitions = (node for node in module.body if isinstance(node, ast.ClassDef))
module = ast.parse(Path(sys.argv[1]).read_text())
machine_class = next(filter(lambda x: x.name == "Machine", class_definitions))
assert machine_class is not None
class_definitions = (node for node in module.body if isinstance(node, ast.ClassDef))
function_definitions = [
node for node in machine_class.body if isinstance(node, ast.FunctionDef)
]
function_definitions.sort(key=lambda x: x.name)
machine_class = next(filter(lambda x: x.name == "Machine", class_definitions))
assert machine_class is not None
for f in function_definitions:
docstr = ast.get_docstring(f)
if docstr is not None:
args = ", ".join((a.arg for a in f.args.args[1:]))
args = f"({args})"
function_definitions = [
node for node in machine_class.body if isinstance(node, ast.FunctionDef)
]
function_definitions.sort(key=lambda x: x.name)
docstr = "\n".join((f" {l}" for l in docstr.strip().splitlines()))
for function in function_definitions:
docstr = ast.get_docstring(function)
if docstr is not None:
args = ", ".join(a.arg for a in function.args.args[1:])
args = f"({args})"
print(f"{f.name}{args}\n\n:{docstr[1:]}\n")
docstr = "\n".join(f" {line}" for line in docstr.strip().splitlines())
print(f"{function.name}{args}\n\n:{docstr[1:]}\n")
if __name__ == "__main__":
main()

View file

@ -0,0 +1,44 @@
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[project]
name = "nixos-test-driver"
version = "0.0.0"
[project.scripts]
nixos-test-driver = "test_driver:main"
generate-driver-symbols = "test_driver:generate_driver_symbols"
[tool.setuptools.packages]
find = {}
[tool.setuptools.package-data]
test_driver = ["py.typed"]
[tool.ruff]
line-length = 88
select = ["E", "F", "I", "U", "N"]
ignore = ["E501"]
# xxx: we can import https://pypi.org/project/types-colorama/ here
[[tool.mypy.overrides]]
module = "colorama.*"
ignore_missing_imports = true
[[tool.mypy.overrides]]
module = "ptpython.*"
ignore_missing_imports = true
[tool.black]
line-length = 88
target-version = ['py39']
include = '\.pyi?$'
[tool.mypy]
python_version = "3.10"
warn_redundant_casts = true
disallow_untyped_calls = true
disallow_untyped_defs = true
no_implicit_optional = true

View file

@ -1,14 +0,0 @@
from setuptools import setup, find_packages
setup(
name="nixos-test-driver",
version='1.1',
packages=find_packages(),
package_data={"test_driver": ["py.typed"]},
entry_points={
"console_scripts": [
"nixos-test-driver=test_driver:main",
"generate-driver-symbols=test_driver:generate_driver_symbols"
]
},
)

View file

@ -0,0 +1,2 @@
with import ../../.. {};
pkgs.callPackage ./default.nix {}

View file

@ -1,11 +1,12 @@
from pathlib import Path
import argparse
import ptpython.repl
import os
import time
from pathlib import Path
import ptpython.repl
from test_driver.logger import rootlog
from test_driver.driver import Driver
from test_driver.logger import rootlog
class EnvDefault(argparse.Action):
@ -25,9 +26,7 @@ class EnvDefault(argparse.Action):
)
if required and default:
required = False
super(EnvDefault, self).__init__(
default=default, required=required, nargs=nargs, **kwargs
)
super().__init__(default=default, required=required, nargs=nargs, **kwargs)
def __call__(self, parser, namespace, values, option_string=None): # type: ignore
setattr(namespace, self.dest, values)

View file

@ -1,14 +1,14 @@
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Dict, Iterator, List, Union, Optional, Callable, ContextManager
import os
import re
import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Callable, ContextManager, Dict, Iterator, List, Optional, Union
from test_driver.logger import rootlog
from test_driver.machine import Machine, NixStartScript, retry
from test_driver.vlan import VLan
from test_driver.polling_condition import PollingCondition
from test_driver.vlan import VLan
def get_tmp_dir() -> Path:

View file

@ -1,13 +1,17 @@
from colorama import Style, Fore
from contextlib import contextmanager
from typing import Any, Dict, Iterator
from queue import Queue, Empty
from xml.sax.saxutils import XMLGenerator
# mypy: disable-error-code="no-untyped-call"
# drop the above line when mypy is upgraded to include
# https://github.com/python/typeshed/commit/49b717ca52bf0781a538b04c0d76a5513f7119b8
import codecs
import os
import sys
import time
import unicodedata
from contextlib import contextmanager
from queue import Empty, Queue
from typing import Any, Dict, Iterator
from xml.sax.saxutils import XMLGenerator
from colorama import Fore, Style
class Logger:

View file

@ -1,7 +1,3 @@
from contextlib import _GeneratorContextManager, nullcontext
from pathlib import Path
from queue import Queue
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
import base64
import io
import os
@ -16,6 +12,10 @@ import sys
import tempfile
import threading
import time
from contextlib import _GeneratorContextManager, nullcontext
from pathlib import Path
from queue import Queue
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
from test_driver.logger import rootlog
@ -236,14 +236,14 @@ class LegacyStartCommand(StartCommand):
def __init__(
self,
netBackendArgs: Optional[str] = None,
netFrontendArgs: Optional[str] = None,
netBackendArgs: Optional[str] = None, # noqa: N803
netFrontendArgs: Optional[str] = None, # noqa: N803
hda: Optional[Tuple[Path, str]] = None,
cdrom: Optional[str] = None,
usb: Optional[str] = None,
bios: Optional[str] = None,
qemuBinary: Optional[str] = None,
qemuFlags: Optional[str] = None,
qemuBinary: Optional[str] = None, # noqa: N803
qemuFlags: Optional[str] = None, # noqa: N803
):
if qemuBinary is not None:
self._cmd = qemuBinary
@ -599,7 +599,7 @@ class Machine:
return (-1, output.decode())
# Get the return code
self.shell.send("echo ${PIPESTATUS[0]}\n".encode())
self.shell.send(b"echo ${PIPESTATUS[0]}\n")
rc = int(self._next_newline_closed_block_from_shell().strip())
return (rc, output.decode(errors="replace"))
@ -736,7 +736,7 @@ class Machine:
)
return output
def wait_until_tty_matches(self, tty: str, regexp: str) -> None:
def wait_until_tty_matches(self, tty: str, regexp: str, timeout: int = 900) -> None:
"""Wait until the visible output on the chosen TTY matches regular
expression. Throws an exception on timeout.
"""
@ -752,7 +752,7 @@ class Machine:
return len(matcher.findall(text)) > 0
with self.nested(f"waiting for {regexp} to appear on tty {tty}"):
retry(tty_matches)
retry(tty_matches, timeout)
def send_chars(self, chars: str, delay: Optional[float] = 0.01) -> None:
"""
@ -764,7 +764,7 @@ class Machine:
for char in chars:
self.send_key(char, delay, log=False)
def wait_for_file(self, filename: str) -> None:
def wait_for_file(self, filename: str, timeout: int = 900) -> None:
"""
Waits until the file exists in the machine's file system.
"""
@ -774,9 +774,11 @@ class Machine:
return status == 0
with self.nested(f"waiting for file '{filename}'"):
retry(check_file)
retry(check_file, timeout)
def wait_for_open_port(self, port: int, addr: str = "localhost") -> None:
def wait_for_open_port(
self, port: int, addr: str = "localhost", timeout: int = 900
) -> None:
"""
Wait until a process is listening on the given TCP port and IP address
(default `localhost`).
@ -787,9 +789,11 @@ class Machine:
return status == 0
with self.nested(f"waiting for TCP port {port} on {addr}"):
retry(port_is_open)
retry(port_is_open, timeout)
def wait_for_closed_port(self, port: int, addr: str = "localhost") -> None:
def wait_for_closed_port(
self, port: int, addr: str = "localhost", timeout: int = 900
) -> None:
"""
Wait until nobody is listening on the given TCP port and IP address
(default `localhost`).
@ -800,7 +804,7 @@ class Machine:
return status != 0
with self.nested(f"waiting for TCP port {port} on {addr} to be closed"):
retry(port_is_closed)
retry(port_is_closed, timeout)
def start_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
return self.systemctl(f"start {jobname}", user)
@ -839,6 +843,9 @@ class Machine:
while True:
chunk = self.shell.recv(1024)
# No need to print empty strings, it means we are waiting.
if len(chunk) == 0:
continue
self.log(f"Guest shell says: {chunk!r}")
# NOTE: for this to work, nothing must be printed after this line!
if b"Spawning backdoor root shell..." in chunk:
@ -974,7 +981,7 @@ class Machine:
"""
return self._get_screen_text_variants([2])[0]
def wait_for_text(self, regex: str) -> None:
def wait_for_text(self, regex: str, timeout: int = 900) -> None:
"""
Wait until the supplied regular expressions matches the textual
contents of the screen by using optical character recognition (see
@ -997,7 +1004,7 @@ class Machine:
return False
with self.nested(f"waiting for {regex} to appear on screen"):
retry(screen_matches)
retry(screen_matches, timeout)
def wait_for_console_text(self, regex: str, timeout: int | None = None) -> None:
"""
@ -1125,7 +1132,7 @@ class Machine:
return
assert self.shell
self.shell.send("poweroff\n".encode())
self.shell.send(b"poweroff\n")
self.wait_for_shutdown()
def crash(self) -> None:
@ -1148,7 +1155,7 @@ class Machine:
self.send_key("ctrl-alt-delete")
self.connected = False
def wait_for_x(self) -> None:
def wait_for_x(self, timeout: int = 900) -> None:
"""
Wait until it is possible to connect to the X server.
"""
@ -1165,14 +1172,14 @@ class Machine:
return status == 0
with self.nested("waiting for the X11 server"):
retry(check_x)
retry(check_x, timeout)
def get_window_names(self) -> List[str]:
return self.succeed(
r"xwininfo -root -tree | sed 's/.*0x[0-9a-f]* \"\([^\"]*\)\".*/\1/; t; d'"
).splitlines()
def wait_for_window(self, regexp: str) -> None:
def wait_for_window(self, regexp: str, timeout: int = 900) -> None:
"""
Wait until an X11 window has appeared whose name matches the given
regular expression, e.g., `wait_for_window("Terminal")`.
@ -1190,7 +1197,7 @@ class Machine:
return any(pattern.search(name) for name in names)
with self.nested("waiting for a window to appear"):
retry(window_is_visible)
retry(window_is_visible, timeout)
def sleep(self, secs: int) -> None:
# We want to sleep in *guest* time, not *host* time.
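
The `wait_for_*` helpers above now take an explicit `timeout` argument (in seconds, defaulting to 900) that is passed through to `retry`. A minimal sketch of a test using it (the test name and the nginx setup are illustrative):

```nix
import ./make-test-python.nix ({ pkgs, ... }: {
  name = "wait-timeouts";
  nodes.machine = {
    services.nginx.enable = true;
    services.nginx.virtualHosts.localhost = { };
  };
  testScript = ''
    machine.wait_for_unit("nginx.service")
    # fail after 120 s instead of the default 900 s
    machine.wait_for_open_port(80, timeout=120)
    machine.wait_for_file("/var/log/nginx/error.log", timeout=60)
  '';
})
```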

View file

@ -1,11 +1,11 @@
from typing import Callable, Optional
from math import isfinite
import time
from math import isfinite
from typing import Callable, Optional
from .logger import rootlog
class PollingConditionFailed(Exception):
class PollingConditionError(Exception):
pass
@ -60,7 +60,7 @@ class PollingCondition:
def maybe_raise(self) -> None:
if not self.check():
raise PollingConditionFailed(self.status_message(False))
raise PollingConditionError(self.status_message(False))
def status_message(self, status: bool) -> str:
return f"Polling condition {'succeeded' if status else 'failed'}: {self.description}"

View file

@ -1,8 +1,8 @@
from pathlib import Path
import io
import os
import pty
import subprocess
from pathlib import Path
from test_driver.logger import rootlog

Some files were not shown because too many files have changed in this diff.