Project import generated by Copybara.

GitOrigin-RevId: bc5d68306b40b8522ffb69ba6cff91898c2fbbff
This commit is contained in:
Default email 2021-12-06 17:07:01 +01:00
parent f62ee65546
commit ce641f4048
5786 changed files with 236630 additions and 99348 deletions

View file

@ -6,6 +6,13 @@
#
# For documentation on this file, see https://help.github.com/articles/about-codeowners/
# Mentioned users will get code review requests.
#
# IMPORTANT NOTE: in order to actually get pinged, commit access is required.
# This also holds true for GitHub teams. Since almost none of our teams have write
# permissions, you need to list all members of the team with commit access individually.
# We still add the team to the list next to its members; this helps keep things
# in sync. (Put non-team members before the team to distinguish them.)
# See https://github.com/NixOS/nixpkgs/issues/124085 for more details
# This file
/.github/CODEOWNERS @edolstra
@ -210,11 +217,11 @@
/pkgs/top-level/php-packages.nix @jtojnar @NixOS/php @aanderse @etu @globin @ma27 @talyz
# Podman, CRI-O modules and related
/nixos/modules/virtualisation/containers.nix @NixOS/podman @zowoq
/nixos/modules/virtualisation/cri-o.nix @NixOS/podman @zowoq
/nixos/modules/virtualisation/podman.nix @NixOS/podman @zowoq
/nixos/tests/cri-o.nix @NixOS/podman @zowoq
/nixos/tests/podman.nix @NixOS/podman @zowoq
/nixos/modules/virtualisation/containers.nix @NixOS/podman @zowoq @adisbladis
/nixos/modules/virtualisation/cri-o.nix @NixOS/podman @zowoq @adisbladis
/nixos/modules/virtualisation/podman.nix @NixOS/podman @zowoq @adisbladis
/nixos/tests/cri-o.nix @NixOS/podman @zowoq @adisbladis
/nixos/tests/podman.nix @NixOS/podman @zowoq @adisbladis
# Docker tools
/pkgs/build-support/docker @roberth @utdemir
@ -230,6 +237,10 @@
/pkgs/development/go-modules @kalbasit @Mic92 @zowoq
/pkgs/development/go-packages @kalbasit @Mic92 @zowoq
# GNOME
/pkgs/desktops/gnome @NixOS/GNOME @jtojnar @hedning
/pkgs/desktops/gnome/extensions @piegamesde @NixOS/GNOME @jtojnar @hedning
# Cinnamon
/pkgs/desktops/cinnamon @mkg20001

View file

@ -20,12 +20,17 @@ Reviewing guidelines: https://nixos.org/manual/nixpkgs/unstable/#chap-reviewing-
- [ ] aarch64-linux
- [ ] x86_64-darwin
- [ ] aarch64-darwin
- [ ] For non-Linux: Is `sandbox = true` set in `nix.conf`? (See [Nix manual](https://nixos.org/manual/nix/stable/#sec-conf-file))
- [ ] Tested via one or more NixOS test(s) if existing and applicable for the change (look inside [nixos/tests](https://github.com/NixOS/nixpkgs/blob/master/nixos/tests))
- [ ] Tested compilation of all packages that depend on this change using `nix-shell -p nixpkgs-review --run "nixpkgs-review wip"`
- [ ] Tested execution of all binary files (usually in `./result/bin/`)
- [21.11 Release Notes (or backporting 21.05 Release notes)](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md#generating-2111-release-notes)
- [ ] For non-Linux: Is `sandbox = true` set in `nix.conf`? (See [Nix manual](https://nixos.org/manual/nix/stable/command-ref/conf-file.html))
- [ ] Tested, as applicable:
- [NixOS test(s)](https://nixos.org/manual/nixos/unstable/index.html#sec-nixos-tests) (look inside [nixos/tests](https://github.com/NixOS/nixpkgs/blob/master/nixos/tests))
- and/or [package tests](https://nixos.org/manual/nixpkgs/unstable/#sec-package-tests)
- or, for functions and "core" functionality, tests in [lib/tests](https://github.com/NixOS/nixpkgs/blob/master/lib/tests) or [pkgs/test](https://github.com/NixOS/nixpkgs/blob/master/pkgs/test)
- made sure NixOS tests are [linked](https://nixos.org/manual/nixpkgs/unstable/#ssec-nixos-tests-linking) to the relevant packages
- [ ] Tested compilation of all packages that depend on this change using `nix-shell -p nixpkgs-review --run "nixpkgs-review rev HEAD"`. Note: all changes have to be committed, also see [nixpkgs-review usage](https://github.com/Mic92/nixpkgs-review#usage)
- [ ] Tested basic functionality of all binary files (usually in `./result/bin/`)
- [22.05 Release Notes (or backporting 21.11 Release notes)](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md#generating-2205-release-notes)
- [ ] (Package updates) Added a release notes entry if the change is major or breaking
- [ ] (Module updates) Added a release notes entry if the change is significant
- [ ] (Module addition) Added a release notes entry if adding a new NixOS module
- [ ] (Release notes changes) Ran `nixos/doc/manual/md-to-db.sh` to update generated release notes
- [ ] Fits [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).

View file

@ -143,6 +143,8 @@
- doc/languages-frameworks/vim.section.md
- pkgs/applications/editors/vim/**/*
- pkgs/misc/vim-plugins/**/*
- nixos/modules/programs/neovim.nix
- pkgs/applications/editors/neovim/**/*
"6.topic: xfce":
- nixos/doc/manual/configuration/xfce.xml

View file

@ -15,6 +15,6 @@ jobs:
# we don't limit this action to only the NixOS repo since the checks are cheap and useful developer feedback
steps:
- uses: actions/checkout@v2
- uses: cachix/install-nix-action@v14
- uses: cachix/install-nix-action@v16
# explicit list of supportedSystems is needed until aarch64-darwin becomes part of the trunk jobset
- run: nix-build pkgs/top-level/release.nix -A tarball.nixpkgs-basic-release-checks --arg supportedSystems '[ "aarch64-darwin" "aarch64-linux" "x86_64-linux" "x86_64-darwin" ]'

View file

@ -11,36 +11,32 @@ on:
jobs:
tests:
runs-on: ubuntu-latest
if: github.repository_owner == 'NixOS'
if: "github.repository_owner == 'NixOS' && !contains(github.event.pull_request.title, '[skip editorconfig]')"
steps:
- name: Get list of changed files from PR
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
echo 'PR_DIFF<<EOF' >> $GITHUB_ENV
gh api \
repos/NixOS/nixpkgs/pulls/${{github.event.number}}/files --paginate \
| jq '.[] | select(.status != "removed") | .filename' \
>> $GITHUB_ENV
echo 'EOF' >> $GITHUB_ENV
> "$HOME/changed_files"
- name: print list of changed files
run: |
cat "$HOME/changed_files"
- uses: actions/checkout@v2
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
if: env.PR_DIFF
- uses: cachix/install-nix-action@v14
if: env.PR_DIFF
- uses: cachix/install-nix-action@v16
with:
# nixpkgs commit is pinned so that it doesn't break
nix_path: nixpkgs=https://github.com/NixOS/nixpkgs/archive/f93ecc4f6bc60414d8b73dbdf615ceb6a2c604df.tar.gz
- name: install editorconfig-checker
run: nix-env -iA editorconfig-checker -f '<nixpkgs>'
if: env.PR_DIFF
- name: Checking EditorConfig
if: env.PR_DIFF
run: |
echo "$PR_DIFF" | xargs editorconfig-checker -disable-indent-size
cat "$HOME/changed_files" | xargs -r editorconfig-checker -disable-indent-size
- if: ${{ failure() }}
run: |
echo "::error :: Hey! It looks like your changes don't follow our editorconfig settings. Read https://editorconfig.org/#download to configure your editor so you never see this error again."

View file

@ -18,7 +18,7 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v14
- uses: cachix/install-nix-action@v16
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true

View file

@ -18,7 +18,7 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v14
- uses: cachix/install-nix-action@v16
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true

View file

@ -19,7 +19,7 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v14
- uses: cachix/install-nix-action@v16
- name: Check DocBook files generated from Markdown are consistent
run: |
nixos/doc/manual/md-to-db.sh

View file

@ -32,6 +32,10 @@ jobs:
into: staging-next-21.05
- from: staging-next-21.05
into: staging-21.05
- from: release-21.11
into: staging-next-21.11
- from: staging-next-21.11
into: staging-21.11
name: ${{ matrix.pairs.from }} → ${{ matrix.pairs.into }}
steps:
- uses: actions/checkout@v2

View file

@ -3,8 +3,10 @@
.*.swp
.*.swo
.idea/
outputs/
result
result-*
source/
/doc/NEWS.html
/doc/NEWS.txt
/doc/manual.html
@ -21,3 +23,6 @@ __pycache__
# generated by pkgs/common-updater/update-script.nix
update-git-commits.txt
# JetBrains IDEA module declaration file
/nixpkgs.iml

View file

@ -1 +1 @@
21.11
22.05

View file

@ -53,10 +53,10 @@ For package version upgrades and such a one-line commit message is usually suffi
Follow these steps to backport a change into a release branch in compliance with the [commit policy](https://nixos.org/nixpkgs/manual/#submitting-changes-stable-release-branches).
1. Take note of the commits in which the change was introduced into `master` branch.
2. Check out the target _release branch_, e.g. `release-20.09`. Do not use a _channel branch_ like `nixos-20.09` or `nixpkgs-20.09`.
2. Check out the target _release branch_, e.g. `release-21.11`. Do not use a _channel branch_ like `nixos-21.11` or `nixpkgs-21.11-darwin`.
3. Create a branch for your change, e.g. `git checkout -b backport`.
4. When the reason to backport is not obvious from the original commit message, use `git cherry-pick -xe <original commit>` and add a reason. Otherwise use `git cherry-pick -x <original commit>`. That's fine for minor version updates that only include security and bug fixes, commits that fix an otherwise broken package, or similar. Please also ensure the commits exist on the master branch; in the case of squashed or rebased merges, the commit hash will change and the new commits can be found in the merge message at the bottom of the master pull request.
5. Push to GitHub and open a backport pull request. Make sure to select the release branch (e.g. `release-20.09`) as the target branch of the pull request, and link to the pull request in which the original change was comitted to `master`. The pull request title should be the commit title with the release version as prefix, e.g. `[20.09]`.
5. Push to GitHub and open a backport pull request. Make sure to select the release branch (e.g. `release-21.11`) as the target branch of the pull request, and link to the pull request in which the original change was committed to `master`. The pull request title should be the commit title with the release version as prefix, e.g. `[21.11]`.
6. When the backport pull request is merged and you have the necessary privileges you can also replace the label `9.needs: port to stable` with `8.has: port to stable` on the original pull request. This way maintainers can keep track of missing backports easier.
## Criteria for Backporting changes
@ -68,17 +68,17 @@ Anything that does not cause user or downstream dependency regressions can be ba
- Services which require a client to be up-to-date regardless. (E.g. `spotify`, `steam`, or `discord`)
- Security critical applications (E.g. `firefox`)
## Generating 21.11 Release Notes
## Generating 22.05 Release Notes
(This section also applies to backporting 21.05 release notes: substitute "rl-2111" for "rl-2105".)
(This section also applies to backporting 21.11 release notes: replace "rl-2205" with "rl-2111".)
Documentation in nixpkgs is transitioning to a markdown-centric workflow. Release notes now require a translation step to convert from markdown to a compatible docbook document.
Steps for updating 21.11 Release notes:
Steps for updating 22.05 Release notes:
1. Edit `nixos/doc/manual/release-notes/rl-2111.section.md` with the desired changes
2. Run `./nixos/doc/manual/md-to-db.sh` to render `nixos/doc/manual/from_md/release-notes/rl-2111.section.xml`
3. Include changes to `rl-2111.section.md` and `rl-2111.section.xml` in the same commit.
1. Edit `nixos/doc/manual/release-notes/rl-2205.section.md` with the desired changes
2. Run `./nixos/doc/manual/md-to-db.sh` to render `nixos/doc/manual/from_md/release-notes/rl-2205.section.xml`
3. Include changes to `rl-2205.section.md` and `rl-2205.section.xml` in the same commit.
## Reviewing contributions

View file

@ -8,7 +8,7 @@
</p>
[Nixpkgs](https://github.com/nixos/nixpkgs) is a collection of over
60,000 software packages that can be installed with the
80,000 software packages that can be installed with the
[Nix](https://nixos.org/nix/) package manager. It also implements
[NixOS](https://nixos.org/nixos/), a purely-functional Linux distribution.
@ -46,14 +46,14 @@ Nixpkgs and NixOS are built and tested by our continuous integration
system, [Hydra](https://hydra.nixos.org/).
* [Continuous package builds for unstable/master](https://hydra.nixos.org/jobset/nixos/trunk-combined)
* [Continuous package builds for the NixOS 21.05 release](https://hydra.nixos.org/jobset/nixos/release-21.05)
* [Continuous package builds for the NixOS 21.11 release](https://hydra.nixos.org/jobset/nixos/release-21.11)
* [Tests for unstable/master](https://hydra.nixos.org/job/nixos/trunk-combined/tested#tabs-constituents)
* [Tests for the NixOS 21.05 release](https://hydra.nixos.org/job/nixos/release-21.05/tested#tabs-constituents)
* [Tests for the NixOS 21.11 release](https://hydra.nixos.org/job/nixos/release-21.11/tested#tabs-constituents)
Artifacts successfully built with Hydra are published to cache at
https://cache.nixos.org/. When successful build and test criteria are
met, the Nixpkgs expressions are distributed via [Nix
channels](https://nixos.org/nix/manual/#sec-channels).
channels](https://nixos.org/manual/nix/stable/package-management/channels.html).
# Contributing

View file

@ -151,6 +151,12 @@ Create a Docker image with many of the store paths being on their own layer to i
: Shell commands to run while creating the archive for the final layer in a fakeroot environment. Unlike `extraCommands`, you can run `chown` to change the owners of the files in the archive, changing fakeroot's state instead of the real filesystem. The latter would require privileges that the build user does not have. Static binaries do not interact with the fakeroot environment. By default all files in the archive will be owned by root.
`enableFakechroot` _optional_
: Whether to run `fakeRootCommands` in `fakechroot`, making programs behave as though `/` is the root of the image being created, while files in the Nix store are available as usual. This allows scripts that perform installation in `/` to work as expected. Considering that `fakechroot` is implemented via the same mechanism as `fakeroot`, the same caveats apply.
*Default:* `false`
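To illustrate how these arguments fit together, here is a minimal sketch of a `buildLayeredImage` call; the image name, directory, and ownership values are made up for the example:

```nix
pkgs.dockerTools.buildLayeredImage {
  name = "hello-data";                # hypothetical image name
  contents = [ pkgs.hello ];
  fakeRootCommands = ''
    # runs against fakeroot's state, so chown works without real privileges
    mkdir -p ./var/lib/hello
    chown -R 1000:1000 ./var/lib/hello
  '';
  enableFakechroot = false;           # default; set to true if the commands expect / to be the image root
}
```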
### Behavior of `contents` in the final image {#dockerTools-buildLayeredImage-arg-contents}
Each path directly listed in `contents` will have a symlink in the root of the image.

View file

@ -47,6 +47,28 @@ These functions write `text` to the Nix store. This is useful for creating scrip
Many more commands wrap `writeTextFile` including `writeText`, `writeTextDir`, `writeScript`, and `writeScriptBin`. These are convenience functions over `writeTextFile`.
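As a brief illustration, a hedged sketch of a direct `writeTextFile` call (the name, destination, and contents are arbitrary):

```nix
writeTextFile {
  name = "my-motd";                 # name of the resulting store path
  destination = "/etc/motd";        # optional; path of the file inside the output
  text = ''
    Welcome to this machine.
  '';
}
```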
## `writeShellApplication` {#trivial-builder-writeShellApplication}
This can be used to easily produce a shell script that has some dependencies (`runtimeInputs`). It automatically sets the `PATH` of the script to contain all of the listed inputs, sets some sanity shellopts (`errexit`, `nounset`, `pipefail`), and checks the resulting script with [`shellcheck`](https://github.com/koalaman/shellcheck).
For example, look at the following code:
```nix
writeShellApplication {
name = "show-nixos-org";
runtimeInputs = [ curl w3m ];
text = ''
curl -s 'https://nixos.org' | w3m -dump -T text/html
'';
}
```
Unlike with `writeShellScriptBin`, there is no need to manually write out `${curl}/bin/curl`; setting the `PATH`
is handled by `writeShellApplication`. Moreover, the script is checked with `shellcheck` for stricter
validation.
## `symlinkJoin` {#trivial-builder-symlinkJoin}
This can be used to put many derivations into the same directory structure. It works by creating a new derivation and adding symlinks to each of the paths listed. It expects two arguments, `name`, and `paths`. `name` is the name used in the Nix store path for the created derivation. `paths` is a list of paths that will be symlinked. These paths can be to Nix store derivations or any other subdirectory contained within.
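For example, a minimal sketch combining two packages into a single tree (the name and package choice are arbitrary):

```nix
symlinkJoin {
  name = "my-tools";          # name of the resulting store path
  paths = [ curl jq ];        # the files of these packages appear via symlinks in the result
}
```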

View file

@ -181,7 +181,7 @@
rev = "${version}";
```
- Filling lists condionally _should_ be done with `lib.optional(s)` instead of using `if cond then [ ... ] else null` or `if cond then [ ... ] else [ ]`.
- Building lists conditionally _should_ be done with `lib.optional(s)` instead of using `if cond then [ ... ] else null` or `if cond then [ ... ] else [ ]`.
```nix
buildInputs = lib.optional stdenv.isDarwin iconv;

View file

@ -166,7 +166,7 @@ lib.attrsets.setAttrByPath [ "a" "b" ] 3
<xi:include href="./locations.xml" xpointer="lib.attrsets.getAttrFromPath" />
<para>
Like [](#function-library-lib.attrsets.attrByPath) except without a default, and it will throw if the value doesn't exist.
Like <xref linkend="function-library-lib.attrsets.attrByPath" /> except without a default, and it will throw if the value doesn't exist.
</para>
<variablelist>
@ -1480,7 +1480,7 @@ lib.attrsets.zipAttrsWith
<xi:include href="./locations.xml" xpointer="lib.attrsets.zipAttrs" />
<para>
Merge sets of attributes and combine each attribute value in to a list. Similar to [](#function-library-lib.attrsets.zipAttrsWith) where the merge function returns a list of all values.
Merge sets of attributes and combine each attribute value in to a list. Similar to <xref linkend="function-library-lib.attrsets.zipAttrsWith" /> where the merge function returns a list of all values.
</para>
<variablelist>

View file

@ -291,10 +291,10 @@ let
# define packages to install
basePackages = [
git
# replace with beam.packages.erlang.elixir_1_11 if you need
# replace with beam.packages.erlang.elixir_1_13 if you need
beam.packages.erlang.elixir
nodejs
postgresql_13
postgresql_14
# only used for frontend dependencies
# you are free to use yarn2nix as well
nodePackages.node2nix
@ -312,10 +312,11 @@ let
mkdir -p .nix-mix .nix-hex
export MIX_HOME=$PWD/.nix-mix
export HEX_HOME=$PWD/.nix-mix
# make hex from Nixpkgs available
# `mix local.hex` will install hex into MIX_HOME and should take precedence
export MIX_PATH="${beam.packages.erlang.hex}/lib/erlang/lib/hex/ebin"
export PATH=$MIX_HOME/bin:$HEX_HOME/bin:$PATH
# TODO: not sure how to make hex available without installing it afterwards.
mix local.hex --if-missing
export LANG=en_US.UTF-8
export LANG=C.UTF-8
# keep your shell history in iex
export ERL_AFLAGS="-kernel shell_history enabled"

View file

@ -4,12 +4,12 @@
This section uses [Mint](https://github.com/mint-lang/mint) as an example for how to build a Crystal package.
If the Crystal project has any dependencies, the first step is to get a `shards.nix` file encoding those. Get a copy of the project and go to its root directory such that its `shard.lock` file is in the current directory, then run `crystal2nix` in it
If the Crystal project has any dependencies, the first step is to get a `shards.nix` file encoding those. Get a copy of the project and go to its root directory such that its `shard.lock` file is in the current directory. Executable projects should usually commit the `shard.lock` file, but sometimes that's not the case, which means you need to generate it yourself. With an existing `shard.lock` file, `crystal2nix` can be run.
```bash
$ git clone https://github.com/mint-lang/mint
$ cd mint
$ git checkout 0.5.0
$ if [ ! -f shard.lock ]; then nix-shell -p shards --run "shards lock"; fi
$ nix-shell -p crystal2nix --run crystal2nix
```

View file

@ -50,7 +50,7 @@ expression does not protect the Prelude import with a semantic integrity
check, so the first step is to freeze the expression using `dhall freeze`,
like this:
```bash
```ShellSession
$ dhall freeze --inplace ./true.dhall
```
@ -113,7 +113,7 @@ in
… which we can then build using this command:
```bash
```ShellSession
$ nix build --file ./example.nix dhallPackages.true
```
@ -121,7 +121,7 @@ $ nix build --file ./example.nix dhallPackages.true
The above package produces the following directory tree:
```bash
```ShellSession
$ tree -a ./result
result
├── .cache
@ -135,7 +135,7 @@ result
* `source.dhall` contains the result of interpreting our Dhall package:
```bash
```ShellSession
$ cat ./result/source.dhall
True
```
@ -143,7 +143,7 @@ result
* The `.cache` subdirectory contains one binary cache product encoding the
same result as `source.dhall`:
```bash
```ShellSession
$ dhall decode < ./result/.cache/dhall/122027abdeddfe8503496adeb623466caa47da5f63abd2bc6fa19f6cfcb73ecfed70
True
```
@ -151,7 +151,7 @@ result
* `binary.dhall` contains a Dhall expression which handles fetching and decoding
the same cache product:
```bash
```ShellSession
$ cat ./result/binary.dhall
missing sha256:27abdeddfe8503496adeb623466caa47da5f63abd2bc6fa19f6cfcb73ecfed70
$ cp -r ./result/.cache .cache
@ -168,7 +168,7 @@ to conserve disk space when they are used exclusively as dependencies. For
example, if we build the Prelude package it will only contain the binary
encoding of the expression:
```bash
```ShellSession
$ nix build --file ./example.nix dhallPackages.Prelude
$ tree -a result
@ -199,7 +199,7 @@ Dhall overlay like this:
… and now the Prelude will contain the fully decoded result of interpreting
the Prelude:
```bash
```ShellSession
$ nix build --file ./example.nix dhallPackages.Prelude
$ tree -a result
@ -302,7 +302,7 @@ Additionally, `buildDhallGitHubPackage` accepts the same arguments as
You can use the `dhall-to-nixpkgs` command-line utility to automate
packaging Dhall code. For example:
```bash
```ShellSession
$ nix-env --install --attr haskellPackages.dhall-nixpkgs
$ nix-env --install --attr nix-prefetch-git # Used by dhall-to-nixpkgs
@ -329,12 +329,12 @@ The utility takes care of automatically detecting remote imports and converting
them to package dependencies. You can also use the utility on local
Dhall directories, too:
```bash
```ShellSession
$ dhall-to-nixpkgs directory ~/proj/dhall-semver
{ buildDhallDirectoryPackage, Prelude }:
buildDhallDirectoryPackage {
name = "proj";
src = /Users/gabriel/proj/dhall-semver;
src = ~/proj/dhall-semver;
file = "package.dhall";
source = false;
document = false;
@ -342,6 +342,37 @@ $ dhall-to-nixpkgs directory ~/proj/dhall-semver
}
```
### Remote imports as fixed-output derivations {#ssec-dhall-remote-imports-as-fod}
`dhall-to-nixpkgs` has the ability to fetch and build remote imports as
fixed-output derivations by using their Dhall integrity check. This is
sometimes easier than manually packaging all remote imports.
This can be used like the following:
```ShellSession
$ dhall-to-nixpkgs directory --fixed-output-derivations ~/proj/dhall-semver
{ buildDhallDirectoryPackage, buildDhallUrl }:
buildDhallDirectoryPackage {
name = "proj";
src = ~/proj/dhall-semver;
file = "package.dhall";
source = false;
document = false;
dependencies = [
(buildDhallUrl {
url = "https://prelude.dhall-lang.org/v17.0.0/package.dhall";
hash = "sha256-ENs8kZwl6QRoM9+Jeo/+JwHcOQ+giT2VjDQwUkvlpD4=";
dhallHash = "sha256:10db3c919c25e9046833df897a8ffe2701dc390fa0893d958c3430524be5a43e";
})
];
}
```
Here, `dhall-semver`'s `Prelude` dependency is fetched and built with the
`buildDhallUrl` helper function, instead of being passed in as a function
argument.
## Overriding dependency versions {#ssec-dhall-overriding-dependency-versions}
Suppose that we change our `true.dhall` example expression to depend on an older
@ -359,7 +390,7 @@ in Prelude.Bool.not False
If we try to rebuild that expression the build will fail:
```
```ShellSession
$ nix build --file ./example.nix dhallPackages.true
builder for '/nix/store/0f1hla7ff1wiaqyk1r2ky4wnhnw114fi-true.drv' failed with exit code 1; last 10 log lines:
@ -385,7 +416,7 @@ importing the URL.
However, we can override the default Prelude version by using `dhall-to-nixpkgs`
to create a Dhall package for our desired Prelude:
```bash
```ShellSession
$ dhall-to-nixpkgs github https://github.com/dhall-lang/dhall-lang.git \
--name Prelude \
--directory Prelude \
@ -396,7 +427,7 @@ $ dhall-to-nixpkgs github https://github.com/dhall-lang/dhall-lang.git \
… and then referencing that package in our Dhall overlay, by either overriding
the Prelude globally for all packages, like this:
```bash
```nix
dhallOverrides = self: super: {
true = self.callPackage ./true.nix { };
@ -407,7 +438,7 @@ the Prelude globally for all packages, like this:
… or selectively overriding the Prelude dependency for just the `true` package,
like this:
```bash
```nix
dhallOverrides = self: super: {
true = self.callPackage ./true.nix {
Prelude = self.callPackage ./Prelude.nix { };

View file

@ -71,15 +71,19 @@ The `dotnetCorePackages.sdk` contains both a runtime and the full sdk of a given
To package Dotnet applications, you can use `buildDotnetModule`. This has similar arguments to `stdenv.mkDerivation`, with the following additions:
* `projectFile` has to be used for specifying the dotnet project file relative to the source root. These usually have `.sln` or `.csproj` file extensions.
* `projectFile` has to be used for specifying the dotnet project file relative to the source root. These usually have `.sln` or `.csproj` file extensions. This can be an array of multiple projects as well.
* `nugetDeps` has to be used to specify the NuGet dependency file. Unfortunately, these cannot be deterministically fetched without a lockfile. This file should be generated using the `nuget-to-nix` tool, which is available in nixpkgs.
* `executables` is used to specify which executables get wrapped to `$out/bin`, relative to `$out/lib/$pname`. If this is unset, all executables generated will get installed. If you do not want to install any, set this to `[]`.
* `runtimeDeps` is used to wrap libraries into `LD_LIBRARY_PATH`. This is how dotnet usually handles runtime dependencies.
* `buildType` is used to change the type of build. Possible values are `Release`, `Debug`, etc. By default, this is set to `Release`.
* `dotnet-sdk` is useful in cases where you need to change what dotnet SDK is being used.
* `dotnet-runtime` is useful in cases where you need to change what dotnet runtime is being used.
* `dotnet-runtime` is useful in cases where you need to change what dotnet runtime is being used. This can be either a regular dotnet runtime or an ASP.NET Core runtime.
* `dotnet-test-sdk` is useful in cases where unit tests expect a different dotnet SDK. By default, this is set to the `dotnet-sdk` attribute.
* `testProjectFile` is useful in cases where the regular project file does not contain the unit tests. By default, this is set to the `projectFile` attribute.
* `disabledTests` is used to disable running specific unit tests. This gets passed as: `dotnet test --filter "FullyQualifiedName!={}"`, to ensure compatibility with all unit test frameworks.
* `dotnetRestoreFlags` can be used to pass flags to `dotnet restore`.
* `dotnetBuildFlags` can be used to pass flags to `dotnet build`.
* `dotnetTestFlags` can be used to pass flags to `dotnet test`.
* `dotnetInstallFlags` can be used to pass flags to `dotnet install`.
* `dotnetFlags` can be used to pass flags to all of the above phases.
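Putting a few of these together, here is a hedged sketch of a `buildDotnetModule` call; the project, paths, and attribute values are placeholders rather than a real package:

```nix
buildDotnetModule rec {
  pname = "my-dotnet-app";            # hypothetical package
  version = "0.1.0";

  src = ./.;                          # or any fetcher

  projectFile = "src/MyApp.csproj";   # relative to the source root
  nugetDeps = ./deps.nix;             # generated with nuget-to-nix
  executables = [ "MyApp" ];          # wrap only this binary into $out/bin
}
```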

View file

@ -1,5 +1,31 @@
# OCaml {#sec-language-ocaml}
## User guide {#sec-language-ocaml-user-guide}
OCaml libraries are available in attribute sets of the form `ocaml-ng.ocamlPackages_X_XX` where X is to be replaced with the desired compiler version. For example, ocamlgraph compiled with OCaml 4.12 can be found in `ocaml-ng.ocamlPackages_4_12.ocamlgraph`. The compiler itself is also located in this set, under the name `ocaml`.
If you don't care about the exact compiler version, `ocamlPackages` is a top-level alias pointing to a recent version of OCaml.
OCaml applications are usually available top-level, and not inside `ocamlPackages`. Notable exceptions are build tools that must be built with the same compiler version as the compiler you intend to use, such as `dune` or `ocaml-lsp`.
To open a shell able to build a typical OCaml project, put the dependencies in `buildInputs` and add at least `ocamlPackages.ocaml` and `ocamlPackages.findlib` to `nativeBuildInputs`.
For example:
```nix
let
pkgs = import <nixpkgs> {};
# choose the ocaml version you want to use
ocamlPackages = pkgs.ocaml-ng.ocamlPackages_4_12;
in
pkgs.mkShell {
# build tools
nativeBuildInputs = with ocamlPackages; [ ocaml findlib dune_2 ocaml-lsp ];
# dependencies
buildInputs = with ocamlPackages; [ ocamlgraph ];
}
```
## Packaging guide {#sec-language-ocaml-packaging}
OCaml libraries should be installed in `$(out)/lib/ocaml/${ocaml.version}/site-lib/`. Such directories are automatically added to the `$OCAMLPATH` environment variable when building another package that depends on them or when opening a `nix-shell`.
Given that most of the OCaml ecosystem is now built with dune, nixpkgs includes a convenience build support function called `buildDunePackage` that will build an OCaml package using dune, OCaml and findlib and any additional dependencies provided as `buildInputs` or `propagatedBuildInputs`.
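As a rough illustration, a hedged sketch of a `buildDunePackage` call; the package name, version, and source URL are invented for the example:

```nix
{ lib, buildDunePackage, fetchurl, ocamlgraph }:

buildDunePackage rec {
  pname = "my-ocaml-lib";       # hypothetical library
  version = "0.1.0";

  src = fetchurl {
    url = "https://example.org/my-ocaml-lib-${version}.tbz";
    sha256 = lib.fakeSha256;    # replace with the real hash reported by the first build attempt
  };

  propagatedBuildInputs = [ ocamlgraph ];
}
```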

View file

@ -765,7 +765,7 @@ and in this case the `python38` interpreter is automatically used.
### Interpreters {#interpreters}
Versions 2.7, 3.7, 3.8 and 3.9 of the CPython interpreter are available as
respectively `python27`, `python36`, `python37`, `python38` and `python39`. The
respectively `python27`, `python37`, `python38` and `python39`. The
aliases `python2` and `python3` correspond to respectively `python27` and
`python39`. The attribute `python` maps to `python2`. The PyPy interpreters
compatible with Python 2.7 and 3 are available as `pypy27` and `pypy3`, with
@ -830,10 +830,10 @@ attribute set is created for each available Python interpreter. The available
sets are
* `pkgs.python27Packages`
* `pkgs.python36Packages`
* `pkgs.python37Packages`
* `pkgs.python38Packages`
* `pkgs.python39Packages`
* `pkgs.python310Packages`
* `pkgs.pypyPackages`
and the aliases

View file

@ -13,7 +13,7 @@ into your `configuration.nix` or bring them into scope with `nix-shell -p rustc
For other versions such as daily builds (beta and nightly),
use either `rustup` from nixpkgs (which will manage the rust installation in your home directory),
or use Mozilla's [Rust nightlies overlay](#using-the-rust-nightlies-overlay).
or use a community maintained [Rust overlay](#using-community-rust-overlays).
## Compiling Rust applications with Cargo {#compiling-rust-applications-with-cargo}
@ -186,6 +186,33 @@ added. To find the correct hash, you can first use `lib.fakeSha256` or
`lib.fakeHash` as a stub hash. Building the package (and thus the
vendored dependencies) will then inform you of the correct hash.
### Cargo features {#cargo-features}
You can disable default features using `buildNoDefaultFeatures`, and
extra features can be added with `buildFeatures`.
If you want to use different features for the check phase, you can use
`checkNoDefaultFeatures` and `checkFeatures`. They are only passed to
`cargo test` and not `cargo build`. If left unset, they default to
`buildNoDefaultFeatures` and `buildFeatures`.
For example:
```nix
rustPlatform.buildRustPackage rec {
pname = "myproject";
version = "1.0.0";
buildNoDefaultFeatures = true;
buildFeatures = [ "color" "net" ];
# disable network features in tests
checkFeatures = [ "color" ];
# ...
}
```
### Cross compilation {#cross-compilation}
By default, Rust packages are compiled for the host platform, just like any
@ -261,7 +288,7 @@ rustPlatform.buildRustPackage {
Please note that the code will be compiled twice here: once in `release` mode
for the `buildPhase`, and again in `debug` mode for the `checkPhase`.
Test flags, e.g., `--features xxx/yyy`, can be passed to `cargo test` via the
Test flags, e.g., `--package foo`, can be passed to `cargo test` via the
`cargoTestFlags` attribute.
Another attribute, called `checkFlags`, is used to pass arguments to the test
@ -411,7 +438,7 @@ you of the correct hash.
`rustPlatform` provides the following hooks to automate Cargo builds:
* `cargoSetupHook`: configure Cargo to use depenencies vendored
* `cargoSetupHook`: configure Cargo to use dependencies vendored
through `fetchCargoTarball`. This hook uses the `cargoDeps`
environment variable to find the vendored dependencies. If a project
already vendors its dependencies, the variable `cargoVendorDir` can
@ -421,18 +448,20 @@ you of the correct hash.
* `cargoBuildHook`: use Cargo to build a crate. If the crate to be
built is a crate in e.g. a Cargo workspace, the relative path to the
crate to build can be set through the optional `buildAndTestSubdir`
environment variable. Additional Cargo build flags can be passed
through `cargoBuildFlags`.
environment variable. Features can be specified with
`cargoBuildNoDefaultFeatures` and `cargoBuildFeatures`. Additional
Cargo build flags can be passed through `cargoBuildFlags`.
* `maturinBuildHook`: use [Maturin](https://github.com/PyO3/maturin)
to build a Python wheel. Similar to `cargoBuildHook`, the optional
variable `buildAndTestSubdir` can be used to build a crate in a
Cargo workspace. Additional maturin flags can be passed through
Cargo workspace. Additional Maturin flags can be passed through
`maturinBuildFlags`.
* `cargoCheckHook`: run tests using Cargo. The build type for checks
can be set using `cargoCheckType`. Additional flags can be passed to
the tests using `checkFlags` and `checkFlagsArray`. By default,
tests are run in parallel. This can be disabled by setting
`dontUseCargoParallelTests`.
can be set using `cargoCheckType`. Features can be specified with
`cargoCheckNoDefaultFeatures` and `cargoCheckFeatures`. Additional
flags can be passed to the tests using `checkFlags` and
`checkFlagsArray`. By default, tests are run in parallel. This can
be disabled by setting `dontUseCargoParallelTests`.
* `cargoInstallHook`: install binaries and static/shared libraries
that were built using `cargoBuildHook`.
@ -447,7 +476,7 @@ dependencies. The build itself is then performed by
The following example outlines how the `tokenizers` Python package is
built. Since the Python package is in the `source/bindings/python`
directory of the *tokenizers* project's source archive, we use
directory of the `tokenizers` project's source archive, we use
`sourceRoot` to point the tooling to this directory:
```nix
@ -672,7 +701,7 @@ Some crates require external libraries. For crates from
`defaultCrateOverrides` package in nixpkgs itself.
Starting from that file, one can add more overrides, to add features
or build inputs by overriding the hello crate in a seperate file.
or build inputs by overriding the hello crate in a separate file.
```nix
with import <nixpkgs> {};
@ -729,7 +758,7 @@ with import <nixpkgs> {};
Actually, the overrides introduced in the previous section are more
general. A number of other parameters can be overridden:
- The version of rustc used to compile the crate:
- The version of `rustc` used to compile the crate:
```nix
(hello {}).override { rust = pkgs.rust; };
@ -742,7 +771,7 @@ general. A number of other parameters can be overridden:
(hello {}).override { release = false; };
```
- Whether to print the commands sent to rustc when building
- Whether to print the commands sent to `rustc` when building
(equivalent to `--verbose` in cargo):
```nix
@ -871,11 +900,87 @@ rustc 1.26.0-nightly (188e693b3 2018-03-26)
To see that you are using nightly.
## Using the Rust nightlies overlay {#using-the-rust-nightlies-overlay}
## Using community Rust overlays {#using-community-rust-overlays}
Mozilla provides an overlay for nixpkgs to bring a nightly version of Rust into scope.
This overlay can _also_ be used to install recent unstable or stable versions
of Rust, if desired.
There are two community maintained approaches to Rust toolchain management:
- [oxalica's Rust overlay](https://github.com/oxalica/rust-overlay)
- [fenix](https://github.com/nix-community/fenix)
Oxalica's overlay allows you to select a particular Rust version and components.
See [their documentation](https://github.com/oxalica/rust-overlay#rust-overlay) for more
detailed usage.
Fenix is an alternative to `rustup` and can also be used as an overlay.
Both oxalica's overlay and fenix better integrate with nix and cache optimizations.
Because of this and ergonomics, either of those community projects
should be preferred to Mozilla's Rust overlay (`nixpkgs-mozilla`).
### How to select a specific `rustc` and toolchain version {#how-to-select-a-specific-rustc-and-toolchain-version}
You can consume the oxalica overlay and use it to grab a specific Rust toolchain version.
Here is an example `shell.nix` showing how to grab the current stable toolchain:
```nix
{ pkgs ? import <nixpkgs> {
overlays = [
(import (fetchTarball "https://github.com/oxalica/rust-overlay/archive/master.tar.gz"))
];
}
}:
pkgs.mkShell {
nativeBuildInputs = with pkgs; [
pkg-config
rust-bin.stable.latest.minimal
];
}
```
You can try this out by:
1. Saving that to `shell.nix`
2. Executing `nix-shell --pure --command 'rustc --version'`
As of writing, this prints out `rustc 1.56.0 (09c42c458 2021-10-18)`.
### How to use an overlay toolchain in a derivation {#how-to-use-an-overlay-toolchain-in-a-derivation}
You can also use an overlay's Rust toolchain with `buildRustPackage`.
The below snippet demonstrates invoking `buildRustPackage` with an oxalica overlay selected Rust toolchain:
```nix
with import <nixpkgs> {
overlays = [
(import (fetchTarball "https://github.com/oxalica/rust-overlay/archive/master.tar.gz"))
];
};
rustPlatform.buildRustPackage rec {
pname = "ripgrep";
version = "12.1.1";
nativeBuildInputs = [
rust-bin.stable.latest.minimal
];
src = fetchFromGitHub {
owner = "BurntSushi";
repo = "ripgrep";
rev = version;
sha256 = "1hqps7l5qrjh9f914r5i6kmcz6f1yb951nv4lby0cjnp5l253kps";
};
cargoSha256 = "03wf9r2csi6jpa7v5sw5lpxkrk4wfzwmzx7k3991q3bdjzcwnnwp";
meta = with lib; {
description = "A fast line-oriented regex search tool, similar to ag and ack";
homepage = "https://github.com/BurntSushi/ripgrep";
license = licenses.unlicense;
maintainers = [ maintainers.tailhook ];
};
}
```
Follow the below steps to try that snippet.
1. create a new directory
1. save the above snippet as `default.nix` in that directory
1. cd into that directory and run `nix-build`
### Rust overlay installation {#rust-overlay-installation}
@ -883,27 +988,15 @@ You can use this overlay by either changing your local nixpkgs configuration,
or by adding the overlay declaratively in a nix expression, e.g. in `configuration.nix`.
For more information see [the manual on installing overlays](#sec-overlays-install).
#### Imperative rust overlay installation {#imperative-rust-overlay-installation}
Clone [nixpkgs-mozilla](https://github.com/mozilla/nixpkgs-mozilla),
and create a symbolic link to the file
[rust-overlay.nix](https://github.com/mozilla/nixpkgs-mozilla/blob/master/rust-overlay.nix)
in the `~/.config/nixpkgs/overlays` directory.
```ShellSession
$ git clone https://github.com/mozilla/nixpkgs-mozilla.git
$ mkdir -p ~/.config/nixpkgs/overlays
$ ln -s $(pwd)/nixpkgs-mozilla/rust-overlay.nix ~/.config/nixpkgs/overlays/rust-overlay.nix
```
### Declarative rust overlay installation {#declarative-rust-overlay-installation}
### Declarative Rust overlay installation {#declarative-rust-overlay-installation}
This snippet shows how to use oxalica's Rust overlay.
Add the following to your `configuration.nix`, `home-configuration.nix`, `shell.nix`, or similar:
```nix
{ pkgs ? import <nixpkgs> {
overlays = [
(import (builtins.fetchTarball https://github.com/mozilla/nixpkgs-mozilla/archive/master.tar.gz))
(import (builtins.fetchTarball "https://github.com/oxalica/rust-overlay/archive/master.tar.gz"))
# Further overlays go here
];
};
@ -911,36 +1004,3 @@ Add the following to your `configuration.nix`, `home-configuration.nix`, `shell.
```
Note that this will fetch the latest overlay version when rebuilding your system.
### Rust overlay usage {#rust-overlay-usage}
The overlay contains attribute sets corresponding to different versions of the rust toolchain, such as:
* `latest.rustChannels.stable`
* `latest.rustChannels.nightly`
* a function `rustChannelOf`, called as `(rustChannelOf { date = "2018-04-11"; channel = "nightly"; })`, or...
* `(nixpkgs.rustChannelOf { rustToolchain = ./rust-toolchain; })` if you have a local `rust-toolchain` file (see https://github.com/mozilla/nixpkgs-mozilla#using-in-nix-expressions for an example)
Each of these contain packages such as `rust`, which contains your usual rust development tools with the respective toolchain chosen.
For example, you might want to add `latest.rustChannels.stable.rust` to the list of packages in your configuration.
Imperatively, the latest stable version can be installed with the following command:
```ShellSession
$ nix-env -Ai nixpkgs.latest.rustChannels.stable.rust
```
Or using the attribute with nix-shell:
```ShellSession
$ nix-shell -p nixpkgs.latest.rustChannels.stable.rust
```
Substitute the `nixpkgs` prefix with `nixos` on NixOS.
To install the beta or nightly channel, "stable" should be substituted by
"nightly" or "beta", or
use the function provided by this overlay to pull a version based on a
build date.
The overlay automatically updates itself as it uses the same source as
[rustup](https://www.rustup.rs/).

View file

@ -112,7 +112,7 @@ self: super:
This overlay uses Intel's MKL library for both BLAS and LAPACK interfaces. Note that the same can be accomplished at runtime using `LD_LIBRARY_PATH` of `libblas.so.3` and `liblapack.so.3`. For instance:
```ShellSession
$ LD_LIBRARY_PATH=$(nix-build -A mkl)/lib:$LD_LIBRARY_PATH nix-shell -p octave --run octave
$ LD_LIBRARY_PATH=$(nix-build -A mkl)/lib${LD_LIBRARY_PATH:+:}$LD_LIBRARY_PATH nix-shell -p octave --run octave
```
Intel MKL requires an `openmp` implementation when running with multiple processors. By default, `mkl` will use Intel's `iomp` implementation if no other is specified, but this is a runtime-only dependency and binary compatible with the LLVM implementation. To use that one instead, Intel recommends users set it with `LD_PRELOAD`. Note that `mkl` is only available on `x86_64-linux` and `x86_64-darwin`. Moreover, Hydra is not building and distributing pre-compiled binaries using it.

View file

@ -119,7 +119,7 @@ let
mkFixStrictness mkOrder mkBefore mkAfter mkAliasDefinitions
mkAliasAndWrapDefinitions fixMergeModules mkRemovedOptionModule
mkRenamedOptionModule mkMergedOptionModule mkChangedOptionModule
mkAliasOptionModule doRename;
mkAliasOptionModule mkDerivedConfig doRename;
inherit (self.options) isOption mkEnableOption mkSinkUndeclaredOptions
mergeDefaultOption mergeOneOption mergeEqualOption getValues
getFiles optionAttrSetToDocList optionAttrSetToDocList'

View file

@ -642,7 +642,7 @@ rec {
unique [ 3 2 3 4 ]
=> [ 3 2 4 ]
*/
unique = foldl' (acc: e: if elem e acc then acc else acc ++ [ e ]) [];
unique = foldl' (acc: e: if elem e acc then acc else acc ++ [ e ]) [];
/* Intersects list 'e' and another list. O(nm) complexity.
@ -663,9 +663,6 @@ rec {
/* Test if two lists have no common element.
It should be slightly more efficient than (intersectLists a b == [])
*/
mutuallyExclusive = a: b:
(builtins.length a) == 0 ||
(!(builtins.elem (builtins.head a) b) &&
mutuallyExclusive (builtins.tail a) b);
mutuallyExclusive = a: b: length a == 0 || !(any (x: elem x a) b);
}

View file

@ -52,15 +52,43 @@ in
rec {
/* Evaluate a set of modules. The result is a set of two
attributes: options: the nested set of all option declarations,
and config: the nested set of all option values.
/*
Evaluate a set of modules. The result is a set with the attributes:
options: The nested set of all option declarations,
config: The nested set of all option values.
type: A module system type representing the module set as a submodule,
to be extended by configuration from the containing module set.
This is also available as the module argument moduleType.
extendModules: A function similar to evalModules but building on top
of the module set. Its arguments, modules and specialArgs are
added to the existing values.
Using extendModules a few times has no performance impact as long
as you only reference the final options and config.
If you do reference multiple config (or options) from before and
after extendModules, performance is the same as with multiple
evalModules invocations, because the new modules' ability to
override existing configuration fundamentally requires a new
fixpoint to be constructed.
This is also available as a module argument.
_module: A portion of the configuration tree which is elided from
config. It contains some values that are mostly internal to the
module system implementation.
!!! Please think twice before adding to this argument list! The more
that is specified here instead of in the modules themselves the harder
it is to transparently move a set of modules to be a submodule of another
config (as the proper arguments need to be replicated at each call to
evalModules) and the less declarative the module set is. */
evalModules = { modules
evalModules = evalModulesArgs@
{ modules
, prefix ? []
, # This should only be used for special arguments that need to be evaluated
# when resolving module structure (like in imports). For everything else,
@ -120,7 +148,10 @@ rec {
};
config = {
_module.args = args;
_module.args = {
inherit extendModules;
moduleType = type;
} // args;
};
};
@ -183,10 +214,28 @@ rec {
else throw baseMsg
else null;
result = builtins.seq checkUnmatched {
inherit options;
config = removeAttrs config [ "_module" ];
inherit (config) _module;
checked = builtins.seq checkUnmatched;
extendModules = extendArgs@{
modules ? [],
specialArgs ? {},
prefix ? [],
}:
evalModules (evalModulesArgs // {
modules = evalModulesArgs.modules ++ modules;
specialArgs = evalModulesArgs.specialArgs or {} // specialArgs;
prefix = extendArgs.prefix or evalModulesArgs.prefix;
});
type = lib.types.submoduleWith {
inherit modules specialArgs;
};
result = {
options = checked options;
config = checked (removeAttrs config [ "_module" ]);
_module = checked (config._module);
inherit extendModules type;
};
in result;
@ -912,6 +961,26 @@ rec {
use = id;
};
/* mkDerivedConfig : Option a -> (a -> Definition b) -> Definition b
Create config definitions with the same priority as the definition of another option.
This should be used for option definitions where one option sets the value of another as a convenience.
For instance, a config file could be set with a `text` or `source` option, where `text` translates to a `source`
value using `mkDerivedConfig options.text (pkgs.writeText "filename.conf")`.
It takes care of setting the right priority using `mkOverride`.
*/
# TODO: make the module system error message include information about `opt` in
# error messages about conflicts. E.g. introduce a variation of `mkOverride` which
# adds extra location context to the definition object. This will allow context to be added
# to all messages that report option locations "this value was derived from <full option name>
# which was defined in <locations>". It can provide a trace of options that contributed
# to definitions.
mkDerivedConfig = opt: f:
mkOverride
(opt.highestPrio or defaultPriority)
(f opt.value);
doRename = { from, to, visible, warn, use, withPriority ? true }:
{ config, options, ... }:
let

View file

@ -74,7 +74,7 @@ rec {
apply ? null,
# Whether the option is for NixOS developers only.
internal ? null,
# Whether the option shows up in the manual.
# Whether the option shows up in the manual. Default: true. Use false to hide the option and any sub-options from submodules. Use "shallow" to hide only sub-options.
visible ? null,
# Whether the option can be set only once
readOnly ? null,
@ -180,7 +180,10 @@ rec {
description = opt.description or (lib.warn "Option `${name}' has no description." "This option has no description.");
declarations = filter (x: x != unknownModule) opt.declarations;
internal = opt.internal or false;
visible = opt.visible or true;
visible =
if (opt?visible && opt.visible == "shallow")
then true
else opt.visible or true;
readOnly = opt.readOnly or false;
type = opt.type.description or null;
}
@ -192,8 +195,9 @@ rec {
subOptions =
let ss = opt.type.getSubOptions opt.loc;
in if ss != {} then optionAttrSetToDocList' opt.loc ss else [];
subOptionsVisible = docOption.visible && opt.visible or null != "shallow";
in
[ docOption ] ++ optionals docOption.visible subOptions) (collect isOption options);
[ docOption ] ++ optionals subOptionsVisible subOptions) (collect isOption options);
/* This function recursively removes all derivation attributes from

View file

@ -369,7 +369,7 @@ rec {
Example:
escapeXML ''"test" 'test' < & >''
=> "\\[\\^a-z]\\*"
=> "&quot;test&quot; &apos;test&apos; &lt; &amp; &gt;"
*/
escapeXML = builtins.replaceStrings
["\"" "'" "<" ">" "&"]

View file

@ -258,6 +258,12 @@ rec {
platform = {};
};
x86_64-darwin = {
config = "x86_64-apple-darwin";
xcodePlatform = "MacOSX";
platform = {};
};
#
# Windows
#

View file

@ -20,15 +20,17 @@ rec {
name = "PowerNV";
baseConfig = "powernv_defconfig";
target = "zImage";
installTarget = "install";
file = "vmlinux";
target = "vmlinux";
autoModules = true;
# avoid driver/FS trouble arising from unusual page size
extraConfig = ''
PPC_64K_PAGES n
PPC_4K_PAGES y
IPV6 y
ATA_BMDMA y
ATA_SFF y
VIRTIO_MENU y
'';
};
};

View file

@ -1,8 +1,11 @@
#!/bin/sh
#!/usr/bin/env bash
#
# This script is used to test that the module system is working as expected.
# By default it tests the version of nixpkgs which is defined in the NIX_PATH.
set -o errexit -o noclobber -o nounset -o pipefail
shopt -s failglob inherit_errexit
# https://stackoverflow.com/a/246128/6605742
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
@ -13,101 +16,96 @@ fail=0
evalConfig() {
local attr=$1
shift;
local script="import ./default.nix { modules = [ $@ ];}"
shift
local script="import ./default.nix { modules = [ $* ];}"
nix-instantiate --timeout 1 -E "$script" -A "$attr" --eval-only --show-trace --read-write-mode
}
reportFailure() {
local attr=$1
shift;
local script="import ./default.nix { modules = [ $@ ];}"
shift
local script="import ./default.nix { modules = [ $* ];}"
echo 2>&1 "$ nix-instantiate -E '$script' -A '$attr' --eval-only"
evalConfig "$attr" "$@"
fail=$((fail + 1))
evalConfig "$attr" "$@" || true
((++fail))
}
checkConfigOutput() {
local outputContains=$1
shift;
shift
if evalConfig "$@" 2>/dev/null | grep --silent "$outputContains" ; then
pass=$((pass + 1))
return 0;
((++pass))
else
echo 2>&1 "error: Expected result matching '$outputContains', while evaluating"
reportFailure "$@"
return 1
fi
}
checkConfigError() {
local errorContains=$1
local err=""
shift;
if err==$(evalConfig "$@" 2>&1 >/dev/null); then
shift
if err="$(evalConfig "$@" 2>&1 >/dev/null)"; then
echo 2>&1 "error: Expected error code, got exit code 0, while evaluating"
reportFailure "$@"
return 1
else
if echo "$err" | grep -zP --silent "$errorContains" ; then
pass=$((pass + 1))
return 0;
((++pass))
else
echo 2>&1 "error: Expected error matching '$errorContains', while evaluating"
reportFailure "$@"
return 1
fi
fi
}
# Check boolean option.
checkConfigOutput "false" config.enable ./declare-enable.nix
checkConfigError 'The option .* does not exist. Definition values:\n- In .*: true' config.enable ./define-enable.nix
checkConfigOutput '^false$' config.enable ./declare-enable.nix
checkConfigError 'The option .* does not exist. Definition values:\n\s*- In .*: true' config.enable ./define-enable.nix
# Check integer types.
# unsigned
checkConfigOutput "42" config.value ./declare-int-unsigned-value.nix ./define-value-int-positive.nix
checkConfigError 'A definition for option .* is not of type.*unsigned integer.*. Definition values:\n- In .*: -23' config.value ./declare-int-unsigned-value.nix ./define-value-int-negative.nix
checkConfigOutput '^42$' config.value ./declare-int-unsigned-value.nix ./define-value-int-positive.nix
checkConfigError 'A definition for option .* is not of type.*unsigned integer.*. Definition values:\n\s*- In .*: -23' config.value ./declare-int-unsigned-value.nix ./define-value-int-negative.nix
# positive
checkConfigError 'A definition for option .* is not of type.*positive integer.*. Definition values:\n- In .*: 0' config.value ./declare-int-positive-value.nix ./define-value-int-zero.nix
checkConfigError 'A definition for option .* is not of type.*positive integer.*. Definition values:\n\s*- In .*: 0' config.value ./declare-int-positive-value.nix ./define-value-int-zero.nix
# between
checkConfigOutput "42" config.value ./declare-int-between-value.nix ./define-value-int-positive.nix
checkConfigError 'A definition for option .* is not of type.*between.*-21 and 43.*inclusive.*. Definition values:\n- In .*: -23' config.value ./declare-int-between-value.nix ./define-value-int-negative.nix
checkConfigOutput '^42$' config.value ./declare-int-between-value.nix ./define-value-int-positive.nix
checkConfigError 'A definition for option .* is not of type.*between.*-21 and 43.*inclusive.*. Definition values:\n\s*- In .*: -23' config.value ./declare-int-between-value.nix ./define-value-int-negative.nix
# Check either types
# types.either
checkConfigOutput "42" config.value ./declare-either.nix ./define-value-int-positive.nix
checkConfigOutput "\"24\"" config.value ./declare-either.nix ./define-value-string.nix
checkConfigOutput '^42$' config.value ./declare-either.nix ./define-value-int-positive.nix
checkConfigOutput '^"24"$' config.value ./declare-either.nix ./define-value-string.nix
# types.oneOf
checkConfigOutput "42" config.value ./declare-oneOf.nix ./define-value-int-positive.nix
checkConfigOutput "[ ]" config.value ./declare-oneOf.nix ./define-value-list.nix
checkConfigOutput "\"24\"" config.value ./declare-oneOf.nix ./define-value-string.nix
checkConfigOutput '^42$' config.value ./declare-oneOf.nix ./define-value-int-positive.nix
checkConfigOutput '^\[ \]$' config.value ./declare-oneOf.nix ./define-value-list.nix
checkConfigOutput '^"24"$' config.value ./declare-oneOf.nix ./define-value-string.nix
# Check mkForce without submodules.
set -- config.enable ./declare-enable.nix ./define-enable.nix
checkConfigOutput "true" "$@"
checkConfigOutput "false" "$@" ./define-force-enable.nix
checkConfigOutput "false" "$@" ./define-enable-force.nix
checkConfigOutput '^true$' "$@"
checkConfigOutput '^false$' "$@" ./define-force-enable.nix
checkConfigOutput '^false$' "$@" ./define-enable-force.nix
# Check mkForce with option and submodules.
checkConfigError 'attribute .*foo.* .* not found' config.attrsOfSub.foo.enable ./declare-attrsOfSub-any-enable.nix
checkConfigOutput 'false' config.attrsOfSub.foo.enable ./declare-attrsOfSub-any-enable.nix ./define-attrsOfSub-foo.nix
checkConfigOutput '^false$' config.attrsOfSub.foo.enable ./declare-attrsOfSub-any-enable.nix ./define-attrsOfSub-foo.nix
set -- config.attrsOfSub.foo.enable ./declare-attrsOfSub-any-enable.nix ./define-attrsOfSub-foo-enable.nix
checkConfigOutput 'true' "$@"
checkConfigOutput 'false' "$@" ./define-force-attrsOfSub-foo-enable.nix
checkConfigOutput 'false' "$@" ./define-attrsOfSub-force-foo-enable.nix
checkConfigOutput 'false' "$@" ./define-attrsOfSub-foo-force-enable.nix
checkConfigOutput 'false' "$@" ./define-attrsOfSub-foo-enable-force.nix
checkConfigOutput '^true$' "$@"
checkConfigOutput '^false$' "$@" ./define-force-attrsOfSub-foo-enable.nix
checkConfigOutput '^false$' "$@" ./define-attrsOfSub-force-foo-enable.nix
checkConfigOutput '^false$' "$@" ./define-attrsOfSub-foo-force-enable.nix
checkConfigOutput '^false$' "$@" ./define-attrsOfSub-foo-enable-force.nix
# Check overriding effect of mkForce on submodule definitions.
checkConfigError 'attribute .*bar.* .* not found' config.attrsOfSub.bar.enable ./declare-attrsOfSub-any-enable.nix ./define-attrsOfSub-foo.nix
checkConfigOutput 'false' config.attrsOfSub.bar.enable ./declare-attrsOfSub-any-enable.nix ./define-attrsOfSub-foo.nix ./define-attrsOfSub-bar.nix
checkConfigOutput '^false$' config.attrsOfSub.bar.enable ./declare-attrsOfSub-any-enable.nix ./define-attrsOfSub-foo.nix ./define-attrsOfSub-bar.nix
set -- config.attrsOfSub.bar.enable ./declare-attrsOfSub-any-enable.nix ./define-attrsOfSub-foo.nix ./define-attrsOfSub-bar-enable.nix
checkConfigOutput 'true' "$@"
checkConfigOutput '^true$' "$@"
checkConfigError 'attribute .*bar.* .* not found' "$@" ./define-force-attrsOfSub-foo-enable.nix
checkConfigError 'attribute .*bar.* .* not found' "$@" ./define-attrsOfSub-force-foo-enable.nix
checkConfigOutput 'true' "$@" ./define-attrsOfSub-foo-force-enable.nix
checkConfigOutput 'true' "$@" ./define-attrsOfSub-foo-enable-force.nix
checkConfigOutput '^true$' "$@" ./define-attrsOfSub-foo-force-enable.nix
checkConfigOutput '^true$' "$@" ./define-attrsOfSub-foo-enable-force.nix
# Check mkIf with submodules.
checkConfigError 'attribute .*foo.* .* not found' config.attrsOfSub.foo.enable ./declare-enable.nix ./declare-attrsOfSub-any-enable.nix
@ -115,24 +113,24 @@ set -- config.attrsOfSub.foo.enable ./declare-enable.nix ./declare-attrsOfSub-an
checkConfigError 'attribute .*foo.* .* not found' "$@" ./define-if-attrsOfSub-foo-enable.nix
checkConfigError 'attribute .*foo.* .* not found' "$@" ./define-attrsOfSub-if-foo-enable.nix
checkConfigError 'attribute .*foo.* .* not found' "$@" ./define-attrsOfSub-foo-if-enable.nix
checkConfigOutput 'false' "$@" ./define-attrsOfSub-foo-enable-if.nix
checkConfigOutput 'true' "$@" ./define-enable.nix ./define-if-attrsOfSub-foo-enable.nix
checkConfigOutput 'true' "$@" ./define-enable.nix ./define-attrsOfSub-if-foo-enable.nix
checkConfigOutput 'true' "$@" ./define-enable.nix ./define-attrsOfSub-foo-if-enable.nix
checkConfigOutput 'true' "$@" ./define-enable.nix ./define-attrsOfSub-foo-enable-if.nix
checkConfigOutput '^false$' "$@" ./define-attrsOfSub-foo-enable-if.nix
checkConfigOutput '^true$' "$@" ./define-enable.nix ./define-if-attrsOfSub-foo-enable.nix
checkConfigOutput '^true$' "$@" ./define-enable.nix ./define-attrsOfSub-if-foo-enable.nix
checkConfigOutput '^true$' "$@" ./define-enable.nix ./define-attrsOfSub-foo-if-enable.nix
checkConfigOutput '^true$' "$@" ./define-enable.nix ./define-attrsOfSub-foo-enable-if.nix
# Check disabledModules with config definitions and option declarations.
set -- config.enable ./define-enable.nix ./declare-enable.nix
checkConfigOutput "true" "$@"
checkConfigOutput "false" "$@" ./disable-define-enable.nix
checkConfigError "The option .*enable.* does not exist. Definition values:\n- In .*: true" "$@" ./disable-declare-enable.nix
checkConfigOutput '^true$' "$@"
checkConfigOutput '^false$' "$@" ./disable-define-enable.nix
checkConfigError "The option .*enable.* does not exist. Definition values:\n\s*- In .*: true" "$@" ./disable-declare-enable.nix
checkConfigError "attribute .*enable.* in selection path .*config.enable.* not found" "$@" ./disable-define-enable.nix ./disable-declare-enable.nix
checkConfigError "attribute .*enable.* in selection path .*config.enable.* not found" "$@" ./disable-enable-modules.nix
# Check _module.args.
set -- config.enable ./declare-enable.nix ./define-enable-with-custom-arg.nix
checkConfigError 'while evaluating the module argument .*custom.* in .*define-enable-with-custom-arg.nix.*:' "$@"
checkConfigOutput "true" "$@" ./define-_module-args-custom.nix
checkConfigOutput '^true$' "$@" ./define-_module-args-custom.nix
# Check that using _module.args on imports causes an infinite recursion, with
# the proper error context.
@ -142,71 +140,78 @@ checkConfigError 'infinite recursion encountered' "$@"
# Check _module.check.
set -- config.enable ./declare-enable.nix ./define-enable.nix ./define-attrsOfSub-foo.nix
checkConfigError 'The option .* does not exist. Definition values:\n- In .*' "$@"
checkConfigOutput "true" "$@" ./define-module-check.nix
checkConfigError 'The option .* does not exist. Definition values:\n\s*- In .*' "$@"
checkConfigOutput '^true$' "$@" ./define-module-check.nix
# Check coerced value.
checkConfigOutput "\"42\"" config.value ./declare-coerced-value.nix
checkConfigOutput "\"24\"" config.value ./declare-coerced-value.nix ./define-value-string.nix
checkConfigError 'A definition for option .* is not.*string or signed integer convertible to it.*. Definition values:\n- In .*: \[ \]' config.value ./declare-coerced-value.nix ./define-value-list.nix
checkConfigOutput '^"42"$' config.value ./declare-coerced-value.nix
checkConfigOutput '^"24"$' config.value ./declare-coerced-value.nix ./define-value-string.nix
checkConfigError 'A definition for option .* is not.*string or signed integer convertible to it.*. Definition values:\n\s*- In .*: \[ \]' config.value ./declare-coerced-value.nix ./define-value-list.nix
# Check coerced value with unsound coercion
checkConfigOutput "12" config.value ./declare-coerced-value-unsound.nix
checkConfigError 'A definition for option .* is not of type .*. Definition values:\n- In .*: "1000"' config.value ./declare-coerced-value-unsound.nix ./define-value-string-bigint.nix
checkConfigError 'unrecognised JSON value' config.value ./declare-coerced-value-unsound.nix ./define-value-string-arbitrary.nix
checkConfigOutput '^12$' config.value ./declare-coerced-value-unsound.nix
checkConfigError 'A definition for option .* is not of type .*. Definition values:\n\s*- In .*: "1000"' config.value ./declare-coerced-value-unsound.nix ./define-value-string-bigint.nix
checkConfigError 'json.exception.parse_error' config.value ./declare-coerced-value-unsound.nix ./define-value-string-arbitrary.nix
# Check mkAliasOptionModule.
checkConfigOutput "true" config.enable ./alias-with-priority.nix
checkConfigOutput "true" config.enableAlias ./alias-with-priority.nix
checkConfigOutput "false" config.enable ./alias-with-priority-can-override.nix
checkConfigOutput "false" config.enableAlias ./alias-with-priority-can-override.nix
checkConfigOutput '^true$' config.enable ./alias-with-priority.nix
checkConfigOutput '^true$' config.enableAlias ./alias-with-priority.nix
checkConfigOutput '^false$' config.enable ./alias-with-priority-can-override.nix
checkConfigOutput '^false$' config.enableAlias ./alias-with-priority-can-override.nix
# submoduleWith
## specialArgs should work
checkConfigOutput "foo" config.submodule.foo ./declare-submoduleWith-special.nix
checkConfigOutput '^"foo"$' config.submodule.foo ./declare-submoduleWith-special.nix
## shorthandOnlyDefinesConfig behaves as expected
checkConfigOutput "true" config.submodule.config ./declare-submoduleWith-shorthand.nix ./define-submoduleWith-shorthand.nix
checkConfigOutput '^true$' config.submodule.config ./declare-submoduleWith-shorthand.nix ./define-submoduleWith-shorthand.nix
checkConfigError 'is not of type `boolean' config.submodule.config ./declare-submoduleWith-shorthand.nix ./define-submoduleWith-noshorthand.nix
checkConfigError "You're trying to declare a value of type \`bool'\nrather than an attribute-set for the option" config.submodule.config ./declare-submoduleWith-noshorthand.nix ./define-submoduleWith-shorthand.nix
checkConfigOutput "true" config.submodule.config ./declare-submoduleWith-noshorthand.nix ./define-submoduleWith-noshorthand.nix
checkConfigError "You're trying to declare a value of type \`bool'\n\s*rather than an attribute-set for the option" config.submodule.config ./declare-submoduleWith-noshorthand.nix ./define-submoduleWith-shorthand.nix
checkConfigOutput '^true$' config.submodule.config ./declare-submoduleWith-noshorthand.nix ./define-submoduleWith-noshorthand.nix
## submoduleWith should merge all modules in one swoop
checkConfigOutput "true" config.submodule.inner ./declare-submoduleWith-modules.nix
checkConfigOutput "true" config.submodule.outer ./declare-submoduleWith-modules.nix
checkConfigOutput '^true$' config.submodule.inner ./declare-submoduleWith-modules.nix
checkConfigOutput '^true$' config.submodule.outer ./declare-submoduleWith-modules.nix
# Should also be able to evaluate the type name (which evaluates freeformType,
# which evaluates all the modules defined by the type)
checkConfigOutput "submodule" options.submodule.type.description ./declare-submoduleWith-modules.nix
checkConfigOutput '^"submodule"$' options.submodule.type.description ./declare-submoduleWith-modules.nix
## submodules can be declared using (evalModules {...}).type
checkConfigOutput '^true$' config.submodule.inner ./declare-submodule-via-evalModules.nix
checkConfigOutput '^true$' config.submodule.outer ./declare-submodule-via-evalModules.nix
# Should also be able to evaluate the type name (which evaluates freeformType,
# which evaluates all the modules defined by the type)
checkConfigOutput '^"submodule"$' options.submodule.type.description ./declare-submodule-via-evalModules.nix
## Paths should be allowed as values and work as expected
checkConfigOutput "true" config.submodule.enable ./declare-submoduleWith-path.nix
checkConfigOutput '^true$' config.submodule.enable ./declare-submoduleWith-path.nix
# Check that disabledModules works recursively and correctly
checkConfigOutput "true" config.enable ./disable-recursive/main.nix
checkConfigOutput "true" config.enable ./disable-recursive/{main.nix,disable-foo.nix}
checkConfigOutput "true" config.enable ./disable-recursive/{main.nix,disable-bar.nix}
checkConfigError 'The option .* does not exist. Definition values:\n- In .*: true' config.enable ./disable-recursive/{main.nix,disable-foo.nix,disable-bar.nix}
checkConfigOutput '^true$' config.enable ./disable-recursive/main.nix
checkConfigOutput '^true$' config.enable ./disable-recursive/{main.nix,disable-foo.nix}
checkConfigOutput '^true$' config.enable ./disable-recursive/{main.nix,disable-bar.nix}
checkConfigError 'The option .* does not exist. Definition values:\n\s*- In .*: true' config.enable ./disable-recursive/{main.nix,disable-foo.nix,disable-bar.nix}
# Check that imports can depend on derivations
checkConfigOutput "true" config.enable ./import-from-store.nix
checkConfigOutput '^true$' config.enable ./import-from-store.nix
# Check that configs can be conditional on option existence
checkConfigOutput true config.enable ./define-option-dependently.nix ./declare-enable.nix ./declare-int-positive-value.nix
checkConfigOutput 360 config.value ./define-option-dependently.nix ./declare-enable.nix ./declare-int-positive-value.nix
checkConfigOutput 7 config.value ./define-option-dependently.nix ./declare-int-positive-value.nix
checkConfigOutput true config.set.enable ./define-option-dependently-nested.nix ./declare-enable-nested.nix ./declare-int-positive-value-nested.nix
checkConfigOutput 360 config.set.value ./define-option-dependently-nested.nix ./declare-enable-nested.nix ./declare-int-positive-value-nested.nix
checkConfigOutput 7 config.set.value ./define-option-dependently-nested.nix ./declare-int-positive-value-nested.nix
checkConfigOutput '^true$' config.enable ./define-option-dependently.nix ./declare-enable.nix ./declare-int-positive-value.nix
checkConfigOutput '^360$' config.value ./define-option-dependently.nix ./declare-enable.nix ./declare-int-positive-value.nix
checkConfigOutput '^7$' config.value ./define-option-dependently.nix ./declare-int-positive-value.nix
checkConfigOutput '^true$' config.set.enable ./define-option-dependently-nested.nix ./declare-enable-nested.nix ./declare-int-positive-value-nested.nix
checkConfigOutput '^360$' config.set.value ./define-option-dependently-nested.nix ./declare-enable-nested.nix ./declare-int-positive-value-nested.nix
checkConfigOutput '^7$' config.set.value ./define-option-dependently-nested.nix ./declare-int-positive-value-nested.nix
# Check attrsOf and lazyAttrsOf. Only lazyAttrsOf should be lazy, and only
# attrsOf should work with conditional definitions.
# In addition, lazyAttrsOf should honor an option's emptyValue.
checkConfigError "is not lazy" config.isLazy ./declare-attrsOf.nix ./attrsOf-lazy-check.nix
checkConfigOutput "true" config.isLazy ./declare-lazyAttrsOf.nix ./attrsOf-lazy-check.nix
checkConfigOutput "true" config.conditionalWorks ./declare-attrsOf.nix ./attrsOf-conditional-check.nix
checkConfigOutput "false" config.conditionalWorks ./declare-lazyAttrsOf.nix ./attrsOf-conditional-check.nix
checkConfigOutput "empty" config.value.foo ./declare-lazyAttrsOf.nix ./attrsOf-conditional-check.nix
checkConfigOutput '^true$' config.isLazy ./declare-lazyAttrsOf.nix ./attrsOf-lazy-check.nix
checkConfigOutput '^true$' config.conditionalWorks ./declare-attrsOf.nix ./attrsOf-conditional-check.nix
checkConfigOutput '^false$' config.conditionalWorks ./declare-lazyAttrsOf.nix ./attrsOf-conditional-check.nix
checkConfigOutput '^"empty"$' config.value.foo ./declare-lazyAttrsOf.nix ./attrsOf-conditional-check.nix
# Even with multiple assignments, a type error should be thrown if any of them aren't valid
@ -215,64 +220,69 @@ checkConfigError 'A definition for option .* is not of type .*' \
## Freeform modules
# Assigning without a declared option should work
checkConfigOutput 24 config.value ./freeform-attrsOf.nix ./define-value-string.nix
checkConfigOutput '^"24"$' config.value ./freeform-attrsOf.nix ./define-value-string.nix
# Having no freeform assignments shouldn't make it error
checkConfigOutput '{ }' config ./freeform-attrsOf.nix
checkConfigOutput '^{ }$' config ./freeform-attrsOf.nix
# but only if the type matches
checkConfigError 'A definition for option .* is not of type .*' config.value ./freeform-attrsOf.nix ./define-value-list.nix
# and properties should be applied
checkConfigOutput yes config.value ./freeform-attrsOf.nix ./define-value-string-properties.nix
checkConfigOutput '^"yes"$' config.value ./freeform-attrsOf.nix ./define-value-string-properties.nix
# Options should still be declarable, and be able to have a type that doesn't match the freeform type
checkConfigOutput false config.enable ./freeform-attrsOf.nix ./define-value-string.nix ./declare-enable.nix
checkConfigOutput 24 config.value ./freeform-attrsOf.nix ./define-value-string.nix ./declare-enable.nix
checkConfigOutput '^false$' config.enable ./freeform-attrsOf.nix ./define-value-string.nix ./declare-enable.nix
checkConfigOutput '^"24"$' config.value ./freeform-attrsOf.nix ./define-value-string.nix ./declare-enable.nix
# and this should work too with nested values
checkConfigOutput false config.nest.foo ./freeform-attrsOf.nix ./freeform-nested.nix
checkConfigOutput bar config.nest.bar ./freeform-attrsOf.nix ./freeform-nested.nix
checkConfigOutput '^false$' config.nest.foo ./freeform-attrsOf.nix ./freeform-nested.nix
checkConfigOutput '^"bar"$' config.nest.bar ./freeform-attrsOf.nix ./freeform-nested.nix
# Check whether a declared option can depend on a freeform-typed one
checkConfigOutput null config.foo ./freeform-attrsOf.nix ./freeform-str-dep-unstr.nix
checkConfigOutput 24 config.foo ./freeform-attrsOf.nix ./freeform-str-dep-unstr.nix ./define-value-string.nix
checkConfigOutput '^null$' config.foo ./freeform-attrsOf.nix ./freeform-str-dep-unstr.nix
checkConfigOutput '^"24"$' config.foo ./freeform-attrsOf.nix ./freeform-str-dep-unstr.nix ./define-value-string.nix
# Check whether a freeform-typed value can depend on a declared option; this can only work with lazyAttrsOf
checkConfigError 'infinite recursion encountered' config.foo ./freeform-attrsOf.nix ./freeform-unstr-dep-str.nix
checkConfigError 'The option .* is used but not defined' config.foo ./freeform-lazyAttrsOf.nix ./freeform-unstr-dep-str.nix
checkConfigOutput 24 config.foo ./freeform-lazyAttrsOf.nix ./freeform-unstr-dep-str.nix ./define-value-string.nix
checkConfigOutput '^"24"$' config.foo ./freeform-lazyAttrsOf.nix ./freeform-unstr-dep-str.nix ./define-value-string.nix
## types.anything
# Check that attribute sets are merged recursively
checkConfigOutput null config.value.foo ./types-anything/nested-attrs.nix
checkConfigOutput null config.value.l1.foo ./types-anything/nested-attrs.nix
checkConfigOutput null config.value.l1.l2.foo ./types-anything/nested-attrs.nix
checkConfigOutput null config.value.l1.l2.l3.foo ./types-anything/nested-attrs.nix
checkConfigOutput '^null$' config.value.foo ./types-anything/nested-attrs.nix
checkConfigOutput '^null$' config.value.l1.foo ./types-anything/nested-attrs.nix
checkConfigOutput '^null$' config.value.l1.l2.foo ./types-anything/nested-attrs.nix
checkConfigOutput '^null$' config.value.l1.l2.l3.foo ./types-anything/nested-attrs.nix
# Attribute sets that are coercible to strings shouldn't be recursed into
checkConfigOutput foo config.value.outPath ./types-anything/attrs-coercible.nix
checkConfigOutput '^"foo"$' config.value.outPath ./types-anything/attrs-coercible.nix
# Multiple lists aren't concatenated together
checkConfigError 'The option .* has conflicting definitions' config.value ./types-anything/lists.nix
# Check that all equalizable atoms can be used as long as all definitions are equal
checkConfigOutput 0 config.value.int ./types-anything/equal-atoms.nix
checkConfigOutput false config.value.bool ./types-anything/equal-atoms.nix
checkConfigOutput '""' config.value.string ./types-anything/equal-atoms.nix
checkConfigOutput / config.value.path ./types-anything/equal-atoms.nix
checkConfigOutput null config.value.null ./types-anything/equal-atoms.nix
checkConfigOutput 0.1 config.value.float ./types-anything/equal-atoms.nix
checkConfigOutput '^0$' config.value.int ./types-anything/equal-atoms.nix
checkConfigOutput '^false$' config.value.bool ./types-anything/equal-atoms.nix
checkConfigOutput '^""$' config.value.string ./types-anything/equal-atoms.nix
checkConfigOutput '^/$' config.value.path ./types-anything/equal-atoms.nix
checkConfigOutput '^null$' config.value.null ./types-anything/equal-atoms.nix
checkConfigOutput '^0.1$' config.value.float ./types-anything/equal-atoms.nix
# Functions can't be merged together
checkConfigError "The option .value.multiple-lambdas.<function body>. has conflicting option types" config.applied.multiple-lambdas ./types-anything/functions.nix
checkConfigOutput '<LAMBDA>' config.value.single-lambda ./types-anything/functions.nix
checkConfigOutput 'null' config.applied.merging-lambdas.x ./types-anything/functions.nix
checkConfigOutput 'null' config.applied.merging-lambdas.y ./types-anything/functions.nix
checkConfigOutput '^<LAMBDA>$' config.value.single-lambda ./types-anything/functions.nix
checkConfigOutput '^null$' config.applied.merging-lambdas.x ./types-anything/functions.nix
checkConfigOutput '^null$' config.applied.merging-lambdas.y ./types-anything/functions.nix
# Check that all mk* modifiers are applied
checkConfigError 'attribute .* not found' config.value.mkiffalse ./types-anything/mk-mods.nix
checkConfigOutput '{ }' config.value.mkiftrue ./types-anything/mk-mods.nix
checkConfigOutput 1 config.value.mkdefault ./types-anything/mk-mods.nix
checkConfigOutput '{ }' config.value.mkmerge ./types-anything/mk-mods.nix
checkConfigOutput true config.value.mkbefore ./types-anything/mk-mods.nix
checkConfigOutput 1 config.value.nested.foo ./types-anything/mk-mods.nix
checkConfigOutput baz config.value.nested.bar.baz ./types-anything/mk-mods.nix
checkConfigOutput '^{ }$' config.value.mkiftrue ./types-anything/mk-mods.nix
checkConfigOutput '^1$' config.value.mkdefault ./types-anything/mk-mods.nix
checkConfigOutput '^{ }$' config.value.mkmerge ./types-anything/mk-mods.nix
checkConfigOutput '^true$' config.value.mkbefore ./types-anything/mk-mods.nix
checkConfigOutput '^1$' config.value.nested.foo ./types-anything/mk-mods.nix
checkConfigOutput '^"baz"$' config.value.nested.bar.baz ./types-anything/mk-mods.nix
## types.functionTo
checkConfigOutput "input is input" config.result ./functionTo/trivial.nix
checkConfigOutput "a b" config.result ./functionTo/merging-list.nix
checkConfigError 'A definition for option .fun.\[function body\]. is not of type .string.. Definition values:\n- In .*wrong-type.nix' config.result ./functionTo/wrong-type.nix
checkConfigOutput "b a" config.result ./functionTo/list-order.nix
checkConfigOutput "a c" config.result ./functionTo/merging-attrs.nix
checkConfigOutput '^"input is input"$' config.result ./functionTo/trivial.nix
checkConfigOutput '^"a b"$' config.result ./functionTo/merging-list.nix
checkConfigError 'A definition for option .fun.\[function body\]. is not of type .string.. Definition values:\n\s*- In .*wrong-type.nix' config.result ./functionTo/wrong-type.nix
checkConfigOutput '^"b a"$' config.result ./functionTo/list-order.nix
checkConfigOutput '^"a c"$' config.result ./functionTo/merging-attrs.nix
# moduleType
checkConfigOutput '^"a b"$' config.resultFoo ./declare-variants.nix ./define-variant.nix
checkConfigOutput '^"a y z"$' config.resultFooBar ./declare-variants.nix ./define-variant.nix
checkConfigOutput '^"a b c"$' config.resultFooFoo ./declare-variants.nix ./define-variant.nix
cat <<EOF
====== module tests ======
@ -280,7 +290,7 @@ $pass Pass
$fail Fail
EOF
if test $fail -ne 0; then
if [ "$fail" -ne 0 ]; then
exit 1
fi
exit 0

View file

@ -0,0 +1,28 @@
{ lib, ... }: {
options.submodule = lib.mkOption {
inherit (lib.evalModules {
modules = [
{
options.inner = lib.mkOption {
type = lib.types.bool;
default = false;
};
}
];
}) type;
default = {};
};
config.submodule = lib.mkMerge [
({ lib, ... }: {
options.outer = lib.mkOption {
type = lib.types.bool;
default = false;
};
})
{
inner = true;
outer = true;
}
];
}

View file

@ -0,0 +1,9 @@
{ lib, moduleType, ... }:
let inherit (lib) mkOption types;
in
{
options.variants = mkOption {
type = types.lazyAttrsOf moduleType;
default = {};
};
}

View file

@ -0,0 +1,22 @@
{ config, lib, ... }:
let inherit (lib) types mkOption attrNames;
in
{
options = {
attrs = mkOption { type = types.attrsOf lib.types.int; };
result = mkOption { };
resultFoo = mkOption { };
resultFooBar = mkOption { };
resultFooFoo = mkOption { };
};
config = {
attrs.a = 1;
variants.foo.attrs.b = 1;
variants.bar.attrs.y = 1;
variants.foo.variants.bar.attrs.z = 1;
variants.foo.variants.foo.attrs.c = 3;
resultFoo = lib.concatMapStringsSep " " toString (attrNames config.variants.foo.attrs);
resultFooBar = lib.concatMapStringsSep " " toString (attrNames config.variants.foo.variants.bar.attrs);
resultFooFoo = lib.concatMapStringsSep " " toString (attrNames config.variants.foo.variants.foo.attrs);
};
}

View file

@ -23,6 +23,10 @@ pkgs.runCommand "nixpkgs-lib-tests" {
export NIX_STORE_DIR=$TEST_ROOT/store
export PAGER=cat
cacheDir=$TEST_ROOT/binary-cache
mkdir -p $NIX_CONF_DIR
echo "experimental-features = nix-command" >> $NIX_CONF_DIR/nix.conf
nix-store --init
cp -r ${../.} lib

View file

@ -1,5 +1,6 @@
#!/usr/bin/env bash
set -euo pipefail
shopt -s inherit_errexit
# Use
# || die
@ -9,27 +10,28 @@ die() {
}
if test -n "${TEST_LIB:-}"; then
export NIX_PATH=nixpkgs="$(dirname "$TEST_LIB")"
NIX_PATH=nixpkgs="$(dirname "$TEST_LIB")"
else
export NIX_PATH=nixpkgs="$(cd $(dirname ${BASH_SOURCE[0]})/../..; pwd)"
NIX_PATH=nixpkgs="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.."; pwd)"
fi
export NIX_PATH
work="$(mktemp -d)"
clean_up() {
rm -rf "$work"
}
trap clean_up EXIT
cd $work
cd "$work"
touch {README.md,module.o,foo.bar}
# nix-instantiate doesn't write out the source, only computing the hash, so
# this uses the experimental nix command instead.
dir="$(nix eval --raw '(with import <nixpkgs/lib>; "${
dir="$(nix eval --impure --raw --expr '(with import <nixpkgs/lib>; "${
cleanSource ./.
}")')"
(cd $dir; find) | sort -f | diff -U10 - <(cat <<EOF
(cd "$dir"; find) | sort -f | diff -U10 - <(cat <<EOF
.
./foo.bar
./README.md
@ -37,20 +39,20 @@ EOF
) || die "cleanSource 1"
dir="$(nix eval --raw '(with import <nixpkgs/lib>; "${
dir="$(nix eval --impure --raw --expr '(with import <nixpkgs/lib>; "${
cleanSourceWith { src = '"$work"'; filter = path: type: ! hasSuffix ".bar" path; }
}")')"
(cd $dir; find) | sort -f | diff -U10 - <(cat <<EOF
(cd "$dir"; find) | sort -f | diff -U10 - <(cat <<EOF
.
./module.o
./README.md
EOF
) || die "cleanSourceWith 1"
dir="$(nix eval --raw '(with import <nixpkgs/lib>; "${
dir="$(nix eval --impure --raw --expr '(with import <nixpkgs/lib>; "${
cleanSourceWith { src = cleanSource '"$work"'; filter = path: type: ! hasSuffix ".bar" path; }
}")')"
(cd $dir; find) | sort -f | diff -U10 - <(cat <<EOF
(cd "$dir"; find) | sort -f | diff -U10 - <(cat <<EOF
.
./README.md
EOF

View file

@ -171,7 +171,7 @@ rec {
On each release the first letter is bumped and a new animal is chosen
starting with that new letter.
*/
codeName = "Porcupine";
codeName = "Quokka";
/* Returns the current nixpkgs version suffix as string. */
versionSuffix =

View file

@ -505,17 +505,36 @@ rec {
then setFunctionArgs (args: unify (value args)) (functionArgs value)
else unify (if shorthandOnlyDefinesConfig then { config = value; } else value);
allModules = defs: modules ++ imap1 (n: { value, file }:
allModules = defs: imap1 (n: { value, file }:
if isAttrs value || isFunction value then
# Annotate the value with the location of its definition for better error messages
coerce (lib.modules.unifyModuleSyntax file "${toString file}-${toString n}") value
else value
) defs;
freeformType = (evalModules {
inherit modules specialArgs;
args.name = "name";
})._module.freeformType;
base = evalModules {
inherit specialArgs;
modules = [{
# This is a work-around for the fact that some sub-modules,
# such as the one included in an attribute set, expect an "args"
# attribute to be given to the sub-module. As the option
# evaluation does not have any specific attribute name yet, we
# provide a default for the documentation and the freeform type.
#
# This is necessary as some option declarations might take the
# "name" attribute given as an argument to the submodule and use it
# as the default of other option declarations.
#
# We use lookalike unicode single angle quotation marks because
# of the docbook transformation the options receive. In all uses
# &gt; and &lt; wouldn't be encoded correctly so the encoded values
# would be used, and use of `<` and `>` would break the XML document.
# It shouldn't cause an issue since this is cosmetic for the manual.
_module.args.name = lib.mkOptionDefault "name";
}] ++ modules;
};
freeformType = base._module.freeformType;
in
mkOptionType rec {
@ -523,32 +542,13 @@ rec {
description = freeformType.description or name;
check = x: isAttrs x || isFunction x || path.check x;
merge = loc: defs:
(evalModules {
modules = allModules defs;
inherit specialArgs;
args.name = last loc;
(base.extendModules {
modules = [ { _module.args.name = last loc; } ] ++ allModules defs;
prefix = loc;
}).config;
emptyValue = { value = {}; };
getSubOptions = prefix: (evalModules
{ inherit modules prefix specialArgs;
# This is a work-around due to the fact that some sub-modules,
# such as the one included in an attribute set, expects a "args"
# attribute to be given to the sub-module. As the option
# evaluation does not have any specific attribute name, we
# provide a default one for the documentation.
#
# This is mandatory as some option declaration might use the
# "name" attribute given as argument of the submodule and use it
# as the default of option declarations.
#
# Using lookalike unicode single angle quotation marks because
# of the docbook transformation the options receive. In all uses
# &gt; and &lt; wouldn't be encoded correctly so the encoded values
# would be used, and use of `<` and `>` would break the XML document.
# It shouldn't cause an issue since this is cosmetic for the manual.
args.name = "name";
}).options // optionalAttrs (freeformType != null) {
getSubOptions = prefix: (base.extendModules
{ inherit prefix; }).options // optionalAttrs (freeformType != null) {
# Expose the sub options of the freeform type. Note that the option
# discovery doesn't care about the attribute name used here, so this
# is just to avoid conflicts with potential options from the submodule

View file

@ -159,6 +159,7 @@
};
abbe = {
email = "ashish.is@lostca.se";
matrix = "@abbe:badti.me";
github = "wahjava";
githubId = 2255192;
name = "Ashish SHUKLA";
@ -602,6 +603,12 @@
fingerprint = "7931 EB4E 4712 D7BE 04F8 6D34 07EE 1FFC A58A 11C5";
}];
};
amfl = {
email = "amfl@none.none";
github = "amfl";
githubId = 382798;
name = "amfl";
};
amiddelk = {
email = "amiddelk@gmail.com";
github = "amiddelk";
@ -1151,6 +1158,12 @@
githubId = 56650223;
name = "Artturi N";
};
ayazhafiz = {
email = "ayaz.hafiz.1@gmail.com";
github = "ayazhafiz";
githubId = 262763;
name = "Ayaz Hafiz";
};
b4dm4n = {
email = "fabianm88@gmail.com";
github = "B4dM4n";
@ -1551,6 +1564,16 @@
githubId = 87764;
name = "Ben Ford";
};
boppyt = {
email = "boppy@nwcpz.com";
github = "boppyt";
githubId = 71049646;
name = "Zack A";
keys = [{
longkeyid = "rsa4096/0x6310C97DE31D1545";
fingerprint = "E8D7 5C19 9F65 269B 439D F77B 6310 C97D E31D 1545";
}];
};
borisbabic = {
email = "boris.ivan.babic@gmail.com";
github = "borisbabic";
@ -1657,6 +1680,7 @@
};
bryanasdev000 = {
email = "bryanasdev000@gmail.com";
matrix = "@bryanasdev000:matrix.org";
github = "bryanasdev000";
githubId = 53131727;
name = "Bryan Albuquerque";
@ -2068,6 +2092,17 @@
githubId = 12386805;
name = "Chua Hou";
};
chuangzhu = {
name = "Chuang Zhu";
email = "chuang@melty.land";
matrix = "@chuangzhu:matrix.org";
github = "chuangzhu";
githubId = 31200881;
keys = [{
longkeyid = "rsa4096/E838CED81CFFD3F9";
fingerprint = "5D03 A5E6 0754 A3E3 CA57 5037 E838 CED8 1CFF D3F9";
}];
};
chvp = {
email = "nixpkgs@cvpetegem.be";
matrix = "@charlotte:vanpetegem.me";
@ -2332,6 +2367,12 @@
githubId = 12202789;
name = "CrazedProgrammer";
};
creator54 = {
email = "hi.creator54@gmail.com";
github = "creator54";
githubId = 34543609;
name = "creator54";
};
cript0nauta = {
email = "shareman1204@gmail.com";
github = "cript0nauta";
@ -2523,12 +2564,30 @@
githubId = 1298344;
name = "Daniel Fullmer";
};
danth = {
name = "Daniel Thwaites";
email = "danthwaites30@btinternet.com";
matrix = "@danth:matrix.org";
github = "danth";
githubId = 28959268;
keys = [{
longkeyid = "rsa3072/0xD8AFC4BF05670F9D";
fingerprint = "4779 D1D5 3C97 2EAE 34A5 ED3D D8AF C4BF 0567 0F9D";
}];
};
dan4ik605743 = {
email = "6057430gu@gmail.com";
github = "dan4ik605743";
githubId = 86075850;
name = "Danil Danevich";
};
darkonion0 = {
name = "Alexandre Peruggia";
email = "darkgenius1@protonmail.com";
matrix = "@alexoo:matrix.org";
github = "DarkOnion0";
githubId = 68606322;
};
das-g = {
email = "nixpkgs@raphael.dasgupta.ch";
github = "das-g";
@ -2587,6 +2646,12 @@
githubId = 91113;
name = "David Kleuker";
};
davidarmstronglewis = {
email = "davidlewis@mac.com";
github = "davidarmstronglewis";
githubId = 6754950;
name = "David Armstrong Lewis";
};
davidrusu = {
email = "davidrusu.me@gmail.com";
github = "davidrusu";
@ -2789,6 +2854,12 @@
githubId = 2439413;
name = "Derek Gonyeo";
};
dguenther = {
email = "dguenther9@gmail.com";
github = "dguenther";
githubId = 767083;
name = "Derek Guenther";
};
dhkl = {
email = "david@davidslab.com";
github = "dhl";
@ -3035,6 +3106,12 @@
fingerprint = "85F3 72DF 4AF3 EF13 ED34 72A3 0AAF 2901 E804 0715";
}];
};
drzoidberg = {
email = "jakob@mast3rsoft.com";
github = "jakobneufeld";
githubId = 24791219;
name = "Jakob Neufeld";
};
dschrempf = {
name = "Dominik Schrempf";
email = "dominik.schrempf@gmail.com";
@ -3227,12 +3304,6 @@
githubId = 119483;
name = "Matthew Brown";
};
eduardosm = {
email = "esm@eduardosm.net";
github = "eduardosm";
githubId = 761151;
name = "Eduardo Sánchez Muñoz";
};
eduarrrd = {
email = "e.bachmakov@gmail.com";
github = "eduarrrd";
@ -3277,6 +3348,7 @@
};
ekleog = {
email = "leo@gaspard.io";
matrix = "@leo:gaspard.ninja";
github = "ekleog";
githubId = 411447;
name = "Leo Gaspard";
@ -3647,10 +3719,14 @@
};
expipiplus1 = {
email = "nix@monoid.al";
matrix = "@joe:monoid.al";
matrix = "@ellie:monoid.al";
github = "expipiplus1";
githubId = 857308;
name = "Joe Hermaszewski";
name = "Ellie Hermaszewska";
keys = [{
longkeyid = "rsa4096/0xC8116E3A0C1CA76A";
fingerprint = "FC1D 3E4F CBCA 80DF E870 6397 C811 6E3A 0C1C A76A";
}];
};
extends = {
email = "sharosari@gmail.com";
@ -3860,6 +3936,12 @@
githubId = 183879;
name = "Florian Klink";
};
florentc = {
email = "florentc@users.noreply.github.com";
github = "florentc";
githubId = 1149048;
name = "Florent Ch.";
};
FlorianFranzen = {
email = "Florian.Franzen@gmail.com";
github = "FlorianFranzen";
@ -4103,6 +4185,12 @@
githubId = 20208;
name = "Rok Garbas";
};
gardspirito = {
name = "gardspirito";
email = "nyxoroso@gmail.com";
github = "gardspirito";
githubId = 29687558;
};
garrison = {
email = "jim@garrison.cc";
github = "garrison";
@ -4133,6 +4221,16 @@
githubId = 313929;
name = "Gabriel Ebner";
};
genofire = {
name = "genofire";
email = "geno+dev@fireorbit.de";
github = "genofire";
githubId = 6905586;
keys = [{
longkeyid = "rsa4096/0xFC83907C125BC2BC";
fingerprint = "386E D1BF 848A BB4A 6B4A 3C45 FC83 907C 125B C2BC";
}];
};
georgewhewell = {
email = "georgerw@gmail.com";
github = "georgewhewell";
@ -4192,6 +4290,12 @@
githubId = 1713676;
name = "Luis G. Torres";
};
GKasparov = {
email = "mizozahr@gmail.com";
github = "GKasparov";
githubId = 60962839;
name = "Mazen Zahr";
};
gleber = {
email = "gleber.p@gmail.com";
github = "gleber";
@ -4535,6 +4639,13 @@
githubId = 2405974;
name = "Sébastian Méric de Bellefon";
};
henkkalkwater = {
email = "chris+nixpkgs@netsoj.nl";
github = "HenkKalkwater";
githubId = 4262067;
matrix = "@chris:netsoj.nl";
name = "Chris Josten";
};
henrikolsson = {
email = "henrik@fixme.se";
github = "henrikolsson";
@ -4660,6 +4771,12 @@
githubId = 896431;
name = "Chris Hodapp";
};
hollowman6 = {
email = "hollowman@hollowman.ml";
github = "HollowMan6";
githubId = 43995067;
name = "Songlin Jiang";
};
holymonson = {
email = "holymonson@gmail.com";
github = "holymonson";
@ -4921,6 +5038,12 @@
githubId = 4085046;
name = "Imuli";
};
ineol = {
email = "leo.stefanesco@gmail.com";
github = "ineol";
githubId = 37965;
name = "Léo Stefanesco";
};
infinisil = {
email = "contact@infinisil.com";
matrix = "@infinisil:matrix.org";
@ -4965,6 +5088,13 @@
fingerprint = "1412 816B A9FA F62F D051 1975 D3E1 B013 B463 1293";
}];
};
ius = {
email = "j.de.gram@gmail.com";
name = "Joerie de Gram";
matrix = "@ius:nltrix.net";
github = "ius";
githubId = 529626;
};
ivan = {
email = "ivan@ludios.org";
github = "ivan";
@ -4998,7 +5128,7 @@
}];
};
ivankovnatsky = {
email = "ikovnatsky@protonmail.ch";
email = "75213+ivankovnatsky@users.noreply.github.com";
github = "ivankovnatsky";
githubId = 75213;
name = "Ivan Kovnatsky";
@ -5182,12 +5312,28 @@
githubId = 221929;
name = "Jean-Baptiste Giraudeau";
};
jceb = {
name = "jceb";
email = "jceb@e-jc.de";
github = "jceb";
githubId = 101593;
};
jchw = {
email = "johnwchadwick@gmail.com";
github = "jchv";
githubId = 938744;
name = "John Chadwick";
};
jcouyang = {
email = "oyanglulu@gmail.com";
github = "jcouyang";
githubId = 1235045;
name = "Jichao Ouyang";
keys = [{
longkeyid = "rsa2048/0xDA8B833B52604E63";
fingerprint = "A506 C38D 5CC8 47D0 DF01 134A DA8B 833B 5260 4E63";
}];
};
jcumming = {
email = "jack@mudshark.org";
github = "jcumming";
@ -5782,6 +5928,16 @@
githubId = 20658981;
name = "Jarosław Wygoda";
};
jyooru = {
email = "joel@joel.tokyo";
github = "jyooru";
githubId = 63786778;
name = "Joel";
keys = [{
longkeyid = "rsa4096/18550BD205E9EF64";
fingerprint = "9148 DC9E F4D5 3EB6 A30E 8EF0 1855 0BD2 05E9 EF64";
}];
};
jyp = {
email = "jeanphilippe.bernardy@gmail.com";
github = "jyp";
@ -5851,6 +6007,12 @@
githubId = 3831860;
name = "Arnold Krille";
};
kanashimia = {
email = "chad@redpilled.dev";
github = "kanashimia";
githubId = 56224949;
name = "Mia Kanashi";
};
karantan = {
name = "Gasper Vozel";
email = "karantan@gmail.com";
@ -6063,6 +6225,12 @@
email = "tierpluspluslists@gmail.com";
name = "Karn Kallio";
};
klden = {
name = "Kenzyme Le";
email = "kl@kenzymele.com";
github = "klDen";
githubId = 5478260;
};
klntsky = {
email = "klntsky@gmail.com";
name = "Vladimir Kalnitsky";
@ -6243,6 +6411,12 @@
githubId = 278013;
name = "Tomasz Kontusz";
};
kubukoz = {
email = "kubukoz@gmail.com";
github = "kubukoz";
githubId = 894884;
name = "Jakub Kozłowski";
};
kurnevsky = {
email = "kurnevsky@gmail.com";
github = "kurnevsky";
@ -6255,6 +6429,13 @@
githubId = 449813;
name = "Roman Kuznetsov";
};
kvark = {
name = "Dzmitry Malyshau";
email = "kvark@fastmail.com";
matrix = "@kvark:matrix.org";
github = "kvark";
githubId = 107301;
};
kwohlfahrt = {
email = "kai.wohlfahrt@gmail.com";
github = "kwohlfahrt";
@ -6446,12 +6627,16 @@
githubId = 4158274;
name = "Michiel Leenaars";
};
legendofmiracles = {
lom = {
email = "legendofmiracles@protonmail.com";
matrix = "@legendofmiracles:matrix.org";
github = "legendofmiracles";
githubId = 30902201;
name = "legendofmiracles";
keys = [{
longkeyid = "rsa4096/0x19B082B3DEFE5451";
fingerprint = "CC50 F82C 985D 2679 0703 AF15 19B0 82B3 DEFE 5451";
}];
};
lejonet = {
email = "daniel@kuehn.se";
@ -6628,6 +6813,12 @@
fingerprint = "5B93 9CFA E8FC 4D8F E07A 3AEA DFE1 D4A0 1733 7E2A";
}];
};
lorenzleutgeb = {
email = "lorenz@leutgeb.xyz";
github = "lorenzleutgeb";
githubId = 542154;
name = "Lorenz Leutgeb";
};
luis = {
email = "luis.nixos@gmail.com";
github = "Luis-Hebendanz";
@ -6717,6 +6908,12 @@
githubId = 10626;
name = "Andreas Wagner";
};
lrewega = {
email = "lrewega@c32.ca";
github = "lrewega";
githubId = 639066;
name = "Luke Rewega";
};
lromor = {
email = "leonardo.romor@gmail.com";
github = "lromor";
@ -6897,6 +7094,12 @@
githubId = 109141;
name = "Georges Dubus";
};
Madouura = {
email = "madouura@gmail.com";
github = "Madouura";
githubId = 93990818;
name = "Madoura";
};
mafo = {
email = "Marc.Fontaine@gmx.de";
github = "MarcFontaine";
@ -7035,6 +7238,12 @@
githubId = 623509;
name = "Martijn Vermaat";
};
martinetd = {
email = "f.ktfhrvnznqxacf@noclue.notk.org";
github = "martinetd";
githubId = 1729331;
name = "Dominique Martinet";
};
martingms = {
email = "martin@mg.am";
github = "martingms";
@ -7146,6 +7355,16 @@
githubId = 95194;
name = "Mauricio Scheffer";
};
max-niederman = {
email = "max@maxniederman.com";
github = "max-niederman";
githubId = 19580458;
name = "Max Niederman";
keys = [{
longkeyid = "rsa3072/0x9AED881481D8444E";
fingerprint = "1DE4 424D BF77 1192 5DC4 CF5E 9AED 8814 81D8 444E";
}];
};
maxdamantus = {
email = "maxdamantus@gmail.com";
github = "Maxdamantus";
@ -7224,6 +7443,12 @@
githubId = 51356;
name = "Mathieu Boespflug";
};
mbprtpmnr = {
name = "mbprtpmnr";
email = "mbprtpmnr@pm.me";
github = "mbprtpmnr";
githubId = 88109321;
};
mbrgm = {
email = "marius@yeai.de";
github = "mbrgm";
@ -7485,6 +7710,12 @@
fingerprint = "DB43 2895 CF68 F0CE D4B7 EF60 DA01 5B05 B5A1 1B22";
}];
};
milahu = {
email = "milahu@gmail.com";
github = "milahu";
githubId = 12958815;
name = "Milan Hauth";
};
milesbreslin = {
email = "milesbreslin@gmail.com";
github = "milesbreslin";
@ -7686,6 +7917,7 @@
mohe2015 = {
name = "Moritz Hedtke";
email = "Moritz.Hedtke@t-online.de";
matrix = "@moritz.hedtke:matrix.org";
github = "mohe2015";
githubId = 13287984;
keys = [{
@ -7727,6 +7959,12 @@
githubId = 99988;
name = "Maarten Hoogendoorn";
};
MoritzBoehme = {
email = "mail@moritzboeh.me";
github = "MoritzBoehme";
githubId = 42215704;
name = "Moritz Böhme";
};
MostAwesomeDude = {
email = "cds@corbinsimpson.com";
github = "MostAwesomeDude";
@ -8012,6 +8250,17 @@
githubId = 56316606;
name = "Amneesh Singh";
};
nazarewk = {
name = "Krzysztof Nazarewski";
email = "3494992+nazarewk@users.noreply.github.com";
matrix = "@nazarewk:matrix.org";
github = "nazarewk";
githubId = 3494992;
keys = [{
longkeyid = "rsa4096/0x916D8B67241892AE";
fingerprint = "4BFF 0614 03A2 47F0 AA0B 4BC4 916D 8B67 2418 92AE";
}];
};
nbren12 = {
email = "nbren12@gmail.com";
github = "nbren12";
@ -8265,6 +8514,12 @@
githubId = 7588406;
name = "Andrew R. M.";
};
nkalupahana = {
email = "hello@nisa.la";
github = "nkalupahana";
githubId = 7347290;
name = "Nisala Kalupahana";
};
nloomans = {
email = "noah@nixos.noahloomans.com";
github = "nloomans";
@ -8568,6 +8823,12 @@
githubId = 101514;
name = "Orivej Desh";
};
ornxka = {
email = "ornxka@littledevil.sh";
github = "ornxka";
githubId = 52086525;
name = "ornxka";
};
oro = {
email = "marco@orovecchia.at";
github = "oro";
@ -8673,6 +8934,7 @@
};
pamplemousse = {
email = "xav.maso@gmail.com";
matrix = "@pamplemouss_:matrix.org";
github = "Pamplemousse";
githubId = 2647236;
name = "Xavier Maso";
@ -8726,7 +8988,7 @@
name = "pasqui23";
};
patryk27 = {
email = "wychowaniec.patryk@gmail.com";
email = "pwychowaniec@pm.me";
github = "Patryk27";
githubId = 3395477;
name = "Patryk Wychowaniec";
@ -8777,12 +9039,6 @@
githubId = 8641;
name = "Pierre Carrier";
};
pengmeiyu = {
email = "pengmyu@gmail.com";
github = "pmeiyu";
githubId = 8529551;
name = "Peng Mei Yu";
};
penguwin = {
email = "penguwin@penguwin.eu";
github = "penguwin";
@ -8887,6 +9143,12 @@
githubId = 421510;
name = "Noé Rubinstein";
};
photex = {
email = "photex@gmail.com";
github = "photex";
githubId = 301903;
name = "Chip Collier";
};
phreedom = {
email = "phreedom@yandex.ru";
github = "phreedom";
@ -9052,6 +9314,12 @@
githubId = 178496;
name = "Philipp Middendorf";
};
pmy = {
email = "pmy@xqzp.net";
github = "pmeiyu";
githubId = 8529551;
name = "Peng Mei Yu";
};
pmyjavec = {
email = "pauly@myjavec.com";
github = "pmyjavec";
@ -9355,6 +9623,12 @@
githubId = 52847440;
name = "Ryan Burns";
};
r3dl3g = {
email = "redleg@rothfuss-web.de";
github = "r3dl3g";
githubId = 35229674;
name = "Armin Rothfuss";
};
raboof = {
email = "arnout@bzzt.net";
matrix = "@raboof:matrix.org";
@ -9547,12 +9821,28 @@
githubId = 500703;
name = "Tadas Barzdžius";
};
revol-xut = {
email = "revol-xut@protonmail.com";
name = "Tassilo Tanneberger";
github = "revol-xut";
githubId = 32239737;
keys = [{
longkeyid = "rsa4096/B966009D57E69CC6";
fingerprint = "91EB E870 1639 1323 642A 6803 B966 009D 57E6 9CC6";
}];
};
rexim = {
email = "reximkut@gmail.com";
github = "rexim";
githubId = 165283;
name = "Alexey Kutepov";
};
rewine = {
email = "lhongxu@outlook.com";
github = "wineee";
githubId = 22803888;
name = "Lu Hongxu";
};
rgrunbla = {
email = "remy@grunblatt.org";
github = "rgrunbla";
@ -9619,6 +9909,12 @@
githubId = 37246692;
name = "Riley Inman";
};
riotbib = {
email = "github-nix@lnrt.de";
github = "riotbib";
githubId = 43172581;
name = "Lennart Mühlenmeier";
};
ris = {
email = "code@humanleg.org.uk";
github = "risicle";
@ -9642,12 +9938,6 @@
}
];
};
rittelle = {
email = "rittelle@posteo.de";
github = "rittelle";
githubId = 33598633;
name = "Lennart Rittel";
};
rixed = {
email = "rixed-github@happyleptic.org";
github = "rixed";
@ -9832,6 +10122,12 @@
githubId = 592876;
name = "Robert W. Pearce";
};
rprecenth = {
email = "rasmus@precenth.eu";
github = "Prillan";
githubId = 1675190;
name = "Rasmus Précenth";
};
rprospero = {
email = "rprospero+nix@gmail.com";
github = "rprospero";
@ -9850,6 +10146,12 @@
githubId = 373566;
name = "Ronuk Raval";
};
rski = {
name = "rski";
email = "rom.skiad+nix@gmail.com";
github = "rski";
githubId = 2960312;
};
rszibele = {
email = "richard@szibele.com";
github = "rszibele";
@ -9992,6 +10294,17 @@
github = "s1341";
githubId = 5682183;
};
sagikazarmark = {
name = "Mark Sagi-Kazar";
email = "mark.sagikazar@gmail.com";
matrix = "@mark.sagikazar:matrix.org";
github = "sagikazarmark";
githubId = 1226384;
keys = [{
longkeyid = "rsa4096/0xF251ADDC9D041C7E";
fingerprint = "E628 C811 6FB8 1657 F706 4EA4 F251 ADDC 9D04 1C7E";
}];
};
samalws = {
email = "sam@samalws.com";
name = "Sam Alws";
@ -10109,6 +10422,12 @@
githubId = 720864;
name = "Sébastien Bourdeauducq";
};
sbellem = {
email = "sbellem@gmail.com";
github = "sbellem";
githubId = 125458;
name = "Sylvain Bellemare";
};
sbond75 = {
name = "sbond75";
email = "43617712+sbond75@users.noreply.github.com";
@ -10369,6 +10688,12 @@
githubId = 251028;
name = "Shell Turner";
};
shikanime = {
name = "William Phetsinorath";
email = "deva.shikanime@protonmail.com";
github = "shikanime";
githubId = 22115108;
};
shlevy = {
email = "shea@shealevy.com";
github = "shlevy";
@ -10761,6 +11086,12 @@
github = "staccato";
githubId = 86573128;
};
stackshadow = {
email = "stackshadow@evilbrain.de";
github = "stackshadow";
githubId = 7512804;
name = "Martin Langlotz";
};
steell = {
email = "steve@steellworks.com";
github = "Steell";
@ -10910,13 +11241,6 @@
githubId = 2666479;
name = "Y Nguyen";
};
superherointj = {
name = "Sérgio G.";
email = "5861043+superherointj@users.noreply.github.com";
matrix = "@superherointj:matrix.org";
github = "superherointj";
githubId = 5861043;
};
SuperSandro2000 = {
email = "sandro.jaeckel@gmail.com";
matrix = "@sandro:supersandro.de";
@ -11073,6 +11397,16 @@
githubId = 321799;
name = "Paul Colomiets";
};
taikx4 = {
email = "taikx4@taikx4szlaj2rsdupcwabg35inbny4jk322ngeb7qwbbhd5i55nf5yyd.onion";
github = "taikx4";
githubId = 94917129;
name = "taikx4";
keys = [{
longkeyid = "ed25519/0xCCD52C7B37BB837E";
fingerprint = "6B02 8103 C4E5 F68C D77C 9E54 CCD5 2C7B 37BB 837E";
}];
};
takagiy = {
email = "takagiy.4dev@gmail.com";
github = "takagiy";
@ -11256,6 +11590,17 @@
githubId = 1141680;
name = "Thane Gill";
};
thblt = {
name = "Thibault Polge";
email = "thibault@thb.lt";
matrix = "@thbltp:matrix.org";
github = "thblt";
githubId = 2453136;
keys = [{
longkeyid = "rsa4096/0x63A44817A52EAB7B";
fingerprint = "D2A2 F0A1 E7A8 5E6F B711 DEE5 63A4 4817 A52E AB7B";
}];
};
TheBrainScrambler = {
email = "esthromeris@riseup.net";
github = "TheBrainScrambler";
@ -11443,6 +11788,12 @@
fingerprint = "556A 403F B0A2 D423 F656 3424 8489 B911 F9ED 617B";
}];
};
tmarkovski = {
email = "tmarkovski@gmail.com";
github = "tmarkovski";
githubId = 1280118;
name = "Tomislav Markovski";
};
tmountain = {
email = "tinymountain@gmail.com";
github = "tmountain";
@ -11612,6 +11963,12 @@
githubId = 1568873;
name = "Torsten Scholak";
};
tshaynik = {
email = "tshaynik@protonmail.com";
github = "tshaynik";
githubId = 15064765;
name = "tshaynik";
};
tstrobel = {
email = "4ZKTUB6TEP74PYJOPWIR013S2AV29YUBW5F9ZH2F4D5UMJUJ6S@hash.domains";
name = "Thomas Strobel";
@ -12181,7 +12538,7 @@
githubId = 6016963;
name = "Patrick Winter";
};
winterqt = {
winter = {
email = "nixos@winter.cafe";
github = "winterqt";
githubId = 78392041;
@ -12531,6 +12888,16 @@
fingerprint = "9270 66BD 8125 A45B 4AC4 0326 6180 7181 F60E FCB2";
}];
};
yuu = {
email = "yuuyin@protonmail.com";
github = "yuuyins";
githubId = 86538850;
name = "Yuu Yin";
keys = [{
longkeyid = "rsa4096/0x416F303B43C20AC3";
fingerprint = "9F19 3AE8 AA25 647F FC31 46B5 416F 303B 43C2 0AC3";
}];
};
yvesf = {
email = "yvesf+nix@xapek.org";
github = "yvesf";
@ -12654,12 +13021,6 @@
githubId = 1772064;
name = "Tim Zook";
};
zoomulator = {
email = "zoomulator@gmail.com";
github = "zoomulator";
githubId = 1069303;
name = "Kim Simmons";
};
zopieux = {
email = "zopieux@gmail.com";
github = "zopieux";
@ -12994,17 +13355,16 @@
github = "zupo";
githubId = 311580;
};
rski = {
name = "rski";
email = "rom.skiad+nix@gmail.com";
github = "rski";
githubId = 2960312;
sei40kr = {
name = "Seong Yong-ju";
email = "sei40kr@gmail.com";
github = "sei40kr";
githubId = 11665236;
};
mbprtpmnr = {
name = "mbprtpmnr";
email = "mbprtpmnr@pm.me";
github = "mbprtpmnr";
githubId = 88109321;
vdot0x23 = {
name = "Victor Büttner";
email = "nix.victor@0x23.dk";
github = "vdot0x23";
githubId = 40716069;
};
}

View file

@ -37,7 +37,7 @@ let
keyDrv = drv: if canEval drv.drvPath then { key = drv.drvPath; value = drv; } else { };
immediateDependenciesOf = drv:
concatLists (mapAttrsToList (n: v: derivationsIn v) (removeAttrs drv ["meta" "passthru"]));
concatLists (mapAttrsToList (n: v: derivationsIn v) (removeAttrs drv (["meta" "passthru"] ++ optionals (drv?passthru) (attrNames drv.passthru))));
derivationsIn = x:
if !canEval x then []

View file

@ -73,7 +73,7 @@ lyaml,,,,,,lblasc
markdown,,,,,,
mediator_lua,,,,,,
mpack,,,,,,
moonscript,,,,,,arobyn
moonscript,https://github.com/leafo/moonscript.git,dev-1,,,,arobyn
nvim-client,https://github.com/neovim/lua-client.git,,,,,
penlight,https://github.com/lunarmodules/Penlight.git,,,,,alerque
plenary.nvim,https://github.com/nvim-lua/plenary.nvim.git,,,,lua5_1,


View file

@ -305,7 +305,7 @@ class CleanEnvironment(object):
def get_current_plugins(editor: Editor) -> List[Plugin]:
with CleanEnvironment():
cmd = ["nix", "eval", "--json", editor.get_plugins]
cmd = ["nix", "eval", "--impure", "--json", "--expr", editor.get_plugins]
log.debug("Running command %s", cmd)
out = subprocess.check_output(cmd)
data = json.loads(out)

View file

@ -114,7 +114,7 @@ async def check_changes(package: Dict, worktree: str, update_info: str):
changes[0]['newVersion'] = json.loads((await obtain_new_version_process.stdout.read()).decode('utf-8'))
if 'files' not in changes[0]:
changed_files_process = await check_subprocess('git', 'diff', '--name-only', stdout=asyncio.subprocess.PIPE, cwd=worktree)
changed_files_process = await check_subprocess('git', 'diff', '--name-only', 'HEAD', stdout=asyncio.subprocess.PIPE, cwd=worktree)
changed_files = (await changed_files_process.stdout.read()).splitlines()
changes[0]['files'] = changed_files

View file

@ -30,7 +30,7 @@ fetchGithubName () {
curl https://github.com/NixOS/nixpkgs/commit/"$commitid" 2>/dev/null |
grep committed -B10 | grep 'href="/' |
sed -re 's@.* href="/@@; s@".*@@' |
grep -v "/commit/"
grep -v "/commit/"
)";
echo "$userid"
}
@ -38,7 +38,7 @@ fetchGithubName () {
[ -n "$NIXPKGS_GITHUB_NAME_CACHE" ] && {
echo "$emails" | while read email; do
line="$(grep "$email " "$NIXPKGS_GITHUB_NAME_CACHE")"
[ -z "$line" ] && {
[ -z "$line" ] && {
echo "$email $(fetchGithubName "$email")" >> \
"$NIXPKGS_GITHUB_NAME_CACHE"
}
@ -47,11 +47,11 @@ fetchGithubName () {
# For RDF
normalize_name () {
sed -e 's/%/%25/g; s/ /%20/g; s/'\''/%27/g; s/"/%22/g; s/`/%60/g; s/\^/%5e/g; '
sed -e 's/%/%25/g; s/ /%20/g; s/'\''/%27/g; s/"/%22/g; s/`/%60/g; s/\^/%5e/g; '
}
denormalize_name () {
sed -e 's/%20/ /g; s/%27/'\''/g; s/%22/"/g; s/%60/`/g; s/%5e/^/g; s/%25/%/g;';
sed -e 's/%20/ /g; s/%27/'\''/g; s/%22/"/g; s/%60/`/g; s/%5e/^/g; s/%25/%/g;';
}
n3="$(mktemp --suffix .n3)"
@ -75,22 +75,22 @@ echo "$maintainers" | cut -f 2 | sed -e 's@.*@<my://name/&> <my://is-name> <my:/
# Get transitive closure
sparql="$(nix-build '<nixpkgs>' -Q -A apache-jena --no-out-link)/bin/sparql"
name_list="$(
"$sparql" --results=TSV --data="$n3" "
select ?x ?y ?g where {
?x <my://can-be>+ ?y.
?x <my://is-name> ?g.
"$sparql" --results=TSV --data="$n3" "
select ?x ?y ?g where {
?x <my://can-be>+ ?y.
?x <my://is-name> ?g.
}
" | tail -n +2 |
sed -re 's@<my://name/@@g; s@<my://@@g; s@>@@g;' |
sort -k 2,3 -t ' '
" | tail -n +2 |
sed -re 's@<my://name/@@g; s@<my://@@g; s@>@@g;' |
sort -k 2,3 -t ' '
)"
github_name_list="$(
"$sparql" --results=TSV --data="$n3" "
select ?x ?y where {
?x (<my://can-be>+ / <my://at-github>) ?y.
"$sparql" --results=TSV --data="$n3" "
select ?x ?y where {
?x (<my://can-be>+ / <my://at-github>) ?y.
}
" | tail -n +2 |
sed -re 's@<my://(name|github)/@@g; s@<my://@@g; s@>@@g;'
" | tail -n +2 |
sed -re 's@<my://(name|github)/@@g; s@<my://@@g; s@>@@g;'
)"
# Take first spelling option for every person
@ -104,10 +104,10 @@ if [ -n "$NIXPKGS_GITHUB_NAME_CACHE" ]; then
github_adder_script="$(mktemp)"
echo "$github_name_list" |
grep -E "$(echo "$name_list_canonical" | cut -f 2 |
tr '\n' '|' )" |
sort | uniq |
tr '\n' '|' )" |
sort | uniq |
sed -re 's/(.*)\t(.*)/s| \1$| \1\t\2|g;/' |
denormalize_name > "$github_adder_script"
denormalize_name > "$github_adder_script"
else
github_adder_script='/dev/null'
fi

View file

@ -93,7 +93,6 @@ with lib.maintainers; {
cstrahan
Frostman
kalbasit
mdlayher
mic92
orivej
rvolosatovs
@ -200,7 +199,6 @@ with lib.maintainers; {
openstack = {
members = [
angustrau
superherointj
SuperSandro2000
];
scope = "Maintain the ecosystem around OpenStack";

View file

@ -58,5 +58,5 @@ a while to finish.
## NixOS Boot Entries {#sect-nixos-gc-boot-entries}
If your `/boot` partition runs out of space, after clearing old profiles
you must rebuild your system with `nixos-rebuild` to update the `/boot`
partition and clear space.
you must rebuild your system with `nixos-rebuild boot` or `nixos-rebuild
switch` to update the `/boot` partition and clear space.

View file

@ -26,7 +26,7 @@ we assign the name `wan` to the interface with MAC address
```nix
systemd.network.links."10-wan" = {
matchConfig.MACAddress = "52:54:00:12:01:01";
matchConfig.PermanentMACAddress = "52:54:00:12:01:01";
linkConfig.Name = "wan";
};
```

View file

@ -159,6 +159,17 @@ The following methods are available on machine objects:
`execute`
: Execute a shell command, returning a list `(status, stdout)`.
If the command detaches, it must close stdout, as `execute` waits for
stdout to be closed in order to consume all output reliably. This can be
achieved by redirecting stdout to stderr `>&2`, to `/dev/console`,
`/dev/null` or a file. Examples of detaching commands are `sleep 365d &`,
where the shell forks a new process that can write to stdout, and
`xclip -i`, where the `xclip` command itself forks without closing stdout.
Takes an optional parameter `check_return` that defaults to `True`.
Setting this parameter to `False` skips the return code check and
makes `execute` return -1 instead. This can be used for commands that shut down
the VM and would therefore break the pipe that would be used for
retrieving the return code.
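A minimal sketch of both behaviours in a hypothetical test script (the
commands shown are illustrative, not part of this change):

```python
# Detaching command: redirect stdout so `execute` does not block on it.
status, out = machine.execute("sleep 365d >&2 &")

# Command that powers off the VM: skip the return code check;
# status is reported as -1 in this case.
status, _ = machine.execute("poweroff", check_return=False)
```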
`succeed`
@ -174,6 +185,9 @@ The following methods are available on machine objects:
- Dereferencing unset variables fail the command.
- It will wait for stdout to be closed. See `execute` for the
implications.
`fail`
: Like `succeed`, but raising an exception if the command returns a zero

View file

@ -64,7 +64,8 @@ $ nix-store --optimise
<para>
If your <literal>/boot</literal> partition runs out of space,
after clearing old profiles you must rebuild your system with
<literal>nixos-rebuild</literal> to update the
<literal>nixos-rebuild boot</literal> or
<literal>nixos-rebuild switch</literal> to update the
<literal>/boot</literal> partition and clear space.
</para>
</section>

View file

@ -32,7 +32,7 @@
</para>
<programlisting language="bash">
systemd.network.links.&quot;10-wan&quot; = {
matchConfig.MACAddress = &quot;52:54:00:12:01:01&quot;;
matchConfig.PermanentMACAddress = &quot;52:54:00:12:01:01&quot;;
linkConfig.Name = &quot;wan&quot;;
};
</programlisting>

View file

@ -266,7 +266,23 @@ start_all()
<listitem>
<para>
Execute a shell command, returning a list
<literal>(status, stdout)</literal>.
<literal>(status, stdout)</literal>. If the command detaches,
it must close stdout, as <literal>execute</literal> will wait
for this to consume all output reliably. This can be achieved
by redirecting stdout to stderr <literal>&gt;&amp;2</literal>,
to <literal>/dev/console</literal>,
<literal>/dev/null</literal> or a file. Examples of detaching
commands are <literal>sleep 365d &amp;</literal>, where the
shell forks a new process that can write to stdout and
<literal>xclip -i</literal>, where the
<literal>xclip</literal> command itself forks without closing
stdout. Takes an optional parameter
<literal>check_return</literal> that defaults to
<literal>True</literal>. Setting this parameter to
<literal>False</literal> will not check for the return code
and return -1 instead. This can be used for commands that shut
down the VM and would therefore break the pipe that would be
used for retrieving the return code.
</para>
</listitem>
</varlistentry>
@ -300,6 +316,12 @@ start_all()
Dereferencing unset variables fail the command.
</para>
</listitem>
<listitem>
<para>
It will wait for stdout to be closed. See
<literal>execute</literal> for the implications.
</para>
</listitem>
</itemizedlist>
</listitem>
</varlistentry>

View file

@ -25,8 +25,11 @@
<para>
You are logged-in automatically as <literal>nixos</literal>. The
<literal>nixos</literal> user account has an empty password so you
can use <literal>sudo</literal> without a password.
can use <literal>sudo</literal> without a password:
</para>
<programlisting>
$ sudo -i
</programlisting>
<para>
If you downloaded the graphical ISO image, you can run
<literal>systemctl start display-manager</literal> to start the

View file

@ -12,7 +12,7 @@
<listitem>
<para>
<emphasis>Stable channels</emphasis>, such as
<link xlink:href="https://nixos.org/channels/nixos-21.05"><literal>nixos-21.05</literal></link>.
<link xlink:href="https://nixos.org/channels/nixos-21.11"><literal>nixos-21.11</literal></link>.
These only get conservative bug fixes and package upgrades. For
instance, a channel update may cause the Linux kernel on your
system to be upgraded from 4.19.34 to 4.19.38 (a minor bug fix),
@ -33,7 +33,7 @@
<listitem>
<para>
<emphasis>Small channels</emphasis>, such as
<link xlink:href="https://nixos.org/channels/nixos-21.05-small"><literal>nixos-21.05-small</literal></link>
<link xlink:href="https://nixos.org/channels/nixos-21.11-small"><literal>nixos-21.11-small</literal></link>
or
<link xlink:href="https://nixos.org/channels/nixos-unstable-small"><literal>nixos-unstable-small</literal></link>.
These are identical to the stable and unstable channels
@ -60,8 +60,8 @@
<para>
When you first install NixOS, youre automatically subscribed to the
NixOS channel that corresponds to your installation source. For
instance, if you installed from a 21.05 ISO, you will be subscribed
to the <literal>nixos-21.05</literal> channel. To see which NixOS
instance, if you installed from a 21.11 ISO, you will be subscribed
to the <literal>nixos-21.11</literal> channel. To see which NixOS
channel youre subscribed to, run the following as root:
</para>
<programlisting>
@ -76,17 +76,17 @@ nixos https://nixos.org/channels/nixos-unstable
</programlisting>
<para>
(Be sure to include the <literal>nixos</literal> parameter at the
end.) For instance, to use the NixOS 21.05 stable channel:
end.) For instance, to use the NixOS 21.11 stable channel:
</para>
<programlisting>
# nix-channel --add https://nixos.org/channels/nixos-21.05 nixos
# nix-channel --add https://nixos.org/channels/nixos-21.11 nixos
</programlisting>
<para>
If you have a server, you may want to use the <quote>small</quote>
channel instead:
</para>
<programlisting>
# nix-channel --add https://nixos.org/channels/nixos-21.05-small nixos
# nix-channel --add https://nixos.org/channels/nixos-21.11-small nixos
</programlisting>
<para>
And if you want to live on the bleeding edge:
@ -146,7 +146,7 @@ system.autoUpgrade.allowReboot = true;
also specify a channel explicitly, e.g.
</para>
<programlisting language="bash">
system.autoUpgrade.channel = https://nixos.org/channels/nixos-21.05;
system.autoUpgrade.channel = https://nixos.org/channels/nixos-21.11;
</programlisting>
</section>
</chapter>

View file

@ -1,9 +1,5 @@
<section xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-release-21.11">
<title>Release 21.11 (“Porcupine”, 2021.11/??)</title>
<para>
In addition to numerous new and upgraded packages, this release has
the following highlights:
</para>
<title>Release 21.11 (“Porcupine”, 2021/11/30)</title>
<itemizedlist spacing="compact">
<listitem>
<para>
@ -14,7 +10,26 @@
</itemizedlist>
<section xml:id="sec-release-21.11-highlights">
<title>Highlights</title>
<para>
In addition to numerous new and upgraded packages, this release
has the following highlights:
</para>
<itemizedlist>
<listitem>
<para>
Nix has been updated to version 2.4; refer to its
<link xlink:href="https://discourse.nixos.org/t/nix-2-4-released/15822">release
notes</link> for more information on what has changed. The
previous version of Nix, 2.3.16, remains available for the
time being in the <literal>nix_2_3</literal> package.
</para>
</listitem>
<listitem>
<para>
<literal>iptables</literal> now uses
<literal>nf_tables</literal> backend.
</para>
</listitem>
<listitem>
<para>
PHP now defaults to PHP 8.0, updated from 7.4.
@ -22,7 +37,7 @@
</listitem>
<listitem>
<para>
kOps now defaults to 1.21.1, which uses containerd as the
kops now defaults to 1.21.1, which uses containerd as the
default runtime.
</para>
</listitem>
@ -46,13 +61,36 @@
</listitem>
<listitem>
<para>
Activation scripts can now opt int to be run when running
<literal>nixos-rebuild dry-activate</literal> and detect the
dry activation by reading <literal>$NIXOS_ACTION</literal>.
This allows activation scripts to output what they would
change if the activation was really run. The users/modules
activation script supports this and outputs some of is
actions.
Improvements have been made to the Hadoop module and package:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
HDFS and YARN now support production-ready highly
available deployments with automatic failover.
</para>
</listitem>
<listitem>
<para>
Hadoop now defaults to Hadoop 3, updated from 2.
</para>
</listitem>
<listitem>
<para>
JournalNode, ZKFS and HTTPFS services have been added.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
Activation scripts can now, optionally, be run during a
<literal>nixos-rebuild dry-activate</literal> and can detect
the dry activation by reading
<literal>$NIXOS_ACTION</literal>. This allows activation
scripts to output what they would change if the activation was
really run. The users/modules activation script supports this
and outputs some of its actions.
</para>
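<para>
  A minimal sketch of such an opt-in script (the
  <literal>supportsDryActivation</literal> flag and the script body
  are illustrative assumptions, not taken from the module):
</para>
<programlisting>
system.activationScripts.exampleCache = {
  # assumed opt-in flag for dry activation
  supportsDryActivation = true;
  text = ''
    if [ &quot;$NIXOS_ACTION&quot; = &quot;dry-activate&quot; ]; then
      echo &quot;would rebuild the example cache&quot;
    else
      echo &quot;rebuilding the example cache&quot;
    fi
  '';
};
</programlisting>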
</listitem>
<listitem>
@ -94,6 +132,81 @@
Notes</link> for details.
</para>
</listitem>
<listitem>
<para>
LXD support was greatly improved:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
building LXD images from configurations is now directly
possible with just nixpkgs
</para>
</listitem>
<listitem>
<para>
Hydra now builds NixOS LXD images that can be used
standalone with full nixos-rebuild support
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
OpenSSH was updated to version 8.8p1
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
This breaks connections to old SSH daemons as ssh-rsa host
keys and ssh-rsa public keys that were signed with SHA-1
are disabled by default now
</para>
</listitem>
<listitem>
<para>
These can be re-enabled, see the
<link xlink:href="https://www.openssh.com/txt/release-8.8">OpenSSH
changelog</link> for details
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
ORY Kratos was updated to version 0.8.0-alpha.3
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
This release requires you to run SQL migrations. Please,
as always, create a backup of your database first!
</para>
</listitem>
<listitem>
<para>
The SDKs are now generated with tag v0alpha2 to reflect
that some signatures have changed in a breaking fashion.
Please update your imports from v0alpha1 to v0alpha2.
</para>
</listitem>
<listitem>
<para>
The SMTPS scheme used in courier config URL with
cleartext/StartTLS/TLS SMTP connection types is now only
supporting implicit TLS. For StartTLS and cleartext SMTP,
please use the SMTP scheme instead.
</para>
</listitem>
<listitem>
<para>
For more details, see
<link xlink:href="https://github.com/ory/kratos/releases/tag/v0.8.0-alpha.1">Release
Notes</link>.
</para>
</listitem>
</itemizedlist>
</listitem>
</itemizedlist>
</section>
<section xml:id="sec-release-21.11-new-services">
@ -142,14 +255,14 @@
<para>
<link xlink:href="https://www.isc.org/kea/">Kea</link>, ISCs
2nd generation DHCP and DDNS server suite. Available at
<link xlink:href="options.html#opt-services.kea">services.kea</link>.
<link xlink:href="options.html#opt-services.kea.dhcp4">services.kea</link>.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://owncast.online/">owncast</link>,
self-hosted video live streaming solution. Available at
<link xlink:href="options.html#opt-services.owncast">services.owncast</link>.
<link xlink:href="options.html#opt-services.owncast.enable">services.owncast</link>.
</para>
</listitem>
<listitem>
@ -157,7 +270,7 @@
<link xlink:href="https://joinpeertube.org/">PeerTube</link>,
developed by Framasoft, is the free and decentralized
alternative to video platforms. Available at
<link xlink:href="options.html#opt-services.peertube">services.peertube</link>.
<link xlink:href="options.html#opt-services.peertube.enable">services.peertube</link>.
</para>
</listitem>
<listitem>
@ -381,11 +494,68 @@
<link xlink:href="options.html#opt-services.seafile.enable">services.seafile</link>.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/mchehab/rasdaemon">rasdaemon</link>,
a hardware error logging daemon. Available as
<link linkend="opt-hardware.rasdaemon.enable">hardware.rasdaemon</link>.
</para>
</listitem>
<listitem>
<para>
The <literal>code-server</literal> module is now available.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/xmrig/xmrig">xmrig</link>,
a high performance, open source, cross platform RandomX,
KawPow, CryptoNight and AstroBWT unified CPU/GPU miner and
RandomX benchmark.
</para>
</listitem>
<listitem>
<para>
Auto nice daemons
<link xlink:href="https://github.com/Nefelim4ag/Ananicy">ananicy</link>
and
<link xlink:href="https://gitlab.com/ananicy-cpp/ananicy-cpp/">ananicy-cpp</link>.
Available as
<link linkend="opt-services.ananicy.enable">services.ananicy</link>.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/prometheus-community/smartctl_exporter">smartctl_exporter</link>,
a Prometheus exporter for
<link xlink:href="https://en.wikipedia.org/wiki/S.M.A.R.T.">S.M.A.R.T.</link>
data. Available as
<link xlink:href="options.html#opt-services.prometheus.exporters.smartctl.enable">services.prometheus.exporters.smartctl</link>.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="sec-release-21.11-incompatibilities">
<title>Backward Incompatibilities</title>
<itemizedlist>
<listitem>
<para>
The NixOS VM test framework,
<literal>pkgs.nixosTest</literal>/<literal>make-test-python.nix</literal>,
now requires detaching commands such as
<literal>succeed(&quot;foo &amp;&quot;)</literal> and
<literal>succeed(&quot;foo | xclip -i&quot;)</literal> to
close stdout. This can be done with a redirect such as
<literal>succeed(&quot;foo &gt;&amp;2 &amp;&quot;)</literal>.
This breaking change was necessitated by a race condition
causing tests to fail or hang. It applies to all methods that
invoke commands on the nodes, including
<literal>execute</literal>, <literal>succeed</literal>,
<literal>fail</literal>,
<literal>wait_until_succeeds</literal>,
<literal>wait_until_fails</literal>.
</para>
</listitem>
<listitem>
<para>
The <literal>services.wakeonlan</literal> option was removed,
@ -403,6 +573,14 @@
nobody/nogroup, which is unsafe.
</para>
</listitem>
<listitem>
<para>
Since <literal>iptables</literal> now uses
<literal>nf_tables</literal> backend and
<literal>ipset</literal> doesnt support it, some applications
(ferm, shorewall, firehol) may have limited functionality.
</para>
</listitem>
<listitem>
<para>
The <literal>paperless</literal> module and package have been
@ -490,7 +668,7 @@ Superuser created successfully.
<listitem>
<para>
The <literal>staticjinja</literal> package has been upgraded
from 1.0.4 to 4.1.0
from 1.0.4 to 4.1.1
</para>
</listitem>
<listitem>
@ -549,6 +727,17 @@ Superuser created successfully.
<link xlink:href="options.html#opt-services.geoipupdate.enable">services.geoipupdate</link>.
</para>
</listitem>
<listitem>
<para>
<literal>ihatemoney</literal> has been updated to version
5.1.1
(<link xlink:href="https://github.com/spiral-project/ihatemoney/blob/5.1.1/CHANGELOG.rst">release
notes</link>). If you serve ihatemoney by HTTP rather than
HTTPS, you must set
<link xlink:href="options.html#opt-services.ihatemoney.secureCookie">services.ihatemoney.secureCookie</link>
to <literal>false</literal>.
</para>
</listitem>
<listitem>
<para>
PHP 7.3 is no longer supported due to upstream not supporting
@ -1153,6 +1342,21 @@ Superuser created successfully.
would be parsed as 3 parameters.
</para>
</listitem>
<listitem>
<para>
<literal>nix.daemonNiceLevel</literal> and
<literal>nix.daemonIONiceLevel</literal> have been removed in
favour of the new options
<link xlink:href="options.html#opt-nix.daemonCPUSchedPolicy"><literal>nix.daemonCPUSchedPolicy</literal></link>,
<link xlink:href="options.html#opt-nix.daemonIOSchedClass"><literal>nix.daemonIOSchedClass</literal></link>
and
<link xlink:href="options.html#opt-nix.daemonIOSchedPriority"><literal>nix.daemonIOSchedPriority</literal></link>.
Please refer to the options documentation and the
<literal>sched(7)</literal> and
<literal>ioprio_set(2)</literal> man pages for guidance on how
to use them.
</para>
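<para>
  For example, a configuration that runs the daemon with reduced
  scheduling priority might now look like this (the values are
  illustrative; see the option descriptions for the accepted values):
</para>
<programlisting>
nix.daemonCPUSchedPolicy = &quot;batch&quot;;
nix.daemonIOSchedClass = &quot;best-effort&quot;;
nix.daemonIOSchedPriority = 7;
</programlisting>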
</listitem>
<listitem>
<para>
The <literal>coursier</literal> packages binary was renamed
@ -1170,12 +1374,52 @@ Superuser created successfully.
will no longer work and must be updated.
</para>
</listitem>
<listitem>
<para>
The <literal>fluidsynth_1</literal> attribute has been
removed, as this legacy version is no longer needed in
nixpkgs. The actively maintained 2.x series is available as
<literal>fluidsynth</literal> unchanged.
</para>
</listitem>
<listitem>
<para>
Nextcloud 20 (<literal>pkgs.nextcloud20</literal>) has been
dropped because it was EOLed by upstream in 2021-10.
</para>
</listitem>
<listitem>
<para>
The <literal>virtualisation.pathsInNixDB</literal> option was
renamed
<link xlink:href="options.html#opt-virtualisation.additionalPaths"><literal>virtualisation.additionalPaths</literal></link>.
</para>
</listitem>
<listitem>
<para>
The <literal>services.ddclient.password</literal> option was
removed, and replaced with
<literal>services.ddclient.passwordFile</literal>.
</para>
</listitem>
<listitem>
<para>
The default GNAT version has been changed: The
<literal>gnat</literal> attribute now points to
<literal>gnat11</literal> instead of <literal>gnat9</literal>.
</para>
</listitem>
<listitem>
<para>
<literal>retroArchCores</literal> has been removed. This means
that using <literal>nixpkgs.config.retroarch</literal> to
customize RetroArch cores is not supported anymore. Instead,
use package overrides, for example:
<literal>retroarch.override { cores = with libretro; [ citra snes9x ]; };</literal>.
Also, the <literal>retroarchFull</literal> derivation is available
for those who want to have all RetroArch cores available.
</para>
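<para>
  A sketch of such an override in a system configuration, using the
  cores from the example above:
</para>
<programlisting>
environment.systemPackages = [
  (pkgs.retroarch.override {
    cores = with pkgs.libretro; [ citra snes9x ];
  })
];
</programlisting>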
</listitem>
</itemizedlist>
</section>
<section xml:id="sec-release-21.11-notable-changes">
@ -1199,25 +1443,31 @@ Superuser created successfully.
<para>
In NixOS virtual machines (QEMU), the
<literal>virtualisation</literal> module has been updated with
new options to configure:
new options:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
IPv4 port forwarding
(<link xlink:href="options.html#opt-virtualisation.forwardPorts"><literal>virtualisation.forwardPorts</literal></link>),
<link xlink:href="options.html#opt-virtualisation.forwardPorts"><literal>forwardPorts</literal></link>
to configure IPv4 port forwarding,
</para>
</listitem>
<listitem>
<para>
shared host directories
(<link xlink:href="options.html#opt-virtualisation.sharedDirectories"><literal>virtualisation.sharedDirectories</literal></link>),
<link xlink:href="options.html#opt-virtualisation.sharedDirectories"><literal>sharedDirectories</literal></link>
to set up shared host directories,
</para>
</listitem>
<listitem>
<para>
screen resolution
(<link xlink:href="options.html#opt-virtualisation.resolution"><literal>virtualisation.resolution</literal></link>).
<link xlink:href="options.html#opt-virtualisation.resolution"><literal>resolution</literal></link>
to set the screen resolution,
</para>
</listitem>
<listitem>
<para>
<link xlink:href="options.html#opt-virtualisation.useNixStoreImage"><literal>useNixStoreImage</literal></link>
to use a disk image for the Nix store instead of 9P.
</para>
</listitem>
</itemizedlist>
@ -1470,6 +1720,23 @@ Superuser created successfully.
option.
</para>
</listitem>
<listitem>
<para>
The
<link xlink:href="options.html#opt-services.smokeping.host">services.smokeping.host</link>
option was added and defaulted to
<literal>localhost</literal>. Before,
<literal>smokeping</literal> listened to all interfaces by
default. NixOS defaults generally aim to provide
non-Internet-exposed defaults for databases and internal
monitoring tools, see e.g.
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/100192">#100192</link>.
Further, the systemd service for <literal>smokeping</literal>
got reworked defaults for increased operational stability, see
<link xlink:href="https://github.com/NixOS/nixpkgs/pull/144127">PR
#144127</link> for details.
</para>
</listitem>
<listitem>
<para>
The
@ -1602,15 +1869,6 @@ Superuser created successfully.
encapsulation.
</para>
</listitem>
<listitem>
<para>
Changing systemd <literal>.socket</literal> units now restarts
them and stops the service that is activated by them.
Additionally, services with
<literal>stopOnChange = false</literal> dont break anymore
when they are socket-activated.
</para>
</listitem>
<listitem>
<para>
The <literal>virtualisation.libvirtd</literal> module has been
@ -1653,6 +1911,133 @@ Superuser created successfully.
better user experience and benefit from this change.
</para>
</listitem>
<listitem>
<para>
A new option
<literal>services.prometheus.enableReload</literal> has been
added which can be enabled to reload the prometheus service
when its config file changes instead of restarting.
</para>
</listitem>
<listitem>
<para>
The option
<literal>services.prometheus.environmentFile</literal> has
been removed since it was causing
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/126083">issues</link>
and Prometheus now has native support for secret files, i.e.
<literal>basic_auth.password_file</literal> and
<literal>authorization.credentials_file</literal>.
</para>
</listitem>
<listitem>
<para>
Dokuwiki now supports Caddy! However:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
the nginx option has been removed; in the new
configuration, please use
<literal>dokuwiki.webserver = &quot;nginx&quot;</literal>
instead.
</para>
</listitem>
<listitem>
<para>
The <quote>${hostname}</quote> option has been deprecated;
please use
<literal>dokuwiki.sites = [ &quot;${hostname}&quot; ]</literal>
instead.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
The
<link xlink:href="options.html#opt-services.unifi.enable">services.unifi</link>
module has been reworked, solving a number of issues. This
leads to several user-facing changes:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
The <literal>services.unifi.dataDir</literal> option is
removed and the data is now always located under
<literal>/var/lib/unifi/data</literal>. This is done to
make better use of the systemd state directory and thus
make the service restart more reliable.
</para>
</listitem>
<listitem>
<para>
The unifi logs can now be found under:
<literal>/var/log/unifi</literal> instead of
<literal>/var/lib/unifi/logs</literal>.
</para>
</listitem>
<listitem>
<para>
The unifi run directory can now be found under:
<literal>/run/unifi</literal> instead of
<literal>/var/lib/unifi/run</literal>.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
<literal>security.pam.services.&lt;name&gt;.makeHomeDir</literal>
now uses <literal>umask=0077</literal> instead of
<literal>umask=0022</literal> when creating the home
directory.
</para>
</listitem>
<listitem>
<para>
Loki has had another release. Some default values have been
changed for the configuration and some configuration options
have been renamed. For more details, please check
<link xlink:href="https://grafana.com/docs/loki/latest/upgrading/#240">the
upgrade guide</link>.
</para>
</listitem>
<listitem>
<para>
<literal>julia</literal> now refers to
<literal>julia-stable</literal> instead of
<literal>julia-lts</literal>. In practice this means it has
been upgraded from <literal>1.0.4</literal> to
<literal>1.5.4</literal>.
</para>
</listitem>
<listitem>
<para>
RetroArch has been upgraded from version
<literal>1.8.5</literal> to <literal>1.9.13.2</literal>. Since
the previous release was quite old, if youre having issues
after the upgrade, please delete your
<literal>$XDG_CONFIG_HOME/retroarch/retroarch.cfg</literal>
file.
</para>
</listitem>
<listitem>
<para>
hydrus has been upgraded from version <literal>438</literal>
to <literal>463</literal>. Since upgrading between releases
this old is advised against, be sure to have a backup of your
data before upgrading. For details, see
<link xlink:href="https://hydrusnetwork.github.io/hydrus/help/getting_started_installing.html#big_updates">the
hydrus manual</link>.
</para>
</listitem>
<listitem>
<para>
More jdk and jre versions are now exposed via
<literal>java-packages.compiler</literal>.
</para>
</listitem>
</itemizedlist>
</section>
</section>

View file

@ -0,0 +1,85 @@
<section xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-release-22.05">
<title>Release 22.05 (“Quokka”, 2022.05/??)</title>
<para>
In addition to numerous new and upgraded packages, this release has
the following highlights:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
Support is planned until the end of December 2022, handing over
to 22.11.
</para>
</listitem>
</itemizedlist>
<section xml:id="sec-release-22.05-highlights">
<title>Highlights</title>
<para>
</para>
</section>
<section xml:id="sec-release-22.05-new-services">
<title>New Services</title>
<para>
</para>
</section>
<section xml:id="sec-release-22.05-incompatibilities">
<title>Backward Incompatibilities</title>
<itemizedlist>
<listitem>
<para>
<literal>pkgs.ghc</literal> now refers to
<literal>pkgs.targetPackages.haskellPackages.ghc</literal>.
This <emphasis>only</emphasis> makes a difference if you are
cross-compiling and will ensure that
<literal>pkgs.ghc</literal> always runs on the host platform
and compiles for the target platform (similar to
<literal>pkgs.gcc</literal> for example).
<literal>haskellPackages.ghc</literal> still behaves as
before, running on the build platform and compiling for the
host platform (similar to <literal>stdenv.cc</literal>). This
means you dont have to adjust your derivations if you use
<literal>haskellPackages.callPackage</literal>, but when using
<literal>pkgs.callPackage</literal> and taking
<literal>ghc</literal> as an input, you should now use
<literal>buildPackages.ghc</literal> instead to ensure cross
compilation keeps working (or switch to
<literal>haskellPackages.callPackage</literal>).
</para>
</listitem>
<listitem>
<para>
<literal>pkgs.emacsPackages.orgPackages</literal> is removed
because org elpa is deprecated. The packages in the top level
of <literal>pkgs.emacsPackages</literal>, such as org and
org-contrib, refer to the ones in
<literal>pkgs.emacsPackages.elpaPackages</literal> and
<literal>pkgs.emacsPackages.nongnuPackages</literal> where the
new versions will be released.
</para>
</listitem>
<listitem>
<para>
The <literal>wafHook</literal> hook now honors
<literal>NIX_BUILD_CORES</literal> when
<literal>enableParallelBuilding</literal> is not set
explicitly. Packages can restore the old behaviour by setting
<literal>enableParallelBuilding=false</literal>.
</para>
</listitem>
<listitem>
<para>
<literal>pkgs.claws-mail-gtk2</literal>, representing Claws
Mails older release version three, was removed in order to
get rid of Python 2. Please switch to
<literal>claws-mail</literal>, which is Claws Mails latest
release based on GTK+3 and Python 3.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="sec-release-22.05-notable-changes">
<title>Other Notable Changes</title>
<para>
</para>
</section>
</section>

View file

@ -15,7 +15,10 @@ finished booting, it should have detected most of your hardware.
The NixOS manual is available by running `nixos-help`.
You are logged-in automatically as `nixos`. The `nixos` user account has
an empty password so you can use `sudo` without a password.
an empty password so you can use `sudo` without a password:
```ShellSession
$ sudo -i
```
If you downloaded the graphical ISO image, you can run `systemctl
start display-manager` to start the desktop environment. If you want

View file

@ -6,7 +6,7 @@ expressions and associated binaries. The NixOS channels are updated
automatically from NixOS's Git repository after certain tests have
passed and all packages have been built. These channels are:
- *Stable channels*, such as [`nixos-21.05`](https://nixos.org/channels/nixos-21.05).
- *Stable channels*, such as [`nixos-21.11`](https://nixos.org/channels/nixos-21.11).
These only get conservative bug fixes and package upgrades. For
instance, a channel update may cause the Linux kernel on your system
to be upgraded from 4.19.34 to 4.19.38 (a minor bug fix), but not
@ -19,7 +19,7 @@ passed and all packages have been built. These channels are:
radical changes between channel updates. It's not recommended for
production systems.
- *Small channels*, such as [`nixos-21.05-small`](https://nixos.org/channels/nixos-21.05-small)
- *Small channels*, such as [`nixos-21.11-small`](https://nixos.org/channels/nixos-21.11-small)
or [`nixos-unstable-small`](https://nixos.org/channels/nixos-unstable-small).
These are identical to the stable and unstable channels described above,
except that they contain fewer binary packages. This means they get updated
@ -38,8 +38,8 @@ newest supported stable release.
When you first install NixOS, you're automatically subscribed to the
NixOS channel that corresponds to your installation source. For
instance, if you installed from a 21.05 ISO, you will be subscribed to
the `nixos-21.05` channel. To see which NixOS channel you're subscribed
instance, if you installed from a 21.11 ISO, you will be subscribed to
the `nixos-21.11` channel. To see which NixOS channel you're subscribed
to, run the following as root:
```ShellSession
@ -54,16 +54,16 @@ To switch to a different NixOS channel, do
```
(Be sure to include the `nixos` parameter at the end.) For instance, to
use the NixOS 21.05 stable channel:
use the NixOS 21.11 stable channel:
```ShellSession
# nix-channel --add https://nixos.org/channels/nixos-21.05 nixos
# nix-channel --add https://nixos.org/channels/nixos-21.11 nixos
```
If you have a server, you may want to use the "small" channel instead:
```ShellSession
# nix-channel --add https://nixos.org/channels/nixos-21.05-small nixos
# nix-channel --add https://nixos.org/channels/nixos-21.11-small nixos
```
And if you want to live on the bleeding edge:
@ -114,5 +114,5 @@ the new generation contains a different kernel, initrd or kernel
modules. You can also specify a channel explicitly, e.g.
```nix
system.autoUpgrade.channel = https://nixos.org/channels/nixos-21.05;
system.autoUpgrade.channel = https://nixos.org/channels/nixos-21.11;
```

View file

@ -69,9 +69,14 @@
</arg>
<arg>
<arg choice='plain'>
<option>--no-root-passwd</option>
</arg>
<group choice='req'>
<arg choice='plain'>
<option>--no-root-password</option>
</arg>
<arg choice='plain'>
<option>--no-root-passwd</option>
</arg>
</group>
</arg>
<arg>
@ -157,7 +162,7 @@
<listitem>
<para>
It prompts you for a password for the root account (unless
<option>--no-root-passwd</option> is specified).
<option>--no-root-password</option> is specified).
</para>
</listitem>
</itemizedlist>

View file

@ -1,14 +1,18 @@
# Release 21.11 (“Porcupine”, 2021.11/??) {#sec-release-21.11}
In addition to numerous new and upgraded packages, this release has the following highlights:
# Release 21.11 (“Porcupine”, 2021/11/30) {#sec-release-21.11}
- Support is planned until the end of June 2022, handing over to 22.05.
## Highlights {#sec-release-21.11-highlights}
In addition to numerous new and upgraded packages, this release has the following highlights:
- Nix has been updated to version 2.4; refer to its [release notes](https://discourse.nixos.org/t/nix-2-4-released/15822) for more information on what has changed. The previous version of Nix, 2.3.16, remains available for the time being in the `nix_2_3` package.
- `iptables` now uses `nf_tables` backend.
- PHP now defaults to PHP 8.0, updated from 7.4.
- kOps now defaults to 1.21.1, which uses containerd as the default runtime.
- kops now defaults to 1.21.1, which uses containerd as the default runtime.
- `python3` now defaults to Python 3.9, updated from Python 3.8.
@ -16,7 +20,12 @@ In addition to numerous new and upgraded packages, this release has the followin
- spark now defaults to spark 3, updated from 2. A [migration guide](https://spark.apache.org/docs/latest/core-migration-guide.html#upgrading-from-core-24-to-30) is available.
- Activation scripts can now opt int to be run when running `nixos-rebuild dry-activate` and detect the dry activation by reading `$NIXOS_ACTION`.
- Improvements have been made to the Hadoop module and package:
- HDFS and YARN now support production-ready highly available deployments with automatic failover.
- Hadoop now defaults to Hadoop 3, updated from 2.
- JournalNode, ZKFS and HTTPFS services have been added.
- Activation scripts can now, optionally, be run during a `nixos-rebuild dry-activate` and can detect the dry activation by reading `$NIXOS_ACTION`.
This allows activation scripts to output what they would change if the activation was really run.
The users/modules activation script supports this and outputs some of its actions.
@ -33,6 +42,20 @@ In addition to numerous new and upgraded packages, this release has the followin
- GNOME has been upgraded to 41. Please take a look at their [Release Notes](https://help.gnome.org/misc/release-notes/41.0/) for details.
- LXD support was greatly improved:
- building LXD images from configurations is now directly possible with just nixpkgs
- Hydra now builds NixOS LXD images that can be used standalone with full nixos-rebuild support
- OpenSSH was updated to version 8.8p1
- This breaks connections to old SSH daemons as ssh-rsa host keys and ssh-rsa public keys that were signed with SHA-1 are disabled by default now
- These can be re-enabled, see the [OpenSSH changelog](https://www.openssh.com/txt/release-8.8) for details
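A client-side sketch for reaching such a legacy host (the host name is made up, and note that re-enabling SHA-1 signatures weakens security):

```nix
programs.ssh.extraConfig = ''
  Host legacy.example.org
    HostKeyAlgorithms +ssh-rsa
    PubkeyAcceptedAlgorithms +ssh-rsa
'';
```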
- ORY Kratos was updated to version 0.8.0-alpha.3
- This release requires you to run SQL migrations. Please, as always, create a backup of your database first!
- The SDKs are now generated with tag v0alpha2 to reflect that some signatures have changed in a breaking fashion. Please update your imports from v0alpha1 to v0alpha2.
- The SMTPS scheme used in courier config URL with cleartext/StartTLS/TLS SMTP connection types is now only supporting implicit TLS. For StartTLS and cleartext SMTP, please use the SMTP scheme instead.
- For more details, see [Release Notes](https://github.com/ory/kratos/releases/tag/v0.8.0-alpha.1).
## New Services {#sec-release-21.11-new-services}
- [btrbk](https://digint.ch/btrbk/index.html), a backup tool for btrfs subvolumes, taking advantage of btrfs specific capabilities to create atomic snapshots and transfer them incrementally to your backup locations. Available as [services.btrbk](options.html#opt-services.brtbk.instances).
@ -45,11 +68,11 @@ In addition to numerous new and upgraded packages, this release has the followin
- [Jibri](https://github.com/jitsi/jibri), a service for recording or streaming a Jitsi Meet conference. Available as [services.jibri](options.html#opt-services.jibri.enable).
- [Kea](https://www.isc.org/kea/), ISCs 2nd generation DHCP and DDNS server suite. Available at [services.kea](options.html#opt-services.kea).
- [Kea](https://www.isc.org/kea/), ISCs 2nd generation DHCP and DDNS server suite. Available at [services.kea](options.html#opt-services.kea.dhcp4).
- [owncast](https://owncast.online/), self-hosted video live streaming solution. Available at [services.owncast](options.html#opt-services.owncast).
- [owncast](https://owncast.online/), self-hosted video live streaming solution. Available at [services.owncast](options.html#opt-services.owncast.enable).
- [PeerTube](https://joinpeertube.org/), developed by Framasoft, is the free and decentralized alternative to video platforms. Available at [services.peertube](options.html#opt-services.peertube).
- [PeerTube](https://joinpeertube.org/), developed by Framasoft, is the free and decentralized alternative to video platforms. Available at [services.peertube](options.html#opt-services.peertube.enable).
- [sourcehut](https://sr.ht), a collection of tools useful for software development. Available as [services.sourcehut](options.html#opt-services.sourcehut.enable).
@ -116,13 +139,29 @@ In addition to numerous new and upgraded packages, this release has the followin
- [seafile](https://www.seafile.com/en/home/), an open source file syncing & sharing software. Available as [services.seafile](options.html#opt-services.seafile.enable).
- [rasdaemon](https://github.com/mchehab/rasdaemon), a hardware error logging daemon. Available as [hardware.rasdaemon](#opt-hardware.rasdaemon.enable).
- The `code-server` module is now available
- [xmrig](https://github.com/xmrig/xmrig), a high performance, open source, cross platform RandomX, KawPow, CryptoNight and AstroBWT unified CPU/GPU miner and RandomX benchmark.
- Auto nice daemons [ananicy](https://github.com/Nefelim4ag/Ananicy) and [ananicy-cpp](https://gitlab.com/ananicy-cpp/ananicy-cpp/). Available as [services.ananicy](#opt-services.ananicy.enable).
- [smartctl_exporter](https://github.com/prometheus-community/smartctl_exporter), a Prometheus exporter for [S.M.A.R.T.](https://en.wikipedia.org/wiki/S.M.A.R.T.) data. Available as [services.prometheus.exporters.smartctl](options.html#opt-services.prometheus.exporters.smartctl.enable).
## Backward Incompatibilities {#sec-release-21.11-incompatibilities}
- The NixOS VM test framework, `pkgs.nixosTest`/`make-test-python.nix`, now requires detaching commands such as `succeed("foo &")` and `succeed("foo | xclip -i")` to close stdout.
This can be done with a redirect such as `succeed("foo >&2 &")`. This breaking change was necessitated by a race condition causing tests to fail or hang.
It applies to all methods that invoke commands on the nodes, including `execute`, `succeed`, `fail`, `wait_until_succeeds`, `wait_until_fails`.
- The `services.wakeonlan` option was removed, and replaced with `networking.interfaces.<name>.wakeOnLan`.
- The `security.wrappers` option now requires to always specify an owner, group and whether the setuid/setgid bit should be set.
This is motivated by the fact that before NixOS 21.11, specifying either setuid or setgid but not owner/group resulted in wrappers owned by nobody/nogroup, which is unsafe.
- Since `iptables` now uses `nf_tables` backend and `ipset` doesn't support it, some applications (ferm, shorewall, firehol) may have limited functionality.
- The `paperless` module and package have been removed. All users should migrate to the
successor `paperless-ng` instead. The Paperless project [has been
archived](https://github.com/the-paperless-project/paperless/commit/9b0063c9731f7c5f65b1852cb8caff97f5e40ba4)
@ -173,7 +212,7 @@ In addition to numerous new and upgraded packages, this release has the followin
Superuser created successfully.
```
- The `staticjinja` package has been upgraded from 1.0.4 to 4.1.0
- The `staticjinja` package has been upgraded from 1.0.4 to 4.1.1
- Firefox v91 does not support addons with invalid signature anymore. Firefox ESR needs to be used for nix addon support.
@ -202,6 +241,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- `services.geoip-updater` was broken and has been replaced by [services.geoipupdate](options.html#opt-services.geoipupdate.enable).
- `ihatemoney` has been updated to version 5.1.1 ([release notes](https://github.com/spiral-project/ihatemoney/blob/5.1.1/CHANGELOG.rst)). If you serve ihatemoney by HTTP rather than HTTPS, you must set [services.ihatemoney.secureCookie](options.html#opt-services.ihatemoney.secureCookie) to `false`.
- PHP 7.3 is no longer supported due to upstream not supporting this version for the entire lifecycle of the 21.11 release.
- Those making use of `buildBazelPackage` will need to regenerate the fetch hashes (preferred), or set `fetchConfigured = false;`.
@ -355,23 +396,38 @@ In addition to numerous new and upgraded packages, this release has the followin
- `boot.kernelParams` now only accepts one command line parameter per string. This change is aimed to reduce common mistakes like "param = 12", which would be parsed as 3 parameters.
- `nix.daemonNiceLevel` and `nix.daemonIONiceLevel` have been removed in favour of the new options [`nix.daemonCPUSchedPolicy`](options.html#opt-nix.daemonCPUSchedPolicy), [`nix.daemonIOSchedClass`](options.html#opt-nix.daemonIOSchedClass) and [`nix.daemonIOSchedPriority`](options.html#opt-nix.daemonIOSchedPriority). Please refer to the options documentation and the `sched(7)` and `ioprio_set(2)` man pages for guidance on how to use them.
- The `coursier` package's binary was renamed from `coursier` to `cs`. Completions which haven't worked for a while should now work with the renamed binary. To keep using `coursier`, you can create a shell alias.
- The `services.mosquitto` module has been rewritten to support multiple listeners and per-listener configuration.
Module configurations from previous releases will no longer work and must be updated.
- The `fluidsynth_1` attribute has been removed, as this legacy version is no longer needed in nixpkgs. The actively maintained 2.x series is available as `fluidsynth` unchanged.
- Nextcloud 20 (`pkgs.nextcloud20`) has been dropped because it was EOLed by upstream in 2021-10.
- The `virtualisation.pathsInNixDB` option was renamed
[`virtualisation.additionalPaths`](options.html#opt-virtualisation.additionalPaths).
- The `services.ddclient.password` option was removed, and replaced with `services.ddclient.passwordFile`.
- The default GNAT version has been changed: The `gnat` attribute now points to `gnat11`
instead of `gnat9`.
- `retroArchCores` has been removed. This means that using `nixpkgs.config.retroarch` to customize RetroArch cores is not supported anymore. Instead, use package overrides, for example: `retroarch.override { cores = with libretro; [ citra snes9x ]; };`. Also, the `retroarchFull` derivation is available for those who want to have all RetroArch cores available.
## Other Notable Changes {#sec-release-21.11-notable-changes}
- The linux kernel package infrastructure was moved out of `all-packages.nix`, and restructured. Linux related functions and attributes now live under the `pkgs.linuxKernel` attribute set.
In particular the versioned `linuxPackages_*` package sets (such as `linuxPackages_5_4`) and kernels from `pkgs` were moved there and now live under `pkgs.linuxKernel.packages.*`. The unversioned ones (such as `linuxPackages_latest`) remain untouched.
- In NixOS virtual machines (QEMU), the `virtualisation` module has been updated with new options to configure:
- IPv4 port forwarding ([`virtualisation.forwardPorts`](options.html#opt-virtualisation.forwardPorts)),
- shared host directories ([`virtualisation.sharedDirectories`](options.html#opt-virtualisation.sharedDirectories)),
- screen resolution ([`virtualisation.resolution`](options.html#opt-virtualisation.resolution)).
- In NixOS virtual machines (QEMU), the `virtualisation` module has been updated with new options:
- [`forwardPorts`](options.html#opt-virtualisation.forwardPorts) to configure IPv4 port forwarding,
- [`sharedDirectories`](options.html#opt-virtualisation.sharedDirectories) to set up shared host directories,
- [`resolution`](options.html#opt-virtualisation.resolution) to set the screen resolution,
- [`useNixStoreImage`](options.html#opt-virtualisation.useNixStoreImage) to use a disk image for the Nix store instead of 9P.
In addition, the default [`msize`](options.html#opt-virtualisation.msize) parameter in 9P filesystems (including /nix/store and all shared directories) has been increased to 16K for improved performance.
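A combined sketch of these options in a VM configuration (the concrete values are made up):

```nix
virtualisation.forwardPorts = [
  { from = "host"; host.port = 2222; guest.port = 22; }
];
virtualisation.sharedDirectories.projects = {
  source = "/home/alice/projects";
  target = "/mnt/projects";
};
virtualisation.resolution = { x = 1280; y = 1024; };
virtualisation.useNixStoreImage = true;
```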
@ -433,6 +489,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- The [networking.wireless.iwd](options.html#opt-networking.wireless.iwd.enable) module has a new [networking.wireless.iwd.settings](options.html#opt-networking.wireless.iwd.settings) option.
- The [services.smokeping.host](options.html#opt-services.smokeping.host) option was added and defaulted to `localhost`. Before, `smokeping` listened to all interfaces by default. NixOS defaults generally aim to provide non-Internet-exposed defaults for databases and internal monitoring tools, see e.g. [#100192](https://github.com/NixOS/nixpkgs/issues/100192). Further, the systemd service for `smokeping` got reworked defaults for increased operational stability, see [PR #144127](https://github.com/NixOS/nixpkgs/pull/144127) for details.
- The [services.syncoid.enable](options.html#opt-services.syncoid.enable) module now properly drops ZFS permissions after usage. Before it delegated permissions to whole pools instead of datasets and didn't clean up after execution. You can manually look this up for your pools by running `zfs allow your-pool-name` and use `zfs unallow syncoid your-pool-name` to clean this up.
- Zfs: `latestCompatibleLinuxPackages` is now exported on the zfs package. One can use `boot.kernelPackages = config.boot.zfs.package.latestCompatibleLinuxPackages;` to always track the latest compatible kernel with a given version of zfs.
@ -464,11 +522,34 @@ In addition to numerous new and upgraded packages, this release has the followin
- `networking.sits` now supports Foo-over-UDP encapsulation.
- Changing systemd `.socket` units now restarts them and stops the service that is activated by them. Additionally, services with `stopOnChange = false` don't break anymore when they are socket-activated.
- The `virtualisation.libvirtd` module has been refactored and updated with new options:
- `virtualisation.libvirtd.qemu*` options (e.g.: `virtualisation.libvirtd.qemuRunAsRoot`) were moved to [`virtualisation.libvirtd.qemu`](options.html#opt-virtualisation.libvirtd.qemu) submodule,
- software TPM1/TPM2 support (e.g.: Windows 11 guests) ([`virtualisation.libvirtd.qemu.swtpm`](options.html#opt-virtualisation.libvirtd.qemu.swtpm)),
- custom OVMF package (e.g.: `pkgs.OVMFFull` with HTTP, CSM and Secure Boot support) ([`virtualisation.libvirtd.qemu.ovmf.package`](options.html#opt-virtualisation.libvirtd.qemu.ovmf.package)).
- The `cawbird` Twitter client now uses its own API keys to count as a different application than upstream builds. This is done to evade application-level rate limiting. While existing accounts continue to work, users may want to remove and re-register their account in the client to enjoy a better user experience and benefit from this change.
- A new option `services.prometheus.enableReload` has been added which can be enabled to reload the prometheus service when its config file changes instead of restarting.
- The option `services.prometheus.environmentFile` has been removed since it was causing [issues](https://github.com/NixOS/nixpkgs/issues/126083) and Prometheus now has native support for secret files, i.e. `basic_auth.password_file` and `authorization.credentials_file`.
- Dokuwiki now supports Caddy! However:
- the nginx option has been removed; in the new configuration, please use `dokuwiki.webserver = "nginx"` instead.
- The "${hostname}" option has been deprecated; please use `dokuwiki.sites = [ "${hostname}" ]` instead.
- The [services.unifi](options.html#opt-services.unifi.enable) module has been reworked, solving a number of issues. This leads to several user-facing changes:
- The `services.unifi.dataDir` option is removed and the data is now always located under `/var/lib/unifi/data`. This is done to make better use of the systemd state directory and thus make the service restart more reliable.
- The unifi logs can now be found under: `/var/log/unifi` instead of `/var/lib/unifi/logs`.
- The unifi run directory can now be found under: `/run/unifi` instead of `/var/lib/unifi/run`.
- `security.pam.services.<name>.makeHomeDir` now uses `umask=0077` instead of `umask=0022` when creating the home directory.
- Loki has had another release. Some default values have been changed for the configuration and some configuration options have been renamed. For more details, please check [the upgrade guide](https://grafana.com/docs/loki/latest/upgrading/#240).
- `julia` now refers to `julia-stable` instead of `julia-lts`. In practice this means it has been upgraded from `1.0.4` to `1.5.4`.
- RetroArch has been upgraded from version `1.8.5` to `1.9.13.2`. Since the previous release was quite old, if you're having issues after the upgrade, please delete your `$XDG_CONFIG_HOME/retroarch/retroarch.cfg` file.
- hydrus has been upgraded from version `438` to `463`. Since upgrading between releases this old is advised against, be sure to have a backup of your data before upgrading. For details, see [the hydrus manual](https://hydrusnetwork.github.io/hydrus/help/getting_started_installing.html#big_updates).
- More jdk and jre versions are now exposed via `java-packages.compiler`.

View file

@ -0,0 +1,35 @@
# Release 22.05 (“Quokka”, 2022.05/??) {#sec-release-22.05}
In addition to numerous new and upgraded packages, this release has the following highlights:
- Support is planned until the end of December 2022, handing over to 22.11.
## Highlights {#sec-release-22.05-highlights}
## New Services {#sec-release-22.05-new-services}
## Backward Incompatibilities {#sec-release-22.05-incompatibilities}
- `pkgs.ghc` now refers to `pkgs.targetPackages.haskellPackages.ghc`.
This *only* makes a difference if you are cross-compiling and will
ensure that `pkgs.ghc` always runs on the host platform and compiles
for the target platform (similar to `pkgs.gcc` for example).
`haskellPackages.ghc` still behaves as before, running on the build
platform and compiling for the host platform (similar to `stdenv.cc`).
This means you don't have to adjust your derivations if you use
`haskellPackages.callPackage`, but when using `pkgs.callPackage` and
taking `ghc` as an input, you should now use `buildPackages.ghc`
instead to ensure cross compilation keeps working (or switch to
`haskellPackages.callPackage`).
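A sketch of the call-site adjustment for a hypothetical package `my-tool` that takes `ghc` as an input:

```nix
# my-tool.nix takes `ghc` as an argument; pass the build-platform
# compiler explicitly so cross compilation keeps working:
my-tool = pkgs.callPackage ./my-tool.nix { ghc = pkgs.buildPackages.ghc; };
```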
- `pkgs.emacsPackages.orgPackages` is removed because org elpa is deprecated.
The packages in the top level of `pkgs.emacsPackages`, such as org and
org-contrib, refer to the ones in `pkgs.emacsPackages.elpaPackages` and
`pkgs.emacsPackages.nongnuPackages` where the new versions will be released.
- The `wafHook` hook now honors `NIX_BUILD_CORES` when `enableParallelBuilding` is not set explicitly. Packages can restore the old behaviour by setting `enableParallelBuilding=false`.
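For instance, a hypothetical waf-based package could pin the previous behaviour like this:

```nix
my-waf-package.overrideAttrs (old: {
  enableParallelBuilding = false;
})
```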
- `pkgs.claws-mail-gtk2`, representing Claws Mail's older release version three, was removed in order to get rid of Python 2.
Please switch to `claws-mail`, which is Claws Mail's latest release based on GTK+3 and Python 3.
## Other Notable Changes {#sec-release-22.05-notable-changes}

View file

@ -51,23 +51,28 @@ let
};
};
in rec {
# Merge the option definitions in all modules, forming the full
# system configuration.
inherit (lib.evalModules {
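# Evaluate the base, extra and pkgs modules without any user-provided
# modules; extendModules below layers the user's `modules` on top of
# this shared evaluation.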
noUserModules = lib.evalModules {
inherit prefix check;
modules = baseModules ++ extraModules ++ [ pkgsModule ] ++ modules;
modules = baseModules ++ extraModules ++ [ pkgsModule ];
args = extraArgs;
specialArgs =
{ modulesPath = builtins.toString ../modules; } // specialArgs;
}) config options _module;
};
# These are the extra arguments passed to every module. In
# particular, Nixpkgs is passed through the "pkgs" argument.
extraArgs = extraArgs_ // {
inherit baseModules extraModules modules;
inherit noUserModules baseModules extraModules modules;
};
in rec {
# Merge the option definitions in all modules, forming the full
# system configuration.
inherit (noUserModules.extendModules { inherit modules; })
config options _module type;
inherit extraArgs;
inherit (_module.args) pkgs;
}

View file

@ -44,11 +44,14 @@
# most likely fails as GRUB will probably refuse to install.
partitionTableType ? "legacy"
, # Whether to invoke `switch-to-configuration boot` during image creation
installBootLoader ? true
, # The root file system type.
fsType ? "ext4"
, # Filesystem label
label ? "nixos"
label ? if onlyNixStore then "nix-store" else "nixos"
, # The initial NixOS configuration file to be copied to
# /etc/nixos/configuration.nix.
@ -57,10 +60,24 @@
, # Shell code executed after the VM has finished.
postVM ? ""
, # Copy the contents of the Nix store to the root of the image and
# skip further setup. Incompatible with `contents`,
# `installBootLoader` and `configFile`.
onlyNixStore ? false
, name ? "nixos-disk-image"
, # Disk image format, one of qcow2, qcow2-compressed, vdi, vpc, raw.
format ? "raw"
, # Whether a nix channel based on the current source tree should be
# made available inside the image. Useful for interactive use of nix
# utils, but changes the hash of the image when the sources are
# updated.
copyChannel ? true
, # Additional store paths to copy to the image's store.
additionalPaths ? []
}:
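# A rough usage sketch of the new parameters (the import path and the
# surrounding arguments are illustrative only):
#
#   import ./make-disk-image.nix {
#     inherit pkgs lib config;
#     onlyNixStore = true;        # image contains only the Nix store
#     copyChannel = false;        # keep the image independent of channel updates
#     additionalPaths = [ pkgs.hello ];
#     format = "qcow2";
#   }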
assert partitionTableType == "legacy" || partitionTableType == "legacy+gpt" || partitionTableType == "efi" || partitionTableType == "hybrid" || partitionTableType == "none";
@ -71,6 +88,7 @@ assert lib.all
(attrs: ((attrs.user or null) == null)
== ((attrs.group or null) == null))
contents;
assert onlyNixStore -> contents == [] && configFile == null && !installBootLoader;
with lib;
@ -163,7 +181,14 @@ let format' = format; in let
users = map (x: x.user or "''") contents;
groups = map (x: x.group or "''") contents;
closureInfo = pkgs.closureInfo { rootPaths = [ config.system.build.toplevel channelSources ]; };
basePaths = [ config.system.build.toplevel ]
++ lib.optional copyChannel channelSources;
additionalPaths' = subtractLists basePaths additionalPaths;
closureInfo = pkgs.closureInfo {
rootPaths = basePaths ++ additionalPaths';
};
blockSize = toString (4 * 1024); # ext4fs block size (not block device sector size)
@ -251,7 +276,13 @@ let format' = format; in let
chmod 755 "$TMPDIR"
echo "running nixos-install..."
nixos-install --root $root --no-bootloader --no-root-passwd \
--system ${config.system.build.toplevel} --channel ${channelSources} --substituters ""
--system ${config.system.build.toplevel} \
${if copyChannel then "--channel ${channelSources}" else "--no-channel-copy"} \
--substituters ""
${optionalString (additionalPaths' != []) ''
nix --extra-experimental-features nix-command copy --to $root --no-check-sigs ${concatStringsSep " " additionalPaths'}
''}
diskImage=nixos.raw
@ -320,25 +351,29 @@ let format' = format; in let
''}
echo "copying staging root to image..."
cptofs -p ${optionalString (partitionTableType != "none") "-P ${rootPartition}"} -t ${fsType} -i $diskImage $root/* / ||
cptofs -p ${optionalString (partitionTableType != "none") "-P ${rootPartition}"} \
-t ${fsType} \
-i $diskImage \
$root${optionalString onlyNixStore builtins.storeDir}/* / ||
(echo >&2 "ERROR: cptofs failed. diskSize might be too small for closure."; exit 1)
'';
in pkgs.vmTools.runInLinuxVM (
pkgs.runCommand name
{ preVM = prepareImage;
moveOrConvertImage = ''
${if format == "raw" then ''
mv $diskImage $out/${filename}
'' else ''
${pkgs.qemu}/bin/qemu-img convert -f raw -O ${format} ${compress} $diskImage $out/${filename}
''}
diskImage=$out/${filename}
'';
buildImage = pkgs.vmTools.runInLinuxVM (
pkgs.runCommand name {
preVM = prepareImage;
buildInputs = with pkgs; [ util-linux e2fsprogs dosfstools ];
postVM = ''
${if format == "raw" then ''
mv $diskImage $out/${filename}
'' else ''
${pkgs.qemu}/bin/qemu-img convert -f raw -O ${format} ${compress} $diskImage $out/${filename}
''}
diskImage=$out/${filename}
${postVM}
'';
postVM = moveOrConvertImage + postVM;
memSize = 1024;
}
''
} ''
export PATH=${binPath}:$PATH
rootDisk=${if partitionTableType != "none" then "/dev/vda${rootPartition}" else "/dev/vda"}
@ -368,11 +403,13 @@ in pkgs.vmTools.runInLinuxVM (
cp ${configFile} /mnt/etc/nixos/configuration.nix
''}
# Set up core system link, GRUB, etc.
NIXOS_INSTALL_BOOTLOADER=1 nixos-enter --root $mountPoint -- /nix/var/nix/profiles/system/bin/switch-to-configuration boot
${lib.optionalString installBootLoader ''
# Set up core system link, GRUB, etc.
NIXOS_INSTALL_BOOTLOADER=1 nixos-enter --root $mountPoint -- /nix/var/nix/profiles/system/bin/switch-to-configuration boot
# The above scripts will generate a random machine-id and we don't want to bake a single ID into all our images
rm -f $mountPoint/etc/machine-id
# The above scripts will generate a random machine-id and we don't want to bake a single ID into all our images
rm -f $mountPoint/etc/machine-id
''}
# Set the ownerships of the contents. The modes are set in preVM.
# No globbing on targets, so no need to set -f
@ -398,4 +435,9 @@ in pkgs.vmTools.runInLinuxVM (
tune2fs -T now -c 0 -i 0 $rootDisk
''}
''
)
);
in
if onlyNixStore then
pkgs.runCommand name {}
(prepareImage + moveOrConvertImage + postVM)
else buildImage

View file

@ -241,7 +241,7 @@ let
pkgs.vmTools.override {
rootModules =
[ "zfs" "9p" "9pnet_virtio" "virtio_pci" "virtio_blk" ] ++
(pkgs.lib.optional (pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64) "rtc_cmos");
(pkgs.lib.optional pkgs.stdenv.hostPlatform.isx86 "rtc_cmos");
kernel = modulesTree;
}
).runInLinuxVM (

View file

@ -17,12 +17,12 @@ rec {
''-netdev vde,id=vlan${toString nic},sock="$QEMU_VDE_SOCKET_${toString net}"''
];
qemuSerialDevice = if pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64 then "ttyS0"
qemuSerialDevice = if pkgs.stdenv.hostPlatform.isx86 then "ttyS0"
else if (with pkgs.stdenv.hostPlatform; isAarch32 || isAarch64 || isPower) then "ttyAMA0"
else throw "Unknown QEMU serial device for system '${pkgs.stdenv.hostPlatform.system}'";
qemuBinary = qemuPkg: {
x86_64-linux = "${qemuPkg}/bin/qemu-kvm -cpu max";
x86_64-linux = "${qemuPkg}/bin/qemu-kvm -cpu qemu64";
armv7l-linux = "${qemuPkg}/bin/qemu-system-arm -enable-kvm -machine virt -cpu host";
aarch64-linux = "${qemuPkg}/bin/qemu-system-aarch64 -enable-kvm -machine virt,gic-version=host -cpu host";
powerpc64le-linux = "${qemuPkg}/bin/qemu-system-ppc64 -machine powernv";

View file

@ -4,6 +4,7 @@ from queue import Queue, Empty
from typing import Tuple, Any, Callable, Dict, Iterator, Optional, List, Iterable
from xml.sax.saxutils import XMLGenerator
from colorama import Style
from pathlib import Path
import queue
import io
import threading
@ -11,7 +12,6 @@ import argparse
import base64
import codecs
import os
import pathlib
import ptpython.repl
import pty
import re
@ -171,7 +171,7 @@ class Logger:
yield
self.drain_log_queue()
toc = time.time()
self.log("({:.2f} seconds)".format(toc - tic))
self.log("(finished: {}, in {:.2f} seconds)".format(message, toc - tic))
self.xml.endElement("nest")
@ -239,8 +239,8 @@ class StartCommand:
def cmd(
self,
monitor_socket_path: pathlib.Path,
shell_socket_path: pathlib.Path,
monitor_socket_path: Path,
shell_socket_path: Path,
allow_reboot: bool = False, # TODO: unused, legacy?
) -> str:
display_opts = ""
@ -272,8 +272,8 @@ class StartCommand:
@staticmethod
def build_environment(
state_dir: pathlib.Path,
shared_dir: pathlib.Path,
state_dir: Path,
shared_dir: Path,
) -> dict:
# We make a copy to not update the current environment
env = dict(os.environ)
@ -288,10 +288,10 @@ class StartCommand:
def run(
self,
state_dir: pathlib.Path,
shared_dir: pathlib.Path,
monitor_socket_path: pathlib.Path,
shell_socket_path: pathlib.Path,
state_dir: Path,
shared_dir: Path,
monitor_socket_path: Path,
shell_socket_path: Path,
) -> subprocess.Popen:
return subprocess.Popen(
self.cmd(monitor_socket_path, shell_socket_path),
@ -334,7 +334,7 @@ class LegacyStartCommand(StartCommand):
self,
netBackendArgs: Optional[str] = None,
netFrontendArgs: Optional[str] = None,
hda: Optional[Tuple[pathlib.Path, str]] = None,
hda: Optional[Tuple[Path, str]] = None,
cdrom: Optional[str] = None,
usb: Optional[str] = None,
bios: Optional[str] = None,
@ -394,11 +394,11 @@ class Machine:
the machine lifecycle with the help of a start script / command."""
name: str
tmp_dir: pathlib.Path
shared_dir: pathlib.Path
state_dir: pathlib.Path
monitor_path: pathlib.Path
shell_path: pathlib.Path
tmp_dir: Path
shared_dir: Path
state_dir: Path
monitor_path: Path
shell_path: Path
start_command: StartCommand
keep_vm_state: bool
@ -421,7 +421,7 @@ class Machine:
def __init__(
self,
tmp_dir: pathlib.Path,
tmp_dir: Path,
start_command: StartCommand,
name: str = "machine",
keep_vm_state: bool = False,
@ -463,7 +463,7 @@ class Machine:
hda = None
if args.get("hda"):
hda_arg: str = args.get("hda", "")
hda_arg_path: pathlib.Path = pathlib.Path(hda_arg)
hda_arg_path: Path = Path(hda_arg)
hda = (hda_arg_path, args.get("hdaInterface", ""))
return LegacyStartCommand(
netBackendArgs=args.get("netBackendArgs"),
@ -490,23 +490,24 @@ class Machine:
return rootlog.nested(msg, my_attrs)
def wait_for_monitor_prompt(self) -> str:
assert self.monitor is not None
answer = ""
while True:
undecoded_answer = self.monitor.recv(1024)
if not undecoded_answer:
break
answer += undecoded_answer.decode()
if answer.endswith("(qemu) "):
break
return answer
with self.nested("waiting for monitor prompt"):
assert self.monitor is not None
answer = ""
while True:
undecoded_answer = self.monitor.recv(1024)
if not undecoded_answer:
break
answer += undecoded_answer.decode()
if answer.endswith("(qemu) "):
break
return answer
def send_monitor_command(self, command: str) -> str:
message = ("{}\n".format(command)).encode()
self.log("sending monitor command: {}".format(command))
assert self.monitor is not None
self.monitor.send(message)
return self.wait_for_monitor_prompt()
with self.nested("sending monitor command: {}".format(command)):
message = ("{}\n".format(command)).encode()
assert self.monitor is not None
self.monitor.send(message)
return self.wait_for_monitor_prompt()
def wait_for_unit(self, unit: str, user: Optional[str] = None) -> None:
"""Wait for a systemd unit to get into "active" state.
@ -533,7 +534,12 @@ class Machine:
return state == "active"
retry(check_active)
with self.nested(
"waiting for unit {}{}".format(
unit, f" with user {user}" if user is not None else ""
)
):
retry(check_active)
def get_unit_info(self, unit: str, user: Optional[str] = None) -> Dict[str, str]:
status, lines = self.systemctl('--no-pager show "{}"'.format(unit), user)
@ -581,24 +587,45 @@ class Machine:
+ "'{}' but it is in state {}".format(require_state, state)
)
def execute(self, command: str) -> Tuple[int, str]:
def _next_newline_closed_block_from_shell(self) -> str:
assert self.shell
output_buffer = []
while True:
# This receives up to 4096 bytes from the socket
chunk = self.shell.recv(4096)
if not chunk:
# Probably a broken pipe, return the output we have
break
decoded = chunk.decode()
output_buffer += [decoded]
if decoded[-1] == "\n":
break
return "".join(output_buffer)
def execute(
self, command: str, check_return: bool = True, timeout: Optional[int] = 900
) -> Tuple[int, str]:
self.connect()
out_command = "( set -euo pipefail; {} ); echo '|!=EOF' $?\n".format(command)
if timeout is not None:
command = "timeout {} sh -c {}".format(timeout, shlex.quote(command))
out_command = f"( set -euo pipefail; {command} ) | (base64 --wrap 0; echo)\n"
assert self.shell
self.shell.send(out_command.encode())
output = ""
status_code_pattern = re.compile(r"(.*)\|\!=EOF\s+(\d+)")
# Get the output
output = base64.b64decode(self._next_newline_closed_block_from_shell())
while True:
chunk = self.shell.recv(4096).decode(errors="ignore")
match = status_code_pattern.match(chunk)
if match:
output += match[1]
status_code = int(match[2])
return (status_code, output)
output += chunk
if not check_return:
return (-1, output.decode())
# Get the return code
self.shell.send("echo ${PIPESTATUS[0]}\n".encode())
rc = int(self._next_newline_closed_block_from_shell().strip())
return (rc, output.decode())
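The rewritten execute() above frames the command's output by piping it through `base64 --wrap 0` and terminating the block with a newline, then asks the guest shell for `${PIPESTATUS[0]}` in a second round trip, instead of scanning the raw stream for a `|!=EOF` sentinel. A small self-contained sketch of that framing, using a local socket pair in place of the guest shell connection:

    import base64
    import socket

    def next_newline_closed_block(sock: socket.socket) -> str:
        """Read chunks until one ends with a newline (or the peer closes)."""
        parts = []
        while True:
            chunk = sock.recv(4096)
            if not chunk:
                break
            parts.append(chunk.decode())
            if parts[-1].endswith("\n"):
                break
        return "".join(parts)

    host, guest = socket.socketpair()

    # First block: the command's output, base64-encoded and newline-terminated,
    # so arbitrary output cannot be confused with the framing.
    guest.sendall(base64.b64encode(b"hello from the vm\n") + b"\n")
    output = base64.b64decode(next_newline_closed_block(host)).decode()

    # Second block: the exit status, requested separately (echo ${PIPESTATUS[0]}).
    guest.sendall(b"0\n")
    status = int(next_newline_closed_block(host).strip())

    print(status, output.rstrip())  # -> 0 hello from the vm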
def shell_interact(self) -> None:
"""Allows you to interact with the guest shell
@ -613,12 +640,12 @@ class Machine:
pass_fds=[self.shell.fileno()],
)
def succeed(self, *commands: str) -> str:
def succeed(self, *commands: str, timeout: Optional[int] = None) -> str:
"""Execute each command and check that it succeeds."""
output = ""
for command in commands:
with self.nested("must succeed: {}".format(command)):
(status, out) = self.execute(command)
(status, out) = self.execute(command, timeout=timeout)
if status != 0:
self.log("output: {}".format(out))
raise Exception(
@ -627,12 +654,12 @@ class Machine:
output += out
return output
def fail(self, *commands: str) -> str:
def fail(self, *commands: str, timeout: Optional[int] = None) -> str:
"""Execute each command and check that it fails."""
output = ""
for command in commands:
with self.nested("must fail: {}".format(command)):
(status, out) = self.execute(command)
(status, out) = self.execute(command, timeout=timeout)
if status == 0:
raise Exception(
"command `{}` unexpectedly succeeded".format(command)
@ -648,14 +675,14 @@ class Machine:
def check_success(_: Any) -> bool:
nonlocal output
status, output = self.execute(command)
status, output = self.execute(command, timeout=timeout)
return status == 0
with self.nested("waiting for success: {}".format(command)):
retry(check_success, timeout)
return output
def wait_until_fails(self, command: str) -> str:
def wait_until_fails(self, command: str, timeout: int = 900) -> str:
"""Wait until a command returns failure.
Throws an exception on timeout.
"""
@ -663,7 +690,7 @@ class Machine:
def check_failure(_: Any) -> bool:
nonlocal output
status, output = self.execute(command)
status, output = self.execute(command, timeout=timeout)
return status != 0
with self.nested("waiting for failure: {}".format(command)):
@ -736,7 +763,8 @@ class Machine:
status, _ = self.execute("nc -z localhost {}".format(port))
return status != 0
retry(port_is_closed)
with self.nested("waiting for TCP port {} to be closed"):
retry(port_is_closed)
def start_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
return self.systemctl("start {}".format(jobname), user)
@ -798,12 +826,12 @@ class Machine:
"""Copy a file from the host into the guest via the `shared_dir` shared
among all the VMs (using a temporary directory).
"""
host_src = pathlib.Path(source)
vm_target = pathlib.Path(target)
host_src = Path(source)
vm_target = Path(target)
with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
shared_temp = pathlib.Path(shared_td)
shared_temp = Path(shared_td)
host_intermediate = shared_temp / host_src.name
vm_shared_temp = pathlib.Path("/tmp/shared") / shared_temp.name
vm_shared_temp = Path("/tmp/shared") / shared_temp.name
vm_intermediate = vm_shared_temp / host_src.name
self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
@ -820,11 +848,11 @@ class Machine:
all the VMs (using a temporary directory).
"""
# Compute the source, target, and intermediate shared file names
out_dir = pathlib.Path(os.environ.get("out", os.getcwd()))
vm_src = pathlib.Path(source)
out_dir = Path(os.environ.get("out", os.getcwd()))
vm_src = Path(source)
with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
shared_temp = pathlib.Path(shared_td)
vm_shared_temp = pathlib.Path("/tmp/shared") / shared_temp.name
shared_temp = Path(shared_td)
vm_shared_temp = Path("/tmp/shared") / shared_temp.name
vm_intermediate = vm_shared_temp / vm_src.name
intermediate = shared_temp / vm_src.name
# Copy the file to the shared directory inside VM
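Both copy helpers stage files through the shared 9p directory (mounted as /tmp/shared in the guest) via a throwaway temporary sub-directory on each side, so nothing lingers after the copy. A hypothetical test-script usage; the file paths are illustrative, and copy_from_vm is assumed to default to copying into $out (or the current directory when $out is unset), as the code above computes:

    # Push a file into the guest, then pull a result back out to $out.
    machine.copy_from_host("/etc/hostname", "/root/hostname-from-host")
    machine.succeed("test -s /root/hostname-from-host")

    machine.succeed("dmesg > /tmp/dmesg.log")
    machine.copy_from_vm("/tmp/dmesg.log")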
@ -870,24 +898,25 @@ class Machine:
retry(screen_matches)
def wait_for_console_text(self, regex: str) -> None:
self.log("waiting for {} to appear on console".format(regex))
# Buffer the console output, this is needed
# to match multiline regexes.
console = io.StringIO()
while True:
try:
console.write(self.last_lines.get())
except queue.Empty:
self.sleep(1)
continue
console.seek(0)
matches = re.search(regex, console.read())
if matches is not None:
return
with self.nested("waiting for {} to appear on console".format(regex)):
# Buffer the console output, this is needed
# to match multiline regexes.
console = io.StringIO()
while True:
try:
console.write(self.last_lines.get())
except queue.Empty:
self.sleep(1)
continue
console.seek(0)
matches = re.search(regex, console.read())
if matches is not None:
return
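Because the console output is accumulated into one StringIO buffer before each re.search(), a pattern may now span line boundaries. A hypothetical test-script usage; the console messages are examples, not output the driver guarantees:

    machine.start()
    # The whole buffered console text is re-searched after every new line, so a
    # pattern may span several console lines (here via the DOTALL flag).
    machine.wait_for_console_text(r"(?s)Welcome to NixOS.*login: ")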
def send_key(self, key: str) -> None:
key = CHAR_TO_KEY.get(key, key)
self.send_monitor_command("sendkey {}".format(key))
time.sleep(0.01)
def start(self) -> None:
if self.booted:
@ -895,12 +924,12 @@ class Machine:
self.log("starting vm")
def clear(path: pathlib.Path) -> pathlib.Path:
def clear(path: Path) -> Path:
if path.exists():
path.unlink()
return path
def create_socket(path: pathlib.Path) -> socket.socket:
def create_socket(path: Path) -> socket.socket:
s = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM)
s.bind(str(path))
s.listen(1)
@ -998,7 +1027,7 @@ class Machine:
)
return any(pattern.search(name) for name in names)
with self.nested("Waiting for a window to appear"):
with self.nested("waiting for a window to appear"):
retry(window_is_visible)
def sleep(self, secs: int) -> None:
@ -1045,7 +1074,7 @@ class VLan:
"""
nr: int
socket_dir: pathlib.Path
socket_dir: Path
process: subprocess.Popen
pid: int
@ -1054,7 +1083,7 @@ class VLan:
def __repr__(self) -> str:
return f"<Vlan Nr. {self.nr}>"
def __init__(self, nr: int, tmp_dir: pathlib.Path):
def __init__(self, nr: int, tmp_dir: Path):
self.nr = nr
self.socket_dir = tmp_dir / f"vde{self.nr}.ctl"
@ -1107,7 +1136,7 @@ class Driver:
):
self.tests = tests
tmp_dir = pathlib.Path(os.environ.get("TMPDIR", tempfile.gettempdir()))
tmp_dir = Path(os.environ.get("TMPDIR", tempfile.gettempdir()))
tmp_dir.mkdir(mode=0o700, exist_ok=True)
with rootlog.nested("start all VLans"):
@ -1167,9 +1196,11 @@ class Driver:
serial_stdout_on=self.serial_stdout_on,
Machine=Machine, # for typing
)
machine_symbols = {
m.name: self.machines[idx] for idx, m in enumerate(self.machines)
}
machine_symbols = {m.name: m for m in self.machines}
# If there's exactly one machine, make it available under the name
# "machine", even if it's not called that.
if len(self.machines) == 1:
(machine_symbols["machine"],) = self.machines
vlan_symbols = {
f"vlan{v.nr}": self.vlans[idx] for idx, v in enumerate(self.vlans)
}
@ -1214,7 +1245,7 @@ class Driver:
"Using legacy create_machine(), please instantiate the"
"Machine class directly, instead"
)
tmp_dir = pathlib.Path(os.environ.get("TMPDIR", tempfile.gettempdir()))
tmp_dir = Path(os.environ.get("TMPDIR", tempfile.gettempdir()))
tmp_dir.mkdir(mode=0o700, exist_ok=True)
if args.get("startCommand"):
@ -1300,7 +1331,7 @@ if __name__ == "__main__":
action=EnvDefault,
envvar="testScript",
help="the test script to run",
type=pathlib.Path,
type=Path,
)
args = arg_parser.parse_args()


@ -134,7 +134,9 @@ rec {
vlans = map (m: m.config.virtualisation.vlans) (lib.attrValues nodes);
vms = map (m: m.config.system.build.vm) (lib.attrValues nodes);
nodeHostNames = map (c: c.config.system.name) (lib.attrValues nodes);
nodeHostNames = let
nodesList = map (c: c.config.system.name) (lib.attrValues nodes);
in nodesList ++ lib.optional (lib.length nodesList == 1) "machine";
# TODO: This is an implementation error and needs fixing
# the testing framework cannot legitimately restrict hostnames further
@ -209,11 +211,41 @@ rec {
let
nodes = qemu_pkg:
let
testScript' =
# Call the test script with the computed nodes.
if lib.isFunction testScript
then testScript { nodes = nodes qemu_pkg; }
else testScript;
build-vms = import ./build-vms.nix {
inherit system lib pkgs minimal specialArgs;
extraConfigurations = extraConfigurations ++ [(
{ config, ... }:
{
virtualisation.qemu.package = qemu_pkg;
# Make sure all derivations referenced by the test
# script are available on the nodes. When the store is
# accessed through 9p, this isn't important, since
# everything in the store is available to the guest,
# but when building a root image it is, as all paths
# that should be available to the guest have to be
# copied to the image.
virtualisation.additionalPaths =
lib.optional
# A testScript may evaluate nodes, which has caused
# infinite recursions. The demand cycle involves:
# testScript -->
# nodes -->
# toplevel -->
# additionalPaths -->
# hasContext testScript' -->
# testScript (ad infinitum)
# If we don't need to build an image, we can break this
# cycle by short-circuiting when useNixStoreImage is false.
(config.virtualisation.useNixStoreImage && builtins.hasContext testScript')
(pkgs.writeStringReferencesToFile testScript');
# Ensure we do not use aliases. Ideally this is only set
# when the test framework is used by Nixpkgs NixOS tests.
nixpkgs.config.allowAliases = false;


@ -10,7 +10,7 @@ rec {
# Check whether a fileSystem is needed for boot. NOTE: Make sure
# pathsNeededForBoot is closed under the parent relationship, i.e. if /a/b/c
# is in the list, put /a and /a/b in as well.
pathsNeededForBoot = [ "/" "/nix" "/nix/store" "/var" "/var/log" "/var/lib" "/var/lib/nixos" "/etc" ];
pathsNeededForBoot = [ "/" "/nix" "/nix/store" "/var" "/var/log" "/var/lib" "/var/lib/nixos" "/etc" "/usr" ];
fsNeededForBoot = fs: fs.neededForBoot || elem fs.mountPoint pathsNeededForBoot;
# Check whether `b` depends on `a` as a fileSystem


@ -4,6 +4,7 @@ with lib;
let
cfg = config.amazonImage;
amiBootMode = if config.ec2.efi then "uefi" else "legacy-bios";
in {
@ -106,10 +107,12 @@ in {
--arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \
--arg root_logical_bytes "$(${pkgs.qemu}/bin/qemu-img info --output json "$rootDisk" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
--arg boot_logical_bytes "$(${pkgs.qemu}/bin/qemu-img info --output json "$bootDisk" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
--arg boot_mode "${amiBootMode}" \
--arg root "$rootDisk" \
--arg boot "$bootDisk" \
'{}
| .label = $system_label
| .boot_mode = $boot_mode
| .system = $system
| .disks.boot.logical_bytes = $boot_logical_bytes
| .disks.boot.file = $boot
@ -145,9 +148,11 @@ in {
--arg system_label ${lib.escapeShellArg config.system.nixos.label} \
--arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \
--arg logical_bytes "$(${pkgs.qemu}/bin/qemu-img info --output json "$diskImage" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
--arg boot_mode "${amiBootMode}" \
--arg file "$diskImage" \
'{}
| .label = $system_label
| .boot_mode = $boot_mode
| .system = $system
| .logical_bytes = $logical_bytes
| .file = $file


@ -1,6 +1,9 @@
#!/usr/bin/env nix-shell
#!nix-shell -p awscli -p jq -p qemu -i bash
# shellcheck shell=bash
#
# Future Deprecation?
# This entire thing should probably be replaced with a generic terraform config
# Uploads and registers NixOS images built from the
# <nixos/release.nix> amazonImage attribute. Images are uploaded and
@ -65,13 +68,18 @@ read_image_info() {
# We handle a single image per invocation, store all attributes in
# globals for convenience.
zfs_disks=$(read_image_info .disks)
image_label="$(read_image_info .label)${zfs_disks:+-ZFS}"
is_zfs_image=
if jq -e .boot <<< "$zfs_disks"; then
is_zfs_image=1
zfs_boot=".disks.boot"
fi
image_label="$(read_image_info .label)${is_zfs_image:+-ZFS}"
image_system=$(read_image_info .system)
image_files=( $(read_image_info "${zfs_disks:+.disks.root}.file") )
image_files=( $(read_image_info ".disks.root.file") )
image_logical_bytes=$(read_image_info "${zfs_disks:+.disks.boot}.logical_bytes")
image_logical_bytes=$(read_image_info "${zfs_boot:-.disks.root}.logical_bytes")
if [[ -n "$zfs_disks" ]]; then
if [[ -n "$is_zfs_image" ]]; then
image_files+=( $(read_image_info .disks.boot.file) )
fi
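The new detection keys everything off whether the image-info JSON carries a separate boot disk, rather than off the raw string value of .disks. A rough Python rendering of just that check, to spell out the JSON shape being assumed (the file name is hypothetical):

    import json

    with open("nix-support/image-info.json") as f:  # hypothetical path
        info = json.load(f)

    # Mirrors `jq -e .boot <<< "$(read_image_info .disks)"`: ZFS images are the
    # ones whose disk table has a dedicated "boot" entry.
    is_zfs_image = "boot" in (info.get("disks") or {})

    image_label = info["label"] + ("-ZFS" if is_zfs_image else "")
    print(image_label, is_zfs_image)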
@ -192,7 +200,7 @@ upload_image() {
for image_file in "${image_files[@]}"; do
local aws_path=${image_file#/}
if [[ -n "$zfs_disks" ]]; then
if [[ -n "$is_zfs_image" ]]; then
local suffix=${image_file%.*}
suffix=${suffix##*.}
fi
@ -239,7 +247,7 @@ upload_image() {
"DeviceName=/dev/xvda,Ebs={SnapshotId=$snapshot_id,VolumeSize=$image_logical_gigabytes,DeleteOnTermination=true,VolumeType=gp3}"
)
if [[ -n "$zfs_disks" ]]; then
if [[ -n "$is_zfs_image" ]]; then
local root_snapshot_id=$(read_state "$region.$image_label.root.$image_system" snapshot_id)
local root_image_logical_bytes=$(read_image_info ".disks.root.logical_bytes")
@ -270,6 +278,7 @@ upload_image() {
--region "$region" \
--architecture $amazon_arch \
--block-device-mappings "${block_device_mappings[@]}" \
--boot-mode $(read_image_info .boot_mode) \
"${extra_flags[@]}" \
| jq -r '.ImageId'
)


@ -0,0 +1,102 @@
# Edit this configuration file to define what should be installed on
# your system. Help is available in the configuration.nix(5) man page
# and in the NixOS manual (accessible by running nixos-help).
{ config, pkgs, lib, ... }:
with lib;
{
imports =
[ # Include the default lxd configuration.
../../../modules/virtualisation/lxc-container.nix
# Include the container-specific autogenerated configuration.
./lxd.nix
];
# networking.hostName = mkForce "nixos"; # Overwrite the hostname.
# networking.wireless.enable = true; # Enables wireless support via wpa_supplicant.
# Set your time zone.
# time.timeZone = "Europe/Amsterdam";
# The global useDHCP flag is deprecated, therefore explicitly set to false here.
# Per-interface useDHCP will be mandatory in the future, so this generated config
# replicates the default behaviour.
networking.useDHCP = false;
networking.interfaces.eth0.useDHCP = true;
# Configure network proxy if necessary
# networking.proxy.default = "http://user:password@proxy:port/";
# networking.proxy.noProxy = "127.0.0.1,localhost,internal.domain";
# Select internationalisation properties.
# i18n.defaultLocale = "en_US.UTF-8";
# console = {
# font = "Lat2-Terminus16";
# keyMap = "us";
# };
# Enable the X11 windowing system.
# services.xserver.enable = true;
# Configure keymap in X11
# services.xserver.layout = "us";
# services.xserver.xkbOptions = "eurosign:e";
# Enable CUPS to print documents.
# services.printing.enable = true;
# Enable sound.
# sound.enable = true;
# hardware.pulseaudio.enable = true;
# Enable touchpad support (enabled default in most desktopManager).
# services.xserver.libinput.enable = true;
# Define a user account. Don't forget to set a password with passwd.
# users.users.jane = {
# isNormalUser = true;
# extraGroups = [ "wheel" ]; # Enable sudo for the user.
# };
# List packages installed in system profile. To search, run:
# $ nix search wget
# environment.systemPackages = with pkgs; [
# vim # Do not forget to add an editor to edit configuration.nix! The Nano editor is also installed by default.
# wget
# firefox
# ];
# Some programs need SUID wrappers, can be configured further or are
# started in user sessions.
# programs.mtr.enable = true;
# programs.gnupg.agent = {
# enable = true;
# enableSSHSupport = true;
# };
# List services that you want to enable:
# Enable the OpenSSH daemon.
# services.openssh.enable = true;
# Open ports in the firewall.
# networking.firewall.allowedTCPPorts = [ ... ];
# networking.firewall.allowedUDPPorts = [ ... ];
# Or disable the firewall altogether.
# networking.firewall.enable = false;
# This value determines the NixOS release from which the default
# settings for stateful data, like file locations and database versions
# on your system were taken. It's perfectly fine and recommended to leave
# this value at the release version of the first install of this system.
# Before changing this value read the documentation for this option
# (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
system.stateVersion = "21.05"; # Did you read the comment?
# As this is intended as a standalone image, undo some of the minimal profile stuff
documentation.enable = true;
documentation.nixos.enable = true;
environment.noXlibs = false;
}


@ -0,0 +1,34 @@
{ lib, config, pkgs, ... }:
with lib;
{
imports = [
../../../modules/virtualisation/lxc-container.nix
];
virtualisation.lxc.templates.nix = {
enable = true;
target = "/etc/nixos/lxd.nix";
template = ./nix.tpl;
when = [ "create" "copy" ];
};
# copy the config for nixos-rebuild
system.activationScripts.config = ''
if [ ! -e /etc/nixos/configuration.nix ]; then
mkdir -p /etc/nixos
cat ${./lxd-image-inner.nix} > /etc/nixos/configuration.nix
sed 's|../../../modules/virtualisation/lxc-container.nix|<nixpkgs/nixos/modules/virtualisation/lxc-container.nix>|g' -i /etc/nixos/configuration.nix
fi
'';
# Network
networking.useDHCP = false;
networking.interfaces.eth0.useDHCP = true;
# As this is intended as a standalone image, undo some of the minimal profile stuff
documentation.enable = true;
documentation.nixos.enable = true;
environment.noXlibs = false;
}


@ -0,0 +1,9 @@
{ lib, config, pkgs, ... }:
with lib;
# WARNING: THIS CONFIGURATION IS AUTOGENERATED AND WILL BE OVERWRITTEN AUTOMATICALLY
{
networking.hostName = "{{ container.name }}";
}


@ -39,6 +39,7 @@ in
decompressFonts = mkOption {
type = types.bool;
default = config.programs.xwayland.enable;
defaultText = literalExpression "config.programs.xwayland.enable";
description = ''
Whether to decompress fonts in
<filename>/run/current-system/sw/share/X11/fonts</filename>.


@ -6,6 +6,7 @@ with lib;
gtk.iconCache.enable = mkOption {
type = types.bool;
default = config.services.xserver.enable;
defaultText = literalExpression "config.services.xserver.enable";
description = ''
Whether to build icon theme caches for GTK applications.
'';


@ -14,6 +14,12 @@ with lib;
allLocales = any (x: x == "all") config.i18n.supportedLocales;
locales = config.i18n.supportedLocales;
};
defaultText = literalExpression ''
pkgs.buildPackages.glibcLocales.override {
allLocales = any (x: x == "all") config.i18n.supportedLocales;
locales = config.i18n.supportedLocales;
}
'';
example = literalExpression "pkgs.glibcLocales";
description = ''
Customized pkgs.glibcLocales package.


@ -47,6 +47,15 @@ let
'';
};
allowDiscards = mkOption {
default = false;
type = types.bool;
description = ''
Whether to allow TRIM requests to the underlying device. This option
has security implications; please read the LUKS documentation before
activating it.
'';
};
};
};
@ -194,7 +203,6 @@ in
];
# Create missing swapfiles.
# FIXME: support changing the size of existing swapfiles.
systemd.services =
let
@ -214,17 +222,14 @@ in
${optionalString (sw.size != null) ''
currentSize=$(( $(stat -c "%s" "${sw.device}" 2>/dev/null || echo 0) / 1024 / 1024 ))
if [ "${toString sw.size}" != "$currentSize" ]; then
fallocate -l ${toString sw.size}M "${sw.device}" ||
dd if=/dev/zero of="${sw.device}" bs=1M count=${toString sw.size}
if [ "${toString sw.size}" -lt "$currentSize" ]; then
truncate --size "${toString sw.size}M" "${sw.device}"
fi
dd if=/dev/zero of="${sw.device}" bs=1M count=${toString sw.size}
chmod 0600 ${sw.device}
${optionalString (!sw.randomEncryption.enable) "mkswap ${sw.realDevice}"}
fi
''}
${optionalString sw.randomEncryption.enable ''
cryptsetup plainOpen -c ${sw.randomEncryption.cipher} -d ${sw.randomEncryption.source} ${optionalString (sw.discardPolicy != null) "--allow-discards"} ${sw.device} ${sw.deviceName}
cryptsetup plainOpen -c ${sw.randomEncryption.cipher} -d ${sw.randomEncryption.source} \
${optionalString sw.randomEncryption.allowDiscards "--allow-discards"} ${sw.device} ${sw.deviceName}
mkswap ${sw.realDevice}
''}
'';


@ -558,6 +558,7 @@ in {
input.gid = ids.gids.input;
kvm.gid = ids.gids.kvm;
render.gid = ids.gids.render;
sgx.gid = ids.gids.sgx;
shadow.gid = ids.gids.shadow;
};


@ -83,7 +83,7 @@ in {
b43Firmware_5_1_138
b43Firmware_6_30_163_46
b43FirmwareCutter
] ++ optional (pkgs.stdenv.hostPlatform.isi686 || pkgs.stdenv.hostPlatform.isx86_64) facetimehd-firmware;
] ++ optional pkgs.stdenv.hostPlatform.isx86 facetimehd-firmware;
})
(mkIf cfg.wirelessRegulatoryDatabase {
hardware.firmware = [ pkgs.wireless-regdb ];


@ -0,0 +1,12 @@
{ config, lib, pkgs, ... }:
let
cfg = config.hardware.flirc;
in
{
options.hardware.flirc.enable = lib.mkEnableOption "software to configure a Flirc USB device";
config = lib.mkIf cfg.enable {
environment.systemPackages = [ pkgs.flirc ];
services.udev.packages = [ pkgs.flirc ];
};
}


@ -116,19 +116,14 @@ in {
description = "Ensure NixOS-configured CUPS printers";
wantedBy = [ "multi-user.target" ];
requires = [ cupsUnit ];
# in contrast to cups.socket, for cups.service, this is actually not enough,
# as the cups service reports its activation before clients can actually interact with it.
# Because of this, commands like `lpinfo -v` will report a bad file descriptor
# due to the missing UNIX socket without sufficient sleep time.
after = [ cupsUnit ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
};
# sleep 10 is required to wait until cups.service is actually initialized and has created its UNIX socket file
script = (optionalString (!config.services.printing.startWhenNeeded) "sleep 10\n")
+ (concatMapStringsSep "\n" ensurePrinter cfg.ensurePrinters)
script = concatMapStringsSep "\n" ensurePrinter cfg.ensurePrinters
+ optionalString (cfg.ensureDefaultPrinter != null) (ensureDefaultPrinter cfg.ensureDefaultPrinter);
};
};


@ -12,6 +12,5 @@ with lib;
boot.loader.systemd-boot.consoleMode = mkDefault "1";
# TODO Find reasonable defaults X11 & wayland
services.xserver.dpi = lib.mkDefault 192;
};
}


@ -179,7 +179,7 @@ in
in mkIf enabled {
assertions = [
{
assertion = with config.services.xserver.displayManager; gdm.nvidiaWayland -> cfg.modesetting.enable;
assertion = with config.services.xserver.displayManager; (gdm.enable && gdm.nvidiaWayland) -> cfg.modesetting.enable;
message = "You cannot use wayland with GDM without modesetting enabled for NVIDIA drivers, set `hardware.nvidia.modesetting.enable = true`";
}
@ -284,13 +284,17 @@ in
source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";
};
# 'nvidia_x11' installs its files to /run/opengl-driver/...
environment.etc."egl/egl_external_platform.d".source =
"/run/opengl-driver/share/egl/egl_external_platform.d/";
hardware.opengl.package = mkIf (!offloadCfg.enable) nvidia_x11.out;
hardware.opengl.package32 = mkIf (!offloadCfg.enable) nvidia_x11.lib32;
hardware.opengl.extraPackages = optional offloadCfg.enable nvidia_x11.out;
hardware.opengl.extraPackages32 = optional offloadCfg.enable nvidia_x11.lib32;
environment.systemPackages = [ nvidia_x11.bin ]
++ optionals nvidiaSettings [ nvidia_x11.settings ]
++ optionals cfg.nvidiaSettings [ nvidia_x11.settings ]
++ optionals nvidiaPersistencedEnabled [ nvidia_x11.persistenced ];
systemd.packages = optional cfg.powerManagement.enable nvidia_x11.out;


@ -467,7 +467,7 @@ let
throw "Unsupported architecture";
# Syslinux (and isolinux) only supports x86-based architectures.
canx86BiosBoot = pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64;
canx86BiosBoot = pkgs.stdenv.hostPlatform.isx86;
in


@ -1,7 +1,7 @@
{
x86_64-linux = "/nix/store/nzp4m3cmm7wawk031byh8jg4cdzjq212-nix-2.3.16";
i686-linux = "/nix/store/zsaza9pwim617ak15fsc31lv65b9w3in-nix-2.3.16";
aarch64-linux = "/nix/store/7f6z40gyd405yd50qkyzwilnqw106bx8-nix-2.3.16";
x86_64-darwin = "/nix/store/c43kyri67ia8mibs0id5ara7gqwlkybf-nix-2.3.16";
aarch64-darwin = "/nix/store/6jwhak3cvsgnbqs540n27g8pxnk427fr-nix-2.3.16";
x86_64-linux = "/nix/store/hapw7q1fkjxvprnkcgw9ppczavg4daj2-nix-2.4";
i686-linux = "/nix/store/8qlvh8pp5j8wgrzj3is2jlbhgrwgsiy9-nix-2.4";
aarch64-linux = "/nix/store/h48lkygcqj4hdibbdnpl67q7ks6vkrd6-nix-2.4";
x86_64-darwin = "/nix/store/c3mvzszvyzakvcp9spnjvsb8m2bpjk7m-nix-2.4";
aarch64-darwin = "/nix/store/hbfqs62r0hga2yr4zi5kc7fzhf71bq9n-nix-2.4";
}


@ -1,4 +1,5 @@
#! @runtimeShell@ -e
# shellcheck shell=bash
# Shows the usage of this command to the user
@ -29,12 +30,12 @@ while [ $# -gt 0 ]; do
nixBuildArgs+=("--option" "$1" "$2"); shift
;;
*)
if [ ! -z "$networkExpr" ]; then
if [ -n "$networkExpr" ]; then
echo "Network expression already set!"
showUsage
exit 1
fi
networkExpr="$(readlink -f $1)"
networkExpr="$(readlink -f "$1")"
;;
esac
@ -49,4 +50,4 @@ fi
# Build a network of VMs
nix-build '<nixpkgs/nixos/modules/installer/tools/nixos-build-vms/build-vms.nix>' \
--argstr networkExpr $networkExpr "${nixBuildArgs[@]}"
--argstr networkExpr "$networkExpr" "${nixBuildArgs[@]}"


@ -1,4 +1,5 @@
#! @runtimeShell@
# shellcheck shell=bash
set -e
@ -60,6 +61,35 @@ chmod 0755 "$mountPoint/dev" "$mountPoint/sys"
mount --rbind /dev "$mountPoint/dev"
mount --rbind /sys "$mountPoint/sys"
# modified from https://github.com/archlinux/arch-install-scripts/blob/bb04ab435a5a89cd5e5ee821783477bc80db797f/arch-chroot.in#L26-L52
chroot_add_resolv_conf() {
local chrootdir=$1 resolv_conf=$1/etc/resolv.conf
[[ -e /etc/resolv.conf ]] || return 0
# Handle resolv.conf as a symlink to somewhere else.
if [[ -L $chrootdir/etc/resolv.conf ]]; then
# readlink(1) should always give us *something* since we know at this point
# it's a symlink. For simplicity, ignore the case of nested symlinks.
# We also ignore the possibility of `../`s escaping the root.
resolv_conf=$(readlink "$chrootdir/etc/resolv.conf")
if [[ $resolv_conf = /* ]]; then
resolv_conf=$chrootdir$resolv_conf
else
resolv_conf=$chrootdir/etc/$resolv_conf
fi
fi
# ensure file exists to bind mount over
if [[ ! -f $resolv_conf ]]; then
install -Dm644 /dev/null "$resolv_conf" || return 1
fi
mount --bind /etc/resolv.conf "$resolv_conf"
}
chroot_add_resolv_conf "$mountPoint" || print "ERROR: failed to set up resolv.conf"
(
# If silent, write both stdout and stderr of activation script to /dev/null
# otherwise, write both streams to stderr of this process


@ -91,6 +91,11 @@ sub hasCPUFeature {
}
sub cpuManufacturer {
my $id = shift;
return $cpuinfo =~ /^vendor_id\s*:.* $id$/m;
}
# Determine CPU governor to use
if (-e "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors") {
@ -111,6 +116,9 @@ if (-e "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors") {
push @kernelModules, "kvm-intel" if hasCPUFeature "vmx";
push @kernelModules, "kvm-amd" if hasCPUFeature "svm";
push @attrs, "hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;" if cpuManufacturer "AuthenticAMD";
push @attrs, "hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;" if cpuManufacturer "GenuineIntel";
# Look at the PCI devices and add necessary modules. Note that most
# modules are auto-detected so we don't need to list them here.


@ -1,4 +1,5 @@
#! @runtimeShell@
# shellcheck shell=bash
set -e
shopt -s nullglob
@ -58,7 +59,7 @@ while [ "$#" -gt 0 ]; do
--no-channel-copy)
noChannelCopy=1
;;
--no-root-passwd)
--no-root-password|--no-root-passwd)
noRootPasswd=1
;;
--no-bootloader)


@ -1,4 +1,5 @@
#! @runtimeShell@
# shellcheck shell=bash
case "$1" in
-h|--help)


@ -80,6 +80,10 @@ let
];
};
# List of currently active man outputs, intended for use as default values
# for man-related options; "man" is therefore included unconditionally.
activeManOutputs = [ "man" ] ++ lib.optionals cfg.dev.enable [ "devman" ];
in
{
@ -130,7 +134,7 @@ in
name = "man-paths";
paths = config.environment.systemPackages;
pathsToLink = [ "/share/man" ];
extraOutputsToInstall = ["man"];
extraOutputsToInstall = activeManOutputs;
ignoreCollisions = true;
};
defaultText = literalDocBook "all man pages in <option>config.environment.systemPackages</option>";
@ -226,7 +230,7 @@ in
(mkIf cfg.man.enable {
environment.systemPackages = [ pkgs.man-db ];
environment.pathsToLink = [ "/share/man" ];
environment.extraOutputsToInstall = [ "man" ] ++ optional cfg.dev.enable "devman";
environment.extraOutputsToInstall = activeManOutputs;
environment.etc."man_db.conf".text =
let
manualCache = pkgs.runCommandLocal "man-cache" { } ''


@ -351,6 +351,7 @@ in
hqplayer = 319;
moonraker = 320;
distcc = 321;
webdav = 322;
# When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
@ -638,7 +639,7 @@ in
qemu-libvirtd = 301;
kvm = 302; # default udev rules from systemd requires these
render = 303; # default udev rules from systemd requires these
# zeronet = 304; # removed 2019-01-03
sgx = 304; # default udev rules from systemd requires these
lirc = 305;
lidarr = 306;
slurm = 307;
@ -656,6 +657,7 @@ in
hqplayer = 319;
moonraker = 320;
distcc = 321;
webdav = 322;
# When adding a gid, make sure it doesn't match an existing
# uid. Users and groups with the same name should have equal


@ -84,12 +84,15 @@ in {
"bdev"
"binfmt"
"binfmt_misc"
"ceph"
"cgroup"
"cgroup2"
"cifs"
"coda"
"configfs"
"cramfs"
"cpuset"
"curlftpfs"
"debugfs"
"devfs"
"devpts"
@ -101,6 +104,13 @@ in {
"ftpfs"
"fuse"
"fusectl"
"fusesmb"
"fuse.ceph"
"fuse.glusterfs"
"fuse.gvfsd-fuse"
"fuse.mfs"
"fuse.rclone"
"fuse.rozofs"
"fuse.sshfs"
"gfs"
"gfs2"
@ -110,9 +120,15 @@ in {
"iso9660"
"jffs2"
"lustre"
"lustre_lite"
"misc"
"mfs"
"mqueue"
"ncpfs"
"nfs"
"NFS"
"nfs4"
"nfsd"
"nnpfs"
"ocfs"
"ocfs2"
@ -127,16 +143,14 @@ in {
"smbfs"
"sockfs"
"spufs"
"nfs"
"NFS"
"nfs4"
"nfsd"
"sshfs"
"subfs"
"supermount"
"sysfs"
"tmpfs"
"tracefs"
"ubifs"
"udev"
"udf"
"usbfs"
"vboxsf"
@ -149,7 +163,7 @@ in {
prunePaths = mkOption {
type = listOf path;
default = ["/tmp" "/var/tmp" "/var/cache" "/var/lock" "/var/run" "/var/spool" "/nix/store"];
default = [ "/tmp" "/var/tmp" "/var/cache" "/var/lock" "/var/run" "/var/spool" "/nix/store" "/nix/var/log/nix" ];
description = ''
Which paths to exclude from indexing
'';
@ -157,7 +171,7 @@ in {
pruneNames = mkOption {
type = listOf str;
default = [];
default = [ ".bzr" ".cache" ".git" ".hg" ".svn" ];
description = ''
Directory name components; any path containing one of them is excluded from indexing
'';
@ -202,7 +216,7 @@ in {
PRUNEFS="${lib.concatStringsSep " " cfg.pruneFS}"
PRUNENAMES="${lib.concatStringsSep " " cfg.pruneNames}"
PRUNEPATHS="${lib.concatStringsSep " " cfg.prunePaths}"
PRUNE_BIND_MOUNTSFR="${lib.boolToString cfg.pruneBindMounts}"
PRUNE_BIND_MOUNTS="${if cfg.pruneBindMounts then "yes" else "no"}"
'';
};
};


@ -37,7 +37,7 @@ in
type = listOfMaintainers;
internal = true;
default = [];
example = [ lib.maintainers.all ];
example = literalExpression ''[ lib.maintainers.all ]'';
description = ''
List of maintainers of each module. This option should be defined at
most once per module.


@ -49,6 +49,7 @@
./hardware/digitalbitbox.nix
./hardware/device-tree.nix
./hardware/gkraken.nix
./hardware/flirc.nix
./hardware/i2c.nix
./hardware/sensor/hddtemp.nix
./hardware/sensor/iio.nix
@ -128,6 +129,7 @@
./programs/cdemu.nix
./programs/chromium.nix
./programs/clickshare.nix
./programs/cnping.nix
./programs/command-not-found/command-not-found.nix
./programs/criu.nix
./programs/dconf.nix
@ -390,6 +392,7 @@
./services/display-managers/greetd.nix
./services/editors/emacs.nix
./services/editors/infinoted.nix
./services/finance/odoo.nix
./services/games/crossfire-server.nix
./services/games/deliantra-server.nix
./services/games/factorio.nix
@ -420,6 +423,7 @@
./services/hardware/pcscd.nix
./services/hardware/pommed.nix
./services/hardware/power-profiles-daemon.nix
./services/hardware/rasdaemon.nix
./services/hardware/ratbagd.nix
./services/hardware/sane.nix
./services/hardware/sane_extra_backends/brscan4.nix
@ -482,6 +486,9 @@
./services/mail/roundcube.nix
./services/mail/sympa.nix
./services/mail/nullmailer.nix
./services/matrix/mjolnir.nix
./services/matrix/pantalaimon.nix
./services/misc/ananicy.nix
./services/misc/airsonic.nix
./services/misc/ankisyncd.nix
./services/misc/apache-kafka.nix
@ -584,6 +591,7 @@
./services/misc/safeeyes.nix
./services/misc/sdrplay.nix
./services/misc/sickbeard.nix
./services/misc/signald.nix
./services/misc/siproxd.nix
./services/misc/snapper.nix
./services/misc/sonarr.nix
@ -603,6 +611,7 @@
./services/misc/uhub.nix
./services/misc/weechat.nix
./services/misc/xmr-stak.nix
./services/misc/xmrig.nix
./services/misc/zigbee2mqtt.nix
./services/misc/zoneminder.nix
./services/misc/zookeeper.nix
@ -675,12 +684,15 @@
./services/network-filesystems/tahoe.nix
./services/network-filesystems/diod.nix
./services/network-filesystems/u9fs.nix
./services/network-filesystems/webdav.nix
./services/network-filesystems/webdav-server-rs.nix
./services/network-filesystems/yandex-disk.nix
./services/network-filesystems/xtreemfs.nix
./services/network-filesystems/ceph.nix
./services/networking/3proxy.nix
./services/networking/adguardhome.nix
./services/networking/amuled.nix
./services/networking/antennas.nix
./services/networking/aria2.nix
./services/networking/asterisk.nix
./services/networking/atftpd.nix
@ -768,6 +780,7 @@
./services/networking/libreswan.nix
./services/networking/lldpd.nix
./services/networking/logmein-hamachi.nix
./services/networking/lxd-image-server.nix
./services/networking/mailpile.nix
./services/networking/magic-wormhole-mailbox-server.nix
./services/networking/matterbridge.nix
@ -885,6 +898,7 @@
./services/networking/unbound.nix
./services/networking/unifi.nix
./services/video/unifi-video.nix
./services/video/rtsp-simple-server.nix
./services/networking/v2ray.nix
./services/networking/vsftpd.nix
./services/networking/wasabibackend.nix
@ -969,6 +983,7 @@
./services/web-apps/atlassian/jira.nix
./services/web-apps/bookstack.nix
./services/web-apps/calibre-web.nix
./services/web-apps/code-server.nix
./services/web-apps/convos.nix
./services/web-apps/cryptpad.nix
./services/web-apps/dex.nix
@ -991,6 +1006,7 @@
./services/web-apps/jitsi-meet.nix
./services/web-apps/keycloak.nix
./services/web-apps/lemmy.nix
./services/web-apps/invidious.nix
./services/web-apps/limesurvey.nix
./services/web-apps/mastodon.nix
./services/web-apps/mattermost.nix
@ -1007,6 +1023,7 @@
./services/web-apps/pgpkeyserver-lite.nix
./services/web-apps/matomo.nix
./services/web-apps/moinmoin.nix
./services/web-apps/openwebrx.nix
./services/web-apps/restya-board.nix
./services/web-apps/sogo.nix
./services/web-apps/rss-bridge.nix
@ -1182,6 +1199,7 @@
./virtualisation/virtualbox-guest.nix
./virtualisation/virtualbox-host.nix
./virtualisation/vmware-guest.nix
./virtualisation/waydroid.nix
./virtualisation/xen-dom0.nix
./virtualisation/xe-guest-utilities.nix
]


@ -40,6 +40,7 @@
# Tools to create / manipulate filesystems.
pkgs.ntfsprogs # for resizing NTFS partitions
pkgs.dosfstools
pkgs.mtools
pkgs.xfsprogs.bin
pkgs.jfsutils
pkgs.f2fs-tools


@ -14,4 +14,6 @@ with lib;
documentation.enable = mkDefault false;
documentation.nixos.enable = mkDefault false;
programs.command-not-found.enable = mkDefault false;
}


@ -1,9 +1,9 @@
{ config, lib, ... }:
{ config, pkgs, lib, ... }:
{
options.programs.bcc.enable = lib.mkEnableOption "bcc";
config = lib.mkIf config.programs.bcc.enable {
environment.systemPackages = [ config.boot.kernelPackages.bcc ];
boot.extraModulePackages = [ config.boot.kernelPackages.bcc ];
environment.systemPackages = [ pkgs.bcc ];
boot.extraModulePackages = [ pkgs.bcc ];
};
}


@ -0,0 +1,21 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.programs.cnping;
in
{
options = {
programs.cnping = {
enable = mkEnableOption "Whether to install a setcap wrapper for cnping";
};
};
config = mkIf cfg.enable {
security.wrappers.cnping = {
source = "${pkgs.cnping}/bin/cnping";
capabilities = "cap_net_raw+ep";
};
};
}
