Project import generated by Copybara.

GitOrigin-RevId: 540dccb2aeaffa9dc69bfdc41c55abd7ccc6baa3
This commit is contained in:
Default email 2021-05-28 11:39:13 +02:00
parent c56b6b358f
commit ec92d4d331
1525 changed files with 35640 additions and 15820 deletions

View file

@@ -46,6 +46,7 @@
/nixos/default.nix @nbp @infinisil
/nixos/lib/from-env.nix @nbp @infinisil
/nixos/lib/eval-config.nix @nbp @infinisil
/nixos/doc @ryantm
/nixos/doc/manual/configuration/abstractions.xml @nbp
/nixos/doc/manual/configuration/config-file.xml @nbp
/nixos/doc/manual/configuration/config-syntax.xml @nbp
@@ -179,8 +180,7 @@
/pkgs/top-level/emacs-packages.nix @adisbladis
# Neovim
/pkgs/applications/editors/neovim @jonringer
/pkgs/applications/editors/neovim @teto
/pkgs/applications/editors/neovim @jonringer @teto
# VimPlugins
/pkgs/misc/vim-plugins @jonringer @softinio

View file

@@ -23,6 +23,5 @@ Reviewing guidelines: https://nixos.org/manual/nixpkgs/unstable/#chap-reviewing-
- [ ] Tested via one or more NixOS test(s) if existing and applicable for the change (look inside [nixos/tests](https://github.com/NixOS/nixpkgs/blob/master/nixos/tests))
- [ ] Tested compilation of all pkgs that depend on this change using `nix-shell -p nixpkgs-review --run "nixpkgs-review wip"`
- [ ] Tested execution of all binary files (usually in `./result/bin/`)
- [ ] Determined the impact on package closure size (by running `nix path-info -S` before and after)
- [ ] Ensured that relevant documentation is up to date
- [ ] Added a release notes entry if the change is major or breaking
- [ ] Fits [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/.github/CONTRIBUTING.md).

View file

@@ -0,0 +1,22 @@
name: Backport
on:
  pull_request:
    types: [closed]
jobs:
  backport:
    name: Create backport PRs
    if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          # required to find all branches
          fetch-depth: 0
      - name: Create backport PRs
        # should be kept in sync with `version`
        uses: zeebe-io/backport-action@9b8949dcd4295d364b0939f07d0c7593598d26cd
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          github_workspace: ${{ github.workspace }}
          # should be kept in sync with `uses`
          version: 9b8949dcd4295d364b0939f07d0c7593598d26cd

View file

@@ -12,6 +12,7 @@ on:
jobs:
  nixos:
    runs-on: ubuntu-latest
    if: github.repository_owner == 'NixOS'
    steps:
      - uses: actions/checkout@v2
        with:

View file

@@ -12,6 +12,7 @@ on:
jobs:
  nixpkgs:
    runs-on: ubuntu-latest
    if: github.repository_owner == 'NixOS'
    steps:
      - uses: actions/checkout@v2
        with:

View file

@@ -8,7 +8,7 @@ on:
jobs:
  sync-branch:
    if: github.repository == 'NixOS/nixpkgs'
    if: github.repository_owner == 'NixOS'
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

View file

@@ -0,0 +1,21 @@
name: NixOS manual checks
on:
  pull_request:
    branches-ignore:
      - 'release-**'
    paths:
      - 'nixos/**/*.xml'
      - 'nixos/**/*.md'
jobs:
  tests:
    runs-on: ubuntu-latest
    if: github.repository_owner == 'NixOS'
    steps:
      - uses: actions/checkout@v2
      - uses: cachix/install-nix-action@v12
      - name: Check DocBook files generated from Markdown are consistent
        run: |
          nixos/doc/manual/md-to-db.sh
          git diff --exit-code

View file

@@ -41,7 +41,7 @@ jobs:
- name: check branch
env:
PERMANENT_BRANCHES: "haskell-updates|master|nixos|nixpkgs|python-unstable|release|staging"
VALID_BRANCHES: "haskell-updates|master|python-unstable|release-20.09|staging|staging-20.09|staging-next"
VALID_BRANCHES: "haskell-updates|master|python-unstable|release-20.09|release-21.05|staging|staging-20.09|staging-21.05|staging-next|staging-next-21.05"
run: |
message() {
cat <<EOF

View file

@@ -1 +1 @@
21.05
21.11

View file

@@ -21,10 +21,10 @@
# Community
* [Discourse Forum](https://discourse.nixos.org/)
* [IRC - #nixos on freenode.net](irc://irc.freenode.net/#nixos)
* [Matrix Chat](https://matrix.to/#/#community:nixos.org)
* [NixOS Weekly](https://weekly.nixos.org/)
* [Community-maintained wiki](https://nixos.wiki/)
* [Community-maintained list of ways to get in touch](https://nixos.wiki/wiki/Get_In_Touch#Chat) (Discord, Matrix, Telegram, other IRC channels, etc.)
* [Community-maintained list of ways to get in touch](https://nixos.wiki/wiki/Get_In_Touch#Chat) (Discord, Telegram, IRC, etc.)
# Other Project Repositories

View file

@@ -238,7 +238,7 @@ The `staging` branch is a development branch where mass-rebuilds go. It should o
### Staging-next branch {#submitting-changes-staging-next-branch}
The `staging-next` branch is for stabilizing mass-rebuilds submitted to the `staging` branch prior to merging them into `master`. Mass-rebuilds should go via the `staging` branch. It should only see non-breaking commits that are fixing issues blocking it from being merged into the `master` branch.
The `staging-next` branch is for stabilizing mass-rebuilds submitted to the `staging` branch prior to merging them into `master`. Mass-rebuilds must go via the `staging` branch. It must only see non-breaking commits that are fixing issues blocking it from being merged into the `master` branch.
If the branch is already in a broken state, please refrain from adding extra new breakages. Stabilize it for a few days and then merge into master.
@@ -248,6 +248,8 @@ For cherry-picking a commit to a stable release branch (“backporting”), use
Add a reason for the backport by using `git cherry-pick -xe <original commit>` instead when it is not obvious from the original commit message. It is not needed when it's a minor version update that includes security and bug fixes but doesn't add new features, or when the commit fixes an otherwise broken package.
For backporting Pull Requests to stable branches, assign the label `backport <branch>` to the original Pull Request, and automation should take care of the rest once the Pull Request is merged.
Here is an example of a cherry-picked commit message with good reason description:
```

View file

@@ -4,13 +4,19 @@
In this document and related Nix expressions, we use the term _BEAM_ to describe the environment. BEAM is the name of the Erlang Virtual Machine and, as far as we're concerned from a packaging perspective, all languages that run on the BEAM are interchangeable. That which varies, like the build system, is transparent to users of any given BEAM package, so we make no distinction.
## Available versions and deprecations schedule
### Elixir
Nixpkgs follows the [official Elixir deprecation schedule](https://hexdocs.pm/elixir/compatibility-and-deprecations.html) and keeps the last 5 released versions of Elixir available.
## Structure {#beam-structure}
All BEAM-related expressions are available via the top-level `beam` attribute, which includes:
- `interpreters`: a set of compilers running on the BEAM, including multiple Erlang/OTP versions (`beam.interpreters.erlangR19`, etc), Elixir (`beam.interpreters.elixir`) and LFE (Lisp Flavoured Erlang) (`beam.interpreters.lfe`).
- `interpreters`: a set of compilers running on the BEAM, including multiple Erlang/OTP versions (`beam.interpreters.erlangR22`, etc), Elixir (`beam.interpreters.elixir`) and LFE (Lisp Flavoured Erlang) (`beam.interpreters.lfe`).
- `packages`: a set of package builders (Mix and rebar3), each compiled with a specific Erlang/OTP version, e.g. `beam.packages.erlangR19`.
- `packages`: a set of package builders (Mix and rebar3), each compiled with a specific Erlang/OTP version, e.g. `beam.packages.erlang22`.
The default Erlang compiler, defined by `beam.interpreters.erlang`, is aliased as `erlang`. The default BEAM package set is defined by `beam.packages.erlang` and aliased at the top level as `beamPackages`.
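For instance, a development shell that uses these attributes could look like the following sketch; `mkShell` and the `rebar3` attribute are assumptions not spelled out above:
```nix
# shell.nix — a minimal sketch using the default Erlang/OTP and its package set
with import <nixpkgs> { };

mkShell {
  buildInputs = [
    beam.interpreters.erlang  # the default Erlang compiler described above
    beamPackages.rebar3       # a build tool from the default BEAM package set (assumed attribute)
  ];
}
```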
@@ -80,7 +86,7 @@ let
version = "0.0.1";
mixEnv = "prod";
mixDeps = packages.fetchMixDeps {
mixFodDeps = packages.fetchMixDeps {
pname = "mix-deps-${pname}";
inherit src mixEnv version;
# nix will complain and tell you the right value to replace this with
@@ -124,7 +130,7 @@ let
in packages.mixRelease {
inherit src pname version mixEnv mixDeps;
inherit src pname version mixEnv mixFodDeps;
# if you have build time environment variables add them here
MY_ENV_VAR="my_value";
preInstall = ''

View file

@@ -8,9 +8,9 @@
<xi:include href="preface.chapter.xml" />
<part>
<title>Using Nixpkgs</title>
<xi:include href="using/configuration.xml" />
<xi:include href="using/overlays.xml" />
<xi:include href="using/overrides.xml" />
<xi:include href="using/configuration.chapter.xml" />
<xi:include href="using/overlays.chapter.xml" />
<xi:include href="using/overrides.chapter.xml" />
<xi:include href="functions.xml" />
</part>
<part>

View file

@@ -0,0 +1,356 @@
# Global configuration {#chap-packageconfig}
Nix comes with certain defaults about what packages can and cannot be installed, based on a package's metadata. By default, Nix will prevent installation if any of the following criteria are true:
- The package is thought to be broken, and has had its `meta.broken` set to `true`.
- The package isn't intended to run on the given system, as none of its `meta.platforms` match the given system.
- The package's `meta.license` is set to a license which is considered to be unfree.
- The package has known security vulnerabilities but has not been or cannot be updated for some reason, and a list of issues has been entered into the package's `meta.knownVulnerabilities`.
Note that all this is checked during evaluation already, and the check includes any package that is evaluated. In particular, all build-time dependencies are checked. `nix-env -qa` will (attempt to) hide any packages that would be refused.
Each of these criteria can be altered in the nixpkgs configuration.
The nixpkgs configuration for a NixOS system is set in the `configuration.nix`, as in the following example:
```nix
{
  nixpkgs.config = {
    allowUnfree = true;
  };
}
```
However, this does not allow unfree software for individual users. Their configurations are managed separately.
A user's nixpkgs configuration is stored in a user-specific configuration file located at `~/.config/nixpkgs/config.nix`. For example:
```nix
{
  allowUnfree = true;
}
```
Note that we are not able to test or build unfree software on Hydra due to policy. Most unfree licenses prohibit us from either executing or distributing the software.
## Installing broken packages {#sec-allow-broken}
There are two ways to try compiling a package which has been marked as broken.
- For allowing the build of a broken package once, you can use an environment variable for a single invocation of the nix tools:
```ShellSession
$ export NIXPKGS_ALLOW_BROKEN=1
```
- For permanently allowing broken packages to be built, you may add `allowBroken = true;` to your user's configuration file, like this:
```nix
{
  allowBroken = true;
}
```
## Installing packages on unsupported systems {#sec-allow-unsupported-system}
There are also two ways to try compiling a package which has been marked as unsupported for the given system.
- For allowing the build of an unsupported package once, you can use an environment variable for a single invocation of the nix tools:
```ShellSession
$ export NIXPKGS_ALLOW_UNSUPPORTED_SYSTEM=1
```
- For permanently allowing unsupported packages to be built, you may add `allowUnsupportedSystem = true;` to your user's configuration file, like this:
```nix
{
  allowUnsupportedSystem = true;
}
```
The difference between a package being unsupported on some system and being broken is admittedly a bit fuzzy. If a program *ought* to work on a certain platform, but doesn't, the platform should be included in `meta.platforms`, but marked as broken with e.g. `meta.broken = !hostPlatform.isWindows`. Of course, this raises the question of what "ought" means exactly. That is left to the package maintainer.
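As an illustration only, a hypothetical derivation might keep the platform listed while flagging it as broken there, mirroring the sentence above (all names here are made up):
```nix
# a hypothetical sketch; only the meta handling is the point
{ lib, stdenv }:

stdenv.mkDerivation {
  pname = "some-tool";
  version = "1.0";
  meta = with lib; {
    # the platform stays listed in meta.platforms…
    platforms = platforms.all;
    # …but the package is marked broken where it is currently known not to work
    broken = !stdenv.hostPlatform.isWindows;
  };
}
```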
## Installing unfree packages {#sec-allow-unfree}
There are several ways to tweak how Nix handles a package which has been marked as unfree.
- To temporarily allow all unfree packages, you can use an environment variable for a single invocation of the nix tools:
```ShellSession
$ export NIXPKGS_ALLOW_UNFREE=1
```
- It is possible to permanently allow individual unfree packages, while still blocking unfree packages by default using the `allowUnfreePredicate` configuration option in the user configuration file.
This option is a function which accepts a package as a parameter, and returns a boolean. The following example configuration accepts a package and always returns false:
```nix
{
  allowUnfreePredicate = (pkg: false);
}
```
For a more useful example, try the following. This configuration only allows unfree packages named roon-server and visual studio code:
```nix
{
  allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [
    "roon-server"
    "vscode"
  ];
}
```
- It is also possible to allow and block licenses that are specifically acceptable or not acceptable, using `allowlistedLicenses` and `blocklistedLicenses`, respectively.
The following example configuration allowlists the licenses `amd` and `wtfpl`:
```nix
{
  allowlistedLicenses = with lib.licenses; [ amd wtfpl ];
}
```
The following example configuration blocklists the `gpl3Only` and `agpl3Only` licenses:
```nix
{
  blocklistedLicenses = with lib.licenses; [ agpl3Only gpl3Only ];
}
```
Note that `allowlistedLicenses` only applies to unfree licenses unless `allowUnfree` is enabled. It is not a generic allowlist for all types of licenses. `blocklistedLicenses` applies to all licenses.
A complete list of licenses can be found in the file `lib/licenses.nix` of the nixpkgs tree.
## Installing insecure packages {#sec-allow-insecure}
There are several ways to tweak how Nix handles a package which has been marked as insecure.
- To temporarily allow all insecure packages, you can use an environment variable for a single invocation of the nix tools:
```ShellSession
$ export NIXPKGS_ALLOW_INSECURE=1
```
- It is possible to permanently allow individual insecure packages, while still blocking other insecure packages by default using the `permittedInsecurePackages` configuration option in the user configuration file.
The following example configuration permits the installation of the hypothetically insecure package `hello`, version `1.2.3`:
```nix
{
  permittedInsecurePackages = [
    "hello-1.2.3"
  ];
}
```
- It is also possible to create a custom policy around which insecure packages to allow and deny, by overriding the `allowInsecurePredicate` configuration option.
The `allowInsecurePredicate` option is a function which accepts a package and returns a boolean, much like `allowUnfreePredicate`.
The following configuration example only allows insecure packages with very short names:
```nix
{
  allowInsecurePredicate = pkg: builtins.stringLength (lib.getName pkg) <= 5;
}
```
Note that `permittedInsecurePackages` is only checked if `allowInsecurePredicate` is not specified.
## Modify packages via `packageOverrides` {#sec-modify-via-packageOverrides}
You can define a function called `packageOverrides` in your local `~/.config/nixpkgs/config.nix` to override Nix packages. It must be a function that takes pkgs as an argument and returns a modified set of packages.
```nix
{
  packageOverrides = pkgs: rec {
    foo = pkgs.foo.override { ... };
  };
}
```
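Filling in the placeholder with a concrete build flag might look like the following sketch; the `foo` package and its `withGui` argument are purely illustrative:
```nix
{
  packageOverrides = pkgs: rec {
    # hypothetical example: rebuild `foo` without its GUI
    foo = pkgs.foo.override { withGui = false; };
  };
}
```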
## Declarative Package Management {#sec-declarative-package-management}
### Build an environment {#sec-building-environment}
Using `packageOverrides`, it is possible to manage packages declaratively. This means that we can list all of our desired packages within a declarative Nix expression. For example, to have `aspell`, `bc`, `ffmpeg`, `coreutils`, `gdb`, `nixUnstable`, `emscripten`, `jq`, `nox`, and `silver-searcher`, we could use the following in `~/.config/nixpkgs/config.nix`:
```nix
{
  packageOverrides = pkgs: with pkgs; {
    myPackages = pkgs.buildEnv {
      name = "my-packages";
      paths = [
        aspell
        bc
        coreutils
        gdb
        ffmpeg
        nixUnstable
        emscripten
        jq
        nox
        silver-searcher
      ];
    };
  };
}
```
To install it into our environment, you can just run `nix-env -iA nixpkgs.myPackages`. If you want to load the packages to be built from a working copy of `nixpkgs`, you just run `nix-env -f. -iA myPackages`. To explore what's been installed, just look through `~/.nix-profile/`. You can see that a lot of stuff has been installed. Some of it is useful, some of it isn't. Let's tell Nixpkgs to only link the stuff that we want:
```nix
{
  packageOverrides = pkgs: with pkgs; {
    myPackages = pkgs.buildEnv {
      name = "my-packages";
      paths = [
        aspell
        bc
        coreutils
        gdb
        ffmpeg
        nixUnstable
        emscripten
        jq
        nox
        silver-searcher
      ];
      pathsToLink = [ "/share" "/bin" ];
    };
  };
}
```
`pathsToLink` tells Nixpkgs to only link the paths listed, which gets rid of the extra stuff in the profile. `/bin` and `/share` are good defaults for a user environment, getting rid of the clutter. If you are running Nix on macOS, you may want to add another path as well, `/Applications`, which makes GUI apps available.
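For example, a macOS user might extend `pathsToLink` like this (a sketch reusing a shortened version of the environment above); `/Applications` only has an effect for packages that ship `.app` bundles:
```nix
{
  packageOverrides = pkgs: with pkgs; {
    myPackages = pkgs.buildEnv {
      name = "my-packages";
      paths = [ aspell bc coreutils ];
      # also link GUI applications into the profile on macOS
      pathsToLink = [ "/share" "/bin" "/Applications" ];
    };
  };
}
```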
### Getting documentation {#sec-getting-documentation}
After building that new environment, look through `~/.nix-profile` to make sure everything is there that we wanted. Discerning readers will note that some files are missing. Look inside `~/.nix-profile/share/man/man1/` to verify this. There are no man pages for any of the Nix tools! This is because some packages like Nix have multiple outputs for things like documentation (see section 4). Let's make Nix install those as well.
```nix
{
  packageOverrides = pkgs: with pkgs; {
    myPackages = pkgs.buildEnv {
      name = "my-packages";
      paths = [
        aspell
        bc
        coreutils
        ffmpeg
        nixUnstable
        emscripten
        jq
        nox
        silver-searcher
      ];
      pathsToLink = [ "/share/man" "/share/doc" "/bin" ];
      extraOutputsToInstall = [ "man" "doc" ];
    };
  };
}
```
This provides us with some useful documentation for using our packages. However, if we actually want those manpages to be detected by man, we need to set up our environment. This can also be managed within Nix expressions.
```nix
{
  packageOverrides = pkgs: with pkgs; rec {
    myProfile = writeText "my-profile" ''
      export PATH=$HOME/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/sbin:/bin:/usr/sbin:/usr/bin
      export MANPATH=$HOME/.nix-profile/share/man:/nix/var/nix/profiles/default/share/man:/usr/share/man
    '';
    myPackages = pkgs.buildEnv {
      name = "my-packages";
      paths = [
        (runCommand "profile" {} ''
          mkdir -p $out/etc/profile.d
          cp ${myProfile} $out/etc/profile.d/my-profile.sh
        '')
        aspell
        bc
        coreutils
        ffmpeg
        man
        nixUnstable
        emscripten
        jq
        nox
        silver-searcher
      ];
      pathsToLink = [ "/share/man" "/share/doc" "/bin" "/etc" ];
      extraOutputsToInstall = [ "man" "doc" ];
    };
  };
}
```
For this to work fully, you must also have this script sourced when you are logged in. Try adding something like this to your `~/.profile` file:
```ShellSession
#!/bin/sh
if [ -d $HOME/.nix-profile/etc/profile.d ]; then
  for i in $HOME/.nix-profile/etc/profile.d/*.sh; do
    if [ -r $i ]; then
      . $i
    fi
  done
fi
```
Now just run `source $HOME/.profile` and you can start loading man pages from your environment.
### GNU info setup {#sec-gnu-info-setup}
Configuring GNU info is a little bit trickier than man pages. To work correctly, info needs a database to be generated. This can be done with some small modifications to our environment scripts.
```nix
{
  packageOverrides = pkgs: with pkgs; rec {
    myProfile = writeText "my-profile" ''
      export PATH=$HOME/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/sbin:/bin:/usr/sbin:/usr/bin
      export MANPATH=$HOME/.nix-profile/share/man:/nix/var/nix/profiles/default/share/man:/usr/share/man
      export INFOPATH=$HOME/.nix-profile/share/info:/nix/var/nix/profiles/default/share/info:/usr/share/info
    '';
    myPackages = pkgs.buildEnv {
      name = "my-packages";
      paths = [
        (runCommand "profile" {} ''
          mkdir -p $out/etc/profile.d
          cp ${myProfile} $out/etc/profile.d/my-profile.sh
        '')
        aspell
        bc
        coreutils
        ffmpeg
        man
        nixUnstable
        emscripten
        jq
        nox
        silver-searcher
        texinfoInteractive
      ];
      pathsToLink = [ "/share/man" "/share/doc" "/share/info" "/bin" "/etc" ];
      extraOutputsToInstall = [ "man" "doc" "info" ];
      postBuild = ''
        if [ -x $out/bin/install-info -a -w $out/share/info ]; then
          shopt -s nullglob
          for i in $out/share/info/*.info $out/share/info/*.info.gz; do
            $out/bin/install-info $i $out/share/info/dir
          done
        fi
      '';
    };
  };
}
```
`postBuild` tells Nixpkgs to run a command after building the environment. In this case, `install-info` adds the installed info pages to `dir`, which is GNU info's default root node. Note that `texinfoInteractive` is added to the environment to provide the `install-info` command.

View file

@@ -1,451 +0,0 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-packageconfig">
<title>Global configuration</title>
<para>
Nix comes with certain defaults about what packages can and cannot be installed, based on a package's metadata. By default, Nix will prevent installation if any of the following criteria are true:
</para>
<itemizedlist>
<listitem>
<para>
The package is thought to be broken, and has had its <literal>meta.broken</literal> set to <literal>true</literal>.
</para>
</listitem>
<listitem>
<para>
The package isn't intended to run on the given system, as none of its <literal>meta.platforms</literal> match the given system.
</para>
</listitem>
<listitem>
<para>
The package's <literal>meta.license</literal> is set to a license which is considered to be unfree.
</para>
</listitem>
<listitem>
<para>
The package has known security vulnerabilities but has not or can not be updated for some reason, and a list of issues has been entered in to the package's <literal>meta.knownVulnerabilities</literal>.
</para>
</listitem>
</itemizedlist>
<para>
Note that all this is checked during evaluation already, and the check includes any package that is evaluated. In particular, all build-time dependencies are checked. <literal>nix-env -qa</literal> will (attempt to) hide any packages that would be refused.
</para>
<para>
Each of these criteria can be altered in the nixpkgs configuration.
</para>
<para>
The nixpkgs configuration for a NixOS system is set in the <literal>configuration.nix</literal>, as in the following example:
<programlisting>
{
nixpkgs.config = {
allowUnfree = true;
};
}
</programlisting>
However, this does not allow unfree software for individual users. Their configurations are managed separately.
</para>
<para>
A user's nixpkgs configuration is stored in a user-specific configuration file located at <filename>~/.config/nixpkgs/config.nix</filename>. For example:
<programlisting>
{
allowUnfree = true;
}
</programlisting>
</para>
<para>
Note that we are not able to test or build unfree software on Hydra due to policy. Most unfree licenses prohibit us from either executing or distributing the software.
</para>
<section xml:id="sec-allow-broken">
<title>Installing broken packages</title>
<para>
There are two ways to try compiling a package which has been marked as broken.
</para>
<itemizedlist>
<listitem>
<para>
For allowing the build of a broken package once, you can use an environment variable for a single invocation of the nix tools:
<screen><prompt>$ </prompt>export NIXPKGS_ALLOW_BROKEN=1</screen>
</para>
</listitem>
<listitem>
<para>
For permanently allowing broken packages to be built, you may add <literal>allowBroken = true;</literal> to your user's configuration file, like this:
<programlisting>
{
allowBroken = true;
}
</programlisting>
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="sec-allow-unsupported-system">
<title>Installing packages on unsupported systems</title>
<para>
There are also two ways to try compiling a package which has been marked as unsupported for the given system.
</para>
<itemizedlist>
<listitem>
<para>
For allowing the build of an unsupported package once, you can use an environment variable for a single invocation of the nix tools:
<screen><prompt>$ </prompt>export NIXPKGS_ALLOW_UNSUPPORTED_SYSTEM=1</screen>
</para>
</listitem>
<listitem>
<para>
For permanently allowing unsupported packages to be built, you may add <literal>allowUnsupportedSystem = true;</literal> to your user's configuration file, like this:
<programlisting>
{
allowUnsupportedSystem = true;
}
</programlisting>
</para>
</listitem>
</itemizedlist>
<para>
The difference between a package being unsupported on some system and being broken is admittedly a bit fuzzy. If a program <emphasis>ought</emphasis> to work on a certain platform, but doesn't, the platform should be included in <literal>meta.platforms</literal>, but marked as broken with e.g. <literal>meta.broken = !hostPlatform.isWindows</literal>. Of course, this begs the question of what "ought" means exactly. That is left to the package maintainer.
</para>
</section>
<section xml:id="sec-allow-unfree">
<title>Installing unfree packages</title>
<para>
There are several ways to tweak how Nix handles a package which has been marked as unfree.
</para>
<itemizedlist>
<listitem>
<para>
To temporarily allow all unfree packages, you can use an environment variable for a single invocation of the nix tools:
<screen><prompt>$ </prompt>export NIXPKGS_ALLOW_UNFREE=1</screen>
</para>
</listitem>
<listitem>
<para>
It is possible to permanently allow individual unfree packages, while still blocking unfree packages by default using the <literal>allowUnfreePredicate</literal> configuration option in the user configuration file.
</para>
<para>
This option is a function which accepts a package as a parameter, and returns a boolean. The following example configuration accepts a package and always returns false:
<programlisting>
{
allowUnfreePredicate = (pkg: false);
}
</programlisting>
</para>
<para>
For a more useful example, try the following. This configuration only allows unfree packages named roon-server and visual studio code:
<programlisting>
{
allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [
"roon-server"
"vscode"
];
}
</programlisting>
</para>
</listitem>
<listitem>
<para>
It is also possible to allow and block licenses that are specifically acceptable or not acceptable, using <literal>allowlistedLicenses</literal> and <literal>blocklistedLicenses</literal>, respectively.
</para>
<para>
The following example configuration allowlists the licenses <literal>amd</literal> and <literal>wtfpl</literal>:
<programlisting>
{
allowlistedLicenses = with lib.licenses; [ amd wtfpl ];
}
</programlisting>
</para>
<para>
The following example configuration blocklists the <literal>gpl3Only</literal> and <literal>agpl3Only</literal> licenses:
<programlisting>
{
blocklistedLicenses = with lib.licenses; [ agpl3Only gpl3Only ];
}
</programlisting>
</para>
<para>
Note that <literal>allowlistedLicenses</literal> only applies to unfree licenses unless <literal>allowUnfree</literal> is enabled. It is not a generic allowlist for all types of licenses. <literal>blocklistedLicenses</literal> applies to all licenses.
</para>
</listitem>
</itemizedlist>
<para>
A complete list of licenses can be found in the file <filename>lib/licenses.nix</filename> of the nixpkgs tree.
</para>
</section>
<section xml:id="sec-allow-insecure">
<title>Installing insecure packages</title>
<para>
There are several ways to tweak how Nix handles a package which has been marked as insecure.
</para>
<itemizedlist>
<listitem>
<para>
To temporarily allow all insecure packages, you can use an environment variable for a single invocation of the nix tools:
<screen><prompt>$ </prompt>export NIXPKGS_ALLOW_INSECURE=1</screen>
</para>
</listitem>
<listitem>
<para>
It is possible to permanently allow individual insecure packages, while still blocking other insecure packages by default using the <literal>permittedInsecurePackages</literal> configuration option in the user configuration file.
</para>
<para>
The following example configuration permits the installation of the hypothetically insecure package <literal>hello</literal>, version <literal>1.2.3</literal>:
<programlisting>
{
permittedInsecurePackages = [
"hello-1.2.3"
];
}
</programlisting>
</para>
</listitem>
<listitem>
<para>
It is also possible to create a custom policy around which insecure packages to allow and deny, by overriding the <literal>allowInsecurePredicate</literal> configuration option.
</para>
<para>
The <literal>allowInsecurePredicate</literal> option is a function which accepts a package and returns a boolean, much like <literal>allowUnfreePredicate</literal>.
</para>
<para>
The following configuration example only allows insecure packages with very short names:
<programlisting>
{
allowInsecurePredicate = pkg: builtins.stringLength (lib.getName pkg) &lt;= 5;
}
</programlisting>
</para>
<para>
Note that <literal>permittedInsecurePackages</literal> is only checked if <literal>allowInsecurePredicate</literal> is not specified.
</para>
</listitem>
</itemizedlist>
</section>
<!--============================================================-->
<section xml:id="sec-modify-via-packageOverrides">
<title>Modify packages via <literal>packageOverrides</literal></title>
<para>
You can define a function called <varname>packageOverrides</varname> in your local <filename>~/.config/nixpkgs/config.nix</filename> to override Nix packages. It must be a function that takes pkgs as an argument and returns a modified set of packages.
<programlisting>
{
packageOverrides = pkgs: rec {
foo = pkgs.foo.override { ... };
};
}
</programlisting>
</para>
</section>
<section xml:id="sec-declarative-package-management">
<title>Declarative Package Management</title>
<section xml:id="sec-building-environment">
<title>Build an environment</title>
<para>
Using <literal>packageOverrides</literal>, it is possible to manage packages declaratively. This means that we can list all of our desired packages within a declarative Nix expression. For example, to have <literal>aspell</literal>, <literal>bc</literal>, <literal>ffmpeg</literal>, <literal>coreutils</literal>, <literal>gdb</literal>, <literal>nixUnstable</literal>, <literal>emscripten</literal>, <literal>jq</literal>, <literal>nox</literal>, and <literal>silver-searcher</literal>, we could use the following in <filename>~/.config/nixpkgs/config.nix</filename>:
</para>
<screen>
{
packageOverrides = pkgs: with pkgs; {
myPackages = pkgs.buildEnv {
name = "my-packages";
paths = [
aspell
bc
coreutils
gdb
ffmpeg
nixUnstable
emscripten
jq
nox
silver-searcher
];
};
};
}
</screen>
<para>
To install it into our environment, you can just run <literal>nix-env -iA nixpkgs.myPackages</literal>. If you want to load the packages to be built from a working copy of <literal>nixpkgs</literal> you just run <literal>nix-env -f. -iA myPackages</literal>. To explore what's been installed, just look through <filename>~/.nix-profile/</filename>. You can see that a lot of stuff has been installed. Some of this stuff is useful some of it isn't. Let's tell Nixpkgs to only link the stuff that we want:
</para>
<screen>
{
packageOverrides = pkgs: with pkgs; {
myPackages = pkgs.buildEnv {
name = "my-packages";
paths = [
aspell
bc
coreutils
gdb
ffmpeg
nixUnstable
emscripten
jq
nox
silver-searcher
];
pathsToLink = [ "/share" "/bin" ];
};
};
}
</screen>
<para>
<literal>pathsToLink</literal> tells Nixpkgs to only link the paths listed which gets rid of the extra stuff in the profile. <filename>/bin</filename> and <filename>/share</filename> are good defaults for a user environment, getting rid of the clutter. If you are running on Nix on MacOS, you may want to add another path as well, <filename>/Applications</filename>, that makes GUI apps available.
</para>
</section>
<section xml:id="sec-getting-documentation">
<title>Getting documentation</title>
<para>
After building that new environment, look through <filename>~/.nix-profile</filename> to make sure everything is there that we wanted. Discerning readers will note that some files are missing. Look inside <filename>~/.nix-profile/share/man/man1/</filename> to verify this. There are no man pages for any of the Nix tools! This is because some packages like Nix have multiple outputs for things like documentation (see section 4). Let's make Nix install those as well.
</para>
<screen>
{
packageOverrides = pkgs: with pkgs; {
myPackages = pkgs.buildEnv {
name = "my-packages";
paths = [
aspell
bc
coreutils
ffmpeg
nixUnstable
emscripten
jq
nox
silver-searcher
];
pathsToLink = [ "/share/man" "/share/doc" "/bin" ];
extraOutputsToInstall = [ "man" "doc" ];
};
};
}
</screen>
<para>
This provides us with some useful documentation for using our packages. However, if we actually want those manpages to be detected by man, we need to set up our environment. This can also be managed within Nix expressions.
</para>
<screen>
{
packageOverrides = pkgs: with pkgs; rec {
myProfile = writeText "my-profile" ''
export PATH=$HOME/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/sbin:/bin:/usr/sbin:/usr/bin
export MANPATH=$HOME/.nix-profile/share/man:/nix/var/nix/profiles/default/share/man:/usr/share/man
'';
myPackages = pkgs.buildEnv {
name = "my-packages";
paths = [
(runCommand "profile" {} ''
mkdir -p $out/etc/profile.d
cp ${myProfile} $out/etc/profile.d/my-profile.sh
'')
aspell
bc
coreutils
ffmpeg
man
nixUnstable
emscripten
jq
nox
silver-searcher
];
pathsToLink = [ "/share/man" "/share/doc" "/bin" "/etc" ];
extraOutputsToInstall = [ "man" "doc" ];
};
};
}
</screen>
<para>
For this to work fully, you must also have this script sourced when you are logged in. Try adding something like this to your <filename>~/.profile</filename> file:
</para>
<screen>
#!/bin/sh
if [ -d $HOME/.nix-profile/etc/profile.d ]; then
for i in $HOME/.nix-profile/etc/profile.d/*.sh; do
if [ -r $i ]; then
. $i
fi
done
fi
</screen>
<para>
Now just run <literal>source $HOME/.profile</literal> and you can starting loading man pages from your environment.
</para>
</section>
<section xml:id="sec-gnu-info-setup">
<title>GNU info setup</title>
<para>
Configuring GNU info is a little bit trickier than man pages. To work correctly, info needs a database to be generated. This can be done with some small modifications to our environment scripts.
</para>
<screen>
{
packageOverrides = pkgs: with pkgs; rec {
myProfile = writeText "my-profile" ''
export PATH=$HOME/.nix-profile/bin:/nix/var/nix/profiles/default/bin:/sbin:/bin:/usr/sbin:/usr/bin
export MANPATH=$HOME/.nix-profile/share/man:/nix/var/nix/profiles/default/share/man:/usr/share/man
export INFOPATH=$HOME/.nix-profile/share/info:/nix/var/nix/profiles/default/share/info:/usr/share/info
'';
myPackages = pkgs.buildEnv {
name = "my-packages";
paths = [
(runCommand "profile" {} ''
mkdir -p $out/etc/profile.d
cp ${myProfile} $out/etc/profile.d/my-profile.sh
'')
aspell
bc
coreutils
ffmpeg
man
nixUnstable
emscripten
jq
nox
silver-searcher
texinfoInteractive
];
pathsToLink = [ "/share/man" "/share/doc" "/share/info" "/bin" "/etc" ];
extraOutputsToInstall = [ "man" "doc" "info" ];
postBuild = ''
if [ -x $out/bin/install-info -a -w $out/share/info ]; then
shopt -s nullglob
for i in $out/share/info/*.info $out/share/info/*.info.gz; do
$out/bin/install-info $i $out/share/info/dir
done
fi
'';
};
};
}
</screen>
<para>
<literal>postBuild</literal> tells Nixpkgs to run a command after building the environment. In this case, <literal>install-info</literal> adds the installed info pages to <literal>dir</literal> which is GNU info's default root node. Note that <literal>texinfoInteractive</literal> is added to the environment to give the <literal>install-info</literal> command.
</para>
</section>
</section>
</chapter>

View file

@@ -0,0 +1,149 @@
# Overlays {#chap-overlays}
This chapter describes how to extend and change Nixpkgs using overlays. Overlays are used to add layers in the fixed-point used by Nixpkgs to compose the set of all packages.
Nixpkgs can be configured with a list of overlays, which are applied in order. This means that the order of the overlays can be significant if multiple layers override the same package.
## Installing overlays {#sec-overlays-install}
The list of overlays can be set either explicitly in a Nix expression, or through `<nixpkgs-overlays>` or user configuration files.
### Set overlays in NixOS or Nix expressions {#sec-overlays-argument}
On a NixOS system the value of the `nixpkgs.overlays` option, if present, is passed to the system Nixpkgs directly as an argument. Note that this does not affect the overlays for non-NixOS operations (e.g. `nix-env`), which are [looked up](#sec-overlays-lookup) independently.
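For example, a NixOS `configuration.nix` can set the option directly (a minimal sketch; the overlay body is a placeholder):
```nix
{
  nixpkgs.overlays = [
    (self: super: {
      # package overrides go here
    })
  ];
}
```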
The list of overlays can be passed explicitly when importing nixpkgs, for example `import <nixpkgs> { overlays = [ overlay1 overlay2 ]; }`.
NOTE: DO NOT USE THIS in nixpkgs. Further overlays can be added by calling `pkgs.extend` or `pkgs.appendOverlays`, although it is often preferable to avoid these functions, because they recompute the Nixpkgs fixpoint, which is somewhat expensive to do.
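Outside of Nixpkgs itself, using these functions looks roughly like the following sketch (`myHello` is just an illustrative alias):
```nix
let
  pkgs = import <nixpkgs> { };
  # pkgs.extend takes a single overlay; pkgs.appendOverlays takes a list of overlays
  extended = pkgs.extend (self: super: {
    myHello = super.hello;
  });
in
  extended.myHello
```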
### Install overlays via configuration lookup {#sec-overlays-lookup}
The list of overlays is determined as follows.
1. First, if an [`overlays` argument](#sec-overlays-argument) to the Nixpkgs function itself is given, then that is used and no path lookup will be performed.
2. Otherwise, if the Nix path entry `<nixpkgs-overlays>` exists, we look for overlays at that path, as described below.
See the section on `NIX_PATH` in the Nix manual for more details on how to set a value for `<nixpkgs-overlays>`.
3. If one of `~/.config/nixpkgs/overlays.nix` and `~/.config/nixpkgs/overlays/` exists, then we look for overlays at that path, as described below. It is an error if both exist.
If we are looking for overlays at a path, then there are two cases:
- If the path is a file, then the file is imported as a Nix expression and used as the list of overlays.
- If the path is a directory, then we take the content of the directory, order it lexicographically, and attempt to interpret each as an overlay by:
- Importing the file, if it is a `.nix` file.
- Importing a top-level `default.nix` file, if it is a directory.
Because overlays that are set in NixOS configuration do not affect non-NixOS operations such as `nix-env`, the `overlays.nix` option provides a convenient way to use the same overlays for a NixOS system configuration and user configuration: the same file can be used as `overlays.nix` and imported as the value of `nixpkgs.overlays`.
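Concretely, the shared file must evaluate to a list of overlays, and the NixOS configuration can then import it (a sketch; the path is illustrative):
```nix
# configuration.nix — reuse the same overlays.nix that nix-env picks up
{
  nixpkgs.overlays = import /home/alice/.config/nixpkgs/overlays.nix;
}
```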
## Defining overlays {#sec-overlays-definition}
Overlays are Nix functions which accept two arguments, conventionally called `self` and `super`, and return a set of packages. For example, the following is a valid overlay.
```nix
self: super:
{
  boost = super.boost.override {
    python = self.python3;
  };
  rr = super.callPackage ./pkgs/rr {
    stdenv = self.stdenv_32bit;
  };
}
```
The first argument (`self`) corresponds to the final package set. You should use this set for the dependencies of all packages specified in your overlay. For example, all the dependencies of `rr` in the example above come from `self`, as well as the overridden dependencies used in the `boost` override.
The second argument (`super`) corresponds to the result of the evaluation of the previous stages of Nixpkgs. It does not contain any of the packages added by the current overlay, nor any of the following overlays. This set should be used either to refer to packages you wish to override, or to access functions defined in Nixpkgs. For example, the original recipe of `boost` in the above example comes from `super`, as does the `callPackage` function.
The value returned by this function should be a set similar to `pkgs/top-level/all-packages.nix`, containing overridden and/or new packages.
Overlays are similar to other methods for customizing Nixpkgs, in particular the `packageOverrides` attribute described in <xref linkend="sec-modify-via-packageOverrides"/>. Indeed, `packageOverrides` acts as an overlay with only the `super` argument. It is therefore appropriate for basic use, but overlays are more powerful and easier to distribute.
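As a point of comparison, the `boost` part of the overlay above could be written with `packageOverrides` as in the sketch below; without a `self` argument, `pkgs.python3` always refers to the original package set rather than the final one:
```nix
# ~/.config/nixpkgs/config.nix
{
  packageOverrides = pkgs: {
    boost = pkgs.boost.override {
      python = pkgs.python3;
    };
  };
}
```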
## Using overlays to configure alternatives {#sec-overlays-alternatives}
Certain software packages have different implementations of the same interface. Other distributions have functionality to switch between these. For example, Debian provides [DebianAlternatives](https://wiki.debian.org/DebianAlternatives). Nixpkgs has what we call `alternatives`, which are configured through overlays.
### BLAS/LAPACK {#sec-overlays-alternatives-blas-lapack}
In Nixpkgs, we have multiple implementations of the BLAS/LAPACK numerical linear algebra interfaces. They are:
- [OpenBLAS](https://www.openblas.net/)
The Nixpkgs attribute is `openblas` for ILP64 (integer width = 64 bits) and `openblasCompat` for LP64 (integer width = 32 bits). `openblasCompat` is the default.
- [LAPACK reference](http://www.netlib.org/lapack/) (also provides BLAS)
The Nixpkgs attribute is `lapack-reference`.
- [Intel MKL](https://software.intel.com/en-us/mkl) (only works on the x86_64 architecture, unfree)
The Nixpkgs attribute is `mkl`.
- [BLIS](https://github.com/flame/blis)
BLIS, available through the attribute `blis`, is a framework for linear algebra kernels. In addition, it implements the BLAS interface.
- [AMD BLIS/LIBFLAME](https://developer.amd.com/amd-aocl/blas-library/) (optimized for modern AMD x86_64 CPUs)
The AMD fork of the BLIS library, with attribute `amd-blis`, extends BLIS with optimizations for modern AMD CPUs. The changes are usually submitted to the upstream BLIS project after some time. However, AMD BLIS typically provides some performance improvements on AMD Zen CPUs. The complementary AMD LIBFLAME library, with attribute `amd-libflame`, provides a LAPACK implementation.
Introduced in [PR #83888](https://github.com/NixOS/nixpkgs/pull/83888), we are able to override the `blas` and `lapack` packages to use different implementations, through the `blasProvider` and `lapackProvider` arguments. This can be used to select a different provider. BLAS providers will have symlinks in `$out/lib/libblas.so.3` and `$out/lib/libcblas.so.3` to their respective BLAS libraries. Likewise, LAPACK providers will have symlinks in `$out/lib/liblapack.so.3` and `$out/lib/liblapacke.so.3` to their respective LAPACK libraries. For example, Intel MKL is both a BLAS and LAPACK provider. An overlay that uses Intel MKL can look like this:
```nix
self: super:
{
  blas = super.blas.override {
    blasProvider = self.mkl;
  };
  lapack = super.lapack.override {
    lapackProvider = self.mkl;
  };
}
```
This overlay uses Intel's MKL library for both BLAS and LAPACK interfaces. Note that the same can be accomplished at runtime using `LD_LIBRARY_PATH` of `libblas.so.3` and `liblapack.so.3`. For instance:
```ShellSession
$ LD_LIBRARY_PATH=$(nix-build -A mkl)/lib:$LD_LIBRARY_PATH nix-shell -p octave --run octave
```
Intel MKL requires an `openmp` implementation when running with multiple processors. By default, `mkl` will use Intel's `iomp` implementation if no other is specified, but this is a runtime-only dependency and is binary-compatible with the LLVM implementation. To use that one instead, Intel recommends users set it with `LD_PRELOAD`. Note that `mkl` is only available on `x86_64-linux` and `x86_64-darwin`. Moreover, Hydra is not building and distributing pre-compiled binaries using it.
For BLAS/LAPACK switching to work correctly, all packages must depend on `blas` or `lapack`. This ensures that only one BLAS/LAPACK library is used at one time. There are two versions of BLAS/LAPACK currently in the wild, `LP64` (integer size = 32 bits) and `ILP64` (integer size = 64 bits). Some software needs special flags or patches to work with `ILP64`. You can check if `ILP64` is used in Nixpkgs with `blas.isILP64` and `lapack.isILP64`. Some software does NOT work with `ILP64`, and derivations need to specify an assertion to prevent this. You can prevent `ILP64` from being used with the following:
```nix
{ stdenv, blas, lapack, ... }:
assert (!blas.isILP64) && (!lapack.isILP64);
stdenv.mkDerivation {
  ...
}
```
### Switching the MPI implementation {#sec-overlays-alternatives-mpi}
All programs that are built with [MPI](https://en.wikipedia.org/wiki/Message_Passing_Interface) support use the generic attribute `mpi` as an input. At the moment Nixpkgs natively provides two different MPI implementations:
- [Open MPI](https://www.open-mpi.org/) (default), attribute name
`openmpi`
- [MPICH](https://www.mpich.org/), attribute name `mpich`
To provide MPI-enabled applications that use `MPICH` instead of the default `Open MPI`, simply use the following overlay:
```nix
self: super:
{
  mpi = self.mpich;
}
```

View file

@@ -1,279 +0,0 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-overlays">
<title>Overlays</title>
<para>
This chapter describes how to extend and change Nixpkgs using overlays. Overlays are used to add layers in the fixed-point used by Nixpkgs to compose the set of all packages.
</para>
<para>
Nixpkgs can be configured with a list of overlays, which are applied in order. This means that the order of the overlays can be significant if multiple layers override the same package.
</para>
<!--============================================================-->
<section xml:id="sec-overlays-install">
<title>Installing overlays</title>
<para>
The list of overlays can be set either explicitly in a Nix expression, or through <literal>&lt;nixpkgs-overlays></literal> or user configuration files.
</para>
<section xml:id="sec-overlays-argument">
<title>Set overlays in NixOS or Nix expressions</title>
<para>
On a NixOS system the value of the <literal>nixpkgs.overlays</literal> option, if present, is passed to the system Nixpkgs directly as an argument. Note that this does not affect the overlays for non-NixOS operations (e.g. <literal>nix-env</literal>), which are <link xlink:href="#sec-overlays-lookup">looked</link> up independently.
</para>
<para>
The list of overlays can be passed explicitly when importing nixpkgs, for example <literal>import &lt;nixpkgs> { overlays = [ overlay1 overlay2 ]; }</literal>.
</para>
<para>
NOTE: DO NOT USE THIS in nixpkgs. Further overlays can be added by calling the <literal>pkgs.extend</literal> or <literal>pkgs.appendOverlays</literal>, although it is often preferable to avoid these functions, because they recompute the Nixpkgs fixpoint, which is somewhat expensive to do.
</para>
</section>
<section xml:id="sec-overlays-lookup">
<title>Install overlays via configuration lookup</title>
<para>
The list of overlays is determined as follows.
</para>
<para>
<orderedlist>
<listitem>
<para>
First, if an <link xlink:href="#sec-overlays-argument"><varname>overlays</varname> argument</link> to the Nixpkgs function itself is given, then that is used and no path lookup will be performed.
</para>
</listitem>
<listitem>
<para>
Otherwise, if the Nix path entry <literal>&lt;nixpkgs-overlays></literal> exists, we look for overlays at that path, as described below.
</para>
<para>
See the section on <literal>NIX_PATH</literal> in the Nix manual for more details on how to set a value for <literal>&lt;nixpkgs-overlays>.</literal>
</para>
</listitem>
<listitem>
<para>
If one of <filename>~/.config/nixpkgs/overlays.nix</filename> and <filename>~/.config/nixpkgs/overlays/</filename> exists, then we look for overlays at that path, as described below. It is an error if both exist.
</para>
</listitem>
</orderedlist>
</para>
<para>
If we are looking for overlays at a path, then there are two cases:
<itemizedlist>
<listitem>
<para>
If the path is a file, then the file is imported as a Nix expression and used as the list of overlays.
</para>
</listitem>
<listitem>
<para>
If the path is a directory, then we take the content of the directory, order it lexicographically, and attempt to interpret each as an overlay by:
<itemizedlist>
<listitem>
<para>
Importing the file, if it is a <literal>.nix</literal> file.
</para>
</listitem>
<listitem>
<para>
Importing a top-level <filename>default.nix</filename> file, if it is a directory.
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
</itemizedlist>
</para>
<para>
Because overlays that are set in NixOS configuration do not affect non-NixOS operations such as <literal>nix-env</literal>, the <filename>overlays.nix</filename> option provides a convenient way to use the same overlays for a NixOS system configuration and user configuration: the same file can be used as <filename>overlays.nix</filename> and imported as the value of <literal>nixpkgs.overlays</literal>.
</para>
<!-- TODO: Example of sharing overlays between NixOS configuration
and configuration lookup. Also reference the example
from the sec-overlays-argument paragraph about NixOS.
-->
</section>
</section>
<!--============================================================-->
<section xml:id="sec-overlays-definition">
<title>Defining overlays</title>
<para>
Overlays are Nix functions which accept two arguments, conventionally called <varname>self</varname> and <varname>super</varname>, and return a set of packages. For example, the following is a valid overlay.
</para>
<programlisting>
self: super:
{
boost = super.boost.override {
python = self.python3;
};
rr = super.callPackage ./pkgs/rr {
stdenv = self.stdenv_32bit;
};
}
</programlisting>
<para>
The first argument (<varname>self</varname>) corresponds to the final package set. You should use this set for the dependencies of all packages specified in your overlay. For example, all the dependencies of <varname>rr</varname> in the example above come from <varname>self</varname>, as well as the overridden dependencies used in the <varname>boost</varname> override.
</para>
<para>
The second argument (<varname>super</varname>) corresponds to the result of the evaluation of the previous stages of Nixpkgs. It does not contain any of the packages added by the current overlay, nor any of the following overlays. This set should be used either to refer to packages you wish to override, or to access functions defined in Nixpkgs. For example, the original recipe of <varname>boost</varname> in the above example, comes from <varname>super</varname>, as well as the <varname>callPackage</varname> function.
</para>
<para>
The value returned by this function should be a set similar to <filename>pkgs/top-level/all-packages.nix</filename>, containing overridden and/or new packages.
</para>
<para>
Overlays are similar to other methods for customizing Nixpkgs, in particular the <literal>packageOverrides</literal> attribute described in <xref linkend="sec-modify-via-packageOverrides"/>. Indeed, <literal>packageOverrides</literal> acts as an overlay with only the <varname>super</varname> argument. It is therefore appropriate for basic use, but overlays are more powerful and easier to distribute.
</para>
</section>
<section xml:id="sec-overlays-alternatives">
<title>Using overlays to configure alternatives</title>
<para>
Certain software packages have different implementations of the same interface. Other distributions have functionality to switch between these. For example, Debian provides <link
xlink:href="https://wiki.debian.org/DebianAlternatives">DebianAlternatives</link>. Nixpkgs has what we call <literal>alternatives</literal>, which are configured through overlays.
</para>
<section xml:id="sec-overlays-alternatives-blas-lapack">
<title>BLAS/LAPACK</title>
<para>
In Nixpkgs, we have multiple implementations of the BLAS/LAPACK numerical linear algebra interfaces. They are:
</para>
<itemizedlist>
<listitem>
<para>
<link xlink:href="https://www.openblas.net/">OpenBLAS</link>
</para>
<para>
The Nixpkgs attribute is <literal>openblas</literal> for ILP64 (integer width = 64 bits) and <literal>openblasCompat</literal> for LP64 (integer width = 32 bits). <literal>openblasCompat</literal> is the default.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="http://www.netlib.org/lapack/">LAPACK reference</link> (also provides BLAS)
</para>
<para>
The Nixpkgs attribute is <literal>lapack-reference</literal>.
</para>
</listitem>
<listitem>
<para>
<link
xlink:href="https://software.intel.com/en-us/mkl">Intel MKL</link> (only works on the x86_64 architecture, unfree)
</para>
<para>
The Nixpkgs attribute is <literal>mkl</literal>.
</para>
</listitem>
<listitem>
<para>
<link
xlink:href="https://github.com/flame/blis">BLIS</link>
</para>
<para>
BLIS, available through the attribute <literal>blis</literal>, is a framework for linear algebra kernels. In addition, it implements the BLAS interface.
</para>
</listitem>
<listitem>
<para>
<link
xlink:href="https://developer.amd.com/amd-aocl/blas-library/">AMD BLIS/LIBFLAME</link> (optimized for modern AMD x86_64 CPUs)
</para>
<para>
The AMD fork of the BLIS library, with attribute <literal>amd-blis</literal>, extends BLIS with optimizations for modern AMD CPUs. The changes are usually submitted to the upstream BLIS project after some time. However, AMD BLIS typically provides some performance improvements on AMD Zen CPUs. The complementary AMD LIBFLAME library, with attribute <literal>amd-libflame</literal>, provides a LAPACK implementation.
</para>
</listitem>
</itemizedlist>
<para>
Introduced in <link
xlink:href="https://github.com/NixOS/nixpkgs/pull/83888">PR #83888</link>, we are able to override the <literal>blas</literal> and <literal>lapack</literal> packages to use different implementations, through the <literal>blasProvider</literal> and <literal>lapackProvider</literal> argument. This can be used to select a different provider. BLAS providers will have symlinks in <literal>$out/lib/libblas.so.3</literal> and <literal>$out/lib/libcblas.so.3</literal> to their respective BLAS libraries. Likewise, LAPACK providers will have symlinks in <literal>$out/lib/liblapack.so.3</literal> and <literal>$out/lib/liblapacke.so.3</literal> to their respective LAPACK libraries. For example, Intel MKL is both a BLAS and LAPACK provider. An overlay can be created to use Intel MKL that looks like:
</para>
<programlisting>
self: super:
{
blas = super.blas.override {
blasProvider = self.mkl;
};
lapack = super.lapack.override {
lapackProvider = self.mkl;
};
}
</programlisting>
<para>
This overlay uses Intels MKL library for both BLAS and LAPACK interfaces. Note that the same can be accomplished at runtime using <literal>LD_LIBRARY_PATH</literal> of <literal>libblas.so.3</literal> and <literal>liblapack.so.3</literal>. For instance:
</para>
<screen>
<prompt>$ </prompt>LD_LIBRARY_PATH=$(nix-build -A mkl)/lib:$LD_LIBRARY_PATH nix-shell -p octave --run octave
</screen>
<para>
Intel MKL requires an <literal>openmp</literal> implementation when running with multiple processors. By default, <literal>mkl</literal> will use Intels <literal>iomp</literal> implementation if no other is specified, but this is a runtime-only dependency and binary compatible with the LLVM implementation. To use that one instead, Intel recommends users set it with <literal>LD_PRELOAD</literal>. Note that <literal>mkl</literal> is only available on <literal>x86_64-linux</literal> and <literal>x86_64-darwin</literal>. Moreover, Hydra is not building and distributing pre-compiled binaries using it.
</para>
<para>
For BLAS/LAPACK switching to work correctly, all packages must depend on <literal>blas</literal> or <literal>lapack</literal>. This ensures that only one BLAS/LAPACK library is used at one time. There are two versions of BLAS/LAPACK currently in the wild, <literal>LP64</literal> (integer size = 32 bits) and <literal>ILP64</literal> (integer size = 64 bits). Some software needs special flags or patches to work with <literal>ILP64</literal>. You can check if <literal>ILP64</literal> is used in Nixpkgs with <varname>blas.isILP64</varname> and <varname>lapack.isILP64</varname>. Some software does NOT work with <literal>ILP64</literal>, and derivations need to specify an assertion to prevent this. You can prevent <literal>ILP64</literal> from being used with the following:
</para>
<programlisting>
{ stdenv, blas, lapack, ... }:
assert (!blas.isILP64) &amp;&amp; (!lapack.isILP64);
stdenv.mkDerivation {
...
}
</programlisting>
</section>
<section xml:id="sec-overlays-alternatives-mpi">
<title>Switching the MPI implementation</title>
<para>
All programs that are built with <link xlink:href="https://en.wikipedia.org/wiki/Message_Passing_Interface">MPI</link> support use the generic attribute <varname>mpi</varname> as an input. At the moment Nixpkgs natively provides two different MPI implementations:
<itemizedlist>
<listitem>
<para>
<link xlink:href="https://www.open-mpi.org/">Open MPI</link> (default), attribute name <varname>openmpi</varname>
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://www.mpich.org/">MPICH</link>, attribute name <varname>mpich</varname>
</para>
</listitem>
</itemizedlist>
</para>
<para>
To provide MPI enabled applications that use <literal>MPICH</literal>, instead of the default <literal>Open MPI</literal>, simply use the following overlay:
</para>
<programlisting>
self: super:
{
mpi = self.mpich;
}
</programlisting>
</section>
</section>
</chapter>

View file

@ -0,0 +1,104 @@
# Overriding {#chap-overrides}
Sometimes one wants to override parts of `nixpkgs`, e.g. derivation attributes or the results of derivations.
These functions are used to make changes to individual packages, returning only the modified package. [Overlays](#chap-overlays), on the other hand, can be used to combine the overridden packages across the entire package set of Nixpkgs.
## &lt;pkg&gt;.override {#sec-pkg-override}
The function `override` is usually available for all the derivations in the nixpkgs expression (`pkgs`).
It is used to override the arguments passed to a function.
Example usages:
```nix
pkgs.foo.override { arg1 = val1; arg2 = val2; ... }
```
<!-- TODO: move below programlisting to a new section about extending and overlays and reference it -->
```nix
import pkgs.path { overlays = [ (self: super: {
foo = super.foo.override { barSupport = true; };
})]; }
```
```nix
mypkg = pkgs.callPackage ./mypkg.nix {
mydep = pkgs.mydep.override { ... };
}
```
In the first example, `pkgs.foo` is the result of a function call with some default arguments, usually a derivation. Using `pkgs.foo.override` will call the same function with the given new arguments.
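The argument to `override` can also be a function receiving the original arguments, which is convenient when a new value depends on an old one. A sketch, reusing the hypothetical `foo` and `barSupport` from the overlay example above (this works for packages defined via `callPackage`/`lib.makeOverridable`, which is the common case):

```nix
fooToggled = pkgs.foo.override (previous: {
  barSupport = !previous.barSupport;
});
```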
## &lt;pkg&gt;.overrideAttrs {#sec-pkg-overrideAttrs}
The function `overrideAttrs` allows overriding the attribute set passed to a `stdenv.mkDerivation` call, producing a new derivation based on the original one. This function is available on all derivations produced by the `stdenv.mkDerivation` function, which is most packages in the nixpkgs expression `pkgs`.
Example usage:
```nix
helloWithDebug = pkgs.hello.overrideAttrs (oldAttrs: rec {
separateDebugInfo = true;
});
```
In the above example, the `separateDebugInfo` attribute is overridden to be true, thus building debug info for `helloWithDebug`, while all other attributes will be retained from the original `hello` package.
The argument `oldAttrs` is conventionally used to refer to the attr set originally passed to `stdenv.mkDerivation`.
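Since `oldAttrs` gives access to the original values, it is also possible to extend an attribute instead of replacing it. A minimal sketch, where `./fix.patch` is a hypothetical local patch file:

```nix
helloPatched = pkgs.hello.overrideAttrs (oldAttrs: {
  # Keep any existing patches and append one more.
  patches = (oldAttrs.patches or []) ++ [ ./fix.patch ];
});
```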
::: note
Note that `separateDebugInfo` is processed only by the `stdenv.mkDerivation` function, not the generated, raw Nix derivation. Thus, using `overrideDerivation` will not work in this case, as it overrides only the attributes of the final derivation. It is for this reason that `overrideAttrs` should be preferred in (almost) all cases to `overrideDerivation`: it allows `stdenv.mkDerivation` to process the input arguments, and it is easier to use, since you can refer to the same attribute names you see in your Nix code instead of the ones in the generated derivation (e.g. `buildInputs` vs `nativeBuildInputs`), which also involves less typing.
:::
## &lt;pkg&gt;.overrideDerivation {#sec-pkg-overrideDerivation}
::: warning
You should prefer `overrideAttrs` in almost all cases, see its documentation for the reasons why. `overrideDerivation` is not deprecated and will continue to work, but is less nice to use and does not have as many abilities as `overrideAttrs`.
:::
::: warning
Do not use this function in Nixpkgs as it evaluates a Derivation before modifying it, which breaks package abstraction and removes error-checking of function arguments. In addition, this evaluation-per-function application incurs a performance penalty, which can become a problem if many overrides are used. It is only intended for ad-hoc customisation, such as in `~/.config/nixpkgs/config.nix`.
:::
The function `overrideDerivation` creates a new derivation based on an existing one by overriding the original's attributes with the attribute set produced by the specified function. This function is available on all derivations defined using the `makeOverridable` function. Most standard derivation-producing functions, such as `stdenv.mkDerivation`, are defined using this function, which means most packages in the nixpkgs expression, `pkgs`, have this function.
Example usage:
```nix
mySed = pkgs.gnused.overrideDerivation (oldAttrs: {
name = "sed-4.2.2-pre";
src = pkgs.fetchurl {
  url = "ftp://alpha.gnu.org/gnu/sed/sed-4.2.2-pre.tar.bz2";
sha256 = "11nq06d131y4wmf3drm0yk502d2xc6n5qy82cg88rb9nqd2lj41k";
};
patches = [];
});
```
In the above example, the `name`, `src`, and `patches` of the derivation will be overridden, while all other attributes will be retained from the original derivation.
The argument `oldAttrs` is used to refer to the attribute set of the original derivation.
::: note
A package's attributes are evaluated *before* being modified by the `overrideDerivation` function. For example, the `name` attribute reference in `url = "mirror://gnu/hello/${name}.tar.gz";` is filled-in *before* the `overrideDerivation` function modifies the attribute set. This means that overriding the `name` attribute, in this example, *will not* change the value of the `url` attribute. Instead, we need to override both the `name` *and* `url` attributes.
:::
## lib.makeOverridable {#sec-lib-makeOverridable}
The function `lib.makeOverridable` is used to make the result of a function easily customizable. This utility only makes sense for functions that accept an argument set and return an attribute set.
Example usage:
```nix
f = { a, b }: { result = a+b; };
c = lib.makeOverridable f { a = 1; b = 2; };
```
The variable `c` is the value of the `f` function applied with some default arguments. Hence the value of `c.result` is `3`, in this example.
The variable `c`, however, also has some additional functions, like
[c.override](#sec-pkg-override) which can be used to override the
default arguments. In this example, the value of
`(c.override { a = 4; }).result` is `6`.
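Overrides produced by `lib.makeOverridable` can be chained, and `override` also accepts a function of the previous arguments; a small sketch building on `c` from above:

```nix
# The result of override is itself overridable again:
e = (c.override { a = 4; }).override { b = 10; };   # e.result == 14

# override may take a function of the previous arguments:
d = c.override (previous: { a = previous.a + 10; }); # d.result == 13
```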

View file

@ -1,145 +0,0 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="chap-overrides">
<title>Overriding</title>
<para>
Sometimes one wants to override parts of <literal>nixpkgs</literal>, e.g. derivation attributes, the results of derivations.
</para>
<para>
These functions are used to make changes to packages, returning only single packages. <link xlink:href="#chap-overlays">Overlays</link>, on the other hand, can be used to combine the overridden packages across the entire package set of Nixpkgs.
</para>
<section xml:id="sec-pkg-override">
<title>&lt;pkg&gt;.override</title>
<para>
The function <varname>override</varname> is usually available for all the derivations in the nixpkgs expression (<varname>pkgs</varname>).
</para>
<para>
It is used to override the arguments passed to a function.
</para>
<para>
Example usages:
<programlisting>pkgs.foo.override { arg1 = val1; arg2 = val2; ... }</programlisting>
<!-- TODO: move below programlisting to a new section about extending and overlays
and reference it
-->
<programlisting>
import pkgs.path { overlays = [ (self: super: {
foo = super.foo.override { barSupport = true ; };
})]};
</programlisting>
<programlisting>
mypkg = pkgs.callPackage ./mypkg.nix {
mydep = pkgs.mydep.override { ... };
}
</programlisting>
</para>
<para>
In the first example, <varname>pkgs.foo</varname> is the result of a function call with some default arguments, usually a derivation. Using <varname>pkgs.foo.override</varname> will call the same function with the given new arguments.
</para>
</section>
<section xml:id="sec-pkg-overrideAttrs">
<title>&lt;pkg&gt;.overrideAttrs</title>
<para>
The function <varname>overrideAttrs</varname> allows overriding the attribute set passed to a <varname>stdenv.mkDerivation</varname> call, producing a new derivation based on the original one. This function is available on all derivations produced by the <varname>stdenv.mkDerivation</varname> function, which is most packages in the nixpkgs expression <varname>pkgs</varname>.
</para>
<para>
Example usage:
<programlisting>
helloWithDebug = pkgs.hello.overrideAttrs (oldAttrs: rec {
separateDebugInfo = true;
});
</programlisting>
</para>
<para>
In the above example, the <varname>separateDebugInfo</varname> attribute is overridden to be true, thus building debug info for <varname>helloWithDebug</varname>, while all other attributes will be retained from the original <varname>hello</varname> package.
</para>
<para>
The argument <varname>oldAttrs</varname> is conventionally used to refer to the attr set originally passed to <varname>stdenv.mkDerivation</varname>.
</para>
<note>
<para>
Note that <varname>separateDebugInfo</varname> is processed only by the <varname>stdenv.mkDerivation</varname> function, not the generated, raw Nix derivation. Thus, using <varname>overrideDerivation</varname> will not work in this case, as it overrides only the attributes of the final derivation. It is for this reason that <varname>overrideAttrs</varname> should be preferred in (almost) all cases to <varname>overrideDerivation</varname>, i.e. to allow using <varname>stdenv.mkDerivation</varname> to process input arguments, as well as the fact that it is easier to use (you can use the same attribute names you see in your Nix code, instead of the ones generated (e.g. <varname>buildInputs</varname> vs <varname>nativeBuildInputs</varname>), and it involves less typing).
</para>
</note>
</section>
<section xml:id="sec-pkg-overrideDerivation">
<title>&lt;pkg&gt;.overrideDerivation</title>
<warning>
<para>
You should prefer <varname>overrideAttrs</varname> in almost all cases, see its documentation for the reasons why. <varname>overrideDerivation</varname> is not deprecated and will continue to work, but is less nice to use and does not have as many abilities as <varname>overrideAttrs</varname>.
</para>
</warning>
<warning>
<para>
Do not use this function in Nixpkgs as it evaluates a Derivation before modifying it, which breaks package abstraction and removes error-checking of function arguments. In addition, this evaluation-per-function application incurs a performance penalty, which can become a problem if many overrides are used. It is only intended for ad-hoc customisation, such as in <filename>~/.config/nixpkgs/config.nix</filename>.
</para>
</warning>
<para>
The function <varname>overrideDerivation</varname> creates a new derivation based on an existing one by overriding the original's attributes with the attribute set produced by the specified function. This function is available on all derivations defined using the <varname>makeOverridable</varname> function. Most standard derivation-producing functions, such as <varname>stdenv.mkDerivation</varname>, are defined using this function, which means most packages in the nixpkgs expression, <varname>pkgs</varname>, have this function.
</para>
<para>
Example usage:
<programlisting>
mySed = pkgs.gnused.overrideDerivation (oldAttrs: {
name = "sed-4.2.2-pre";
src = fetchurl {
url = ftp://alpha.gnu.org/gnu/sed/sed-4.2.2-pre.tar.bz2;
sha256 = "11nq06d131y4wmf3drm0yk502d2xc6n5qy82cg88rb9nqd2lj41k";
};
patches = [];
});
</programlisting>
</para>
<para>
In the above example, the <varname>name</varname>, <varname>src</varname>, and <varname>patches</varname> of the derivation will be overridden, while all other attributes will be retained from the original derivation.
</para>
<para>
The argument <varname>oldAttrs</varname> is used to refer to the attribute set of the original derivation.
</para>
<note>
<para>
A package's attributes are evaluated *before* being modified by the <varname>overrideDerivation</varname> function. For example, the <varname>name</varname> attribute reference in <varname>url = "mirror://gnu/hello/${name}.tar.gz";</varname> is filled-in *before* the <varname>overrideDerivation</varname> function modifies the attribute set. This means that overriding the <varname>name</varname> attribute, in this example, *will not* change the value of the <varname>url</varname> attribute. Instead, we need to override both the <varname>name</varname> *and* <varname>url</varname> attributes.
</para>
</note>
</section>
<section xml:id="sec-lib-makeOverridable">
<title>lib.makeOverridable</title>
<para>
The function <varname>lib.makeOverridable</varname> is used to make the result of a function easily customizable. This utility only makes sense for functions that accept an argument set and return an attribute set.
</para>
<para>
Example usage:
<programlisting>
f = { a, b }: { result = a+b; };
c = lib.makeOverridable f { a = 1; b = 2; };
</programlisting>
</para>
<para>
The variable <varname>c</varname> is the value of the <varname>f</varname> function applied with some default arguments. Hence the value of <varname>c.result</varname> is <literal>3</literal>, in this example.
</para>
<para>
The variable <varname>c</varname> however also has some additional functions, like <link linkend="sec-pkg-override">c.override</link> which can be used to override the default arguments. In this example the value of <varname>(c.override { a = 4; }).result</varname> is 6.
</para>
</section>
</chapter>

View file

@ -18,6 +18,7 @@
"aarch64-linux"
"armv6l-linux"
"armv7l-linux"
"aarch64-darwin"
];
forAllSystems = f: lib.genAttrs systems (system: f system);

5
third_party/nixpkgs/lib/flake.nix vendored Normal file
View file

@ -0,0 +1,5 @@
{
description = "Library of low-level helper functions for nix expressions.";
outputs = { self }: { lib = import ./lib; };
}

View file

@ -41,6 +41,19 @@ rec {
else if final.isNetBSD then "nblibc"
# TODO(@Ericson2314) think more about other operating systems
else "native/impure";
# Choose what linker we wish to use by default. Someday we might also
# choose the C compiler, runtime library, C++ standard library, etc. in
# this way, nice and orthogonally, and deprecate `useLLVM`. But due to
# the monolithic GCC build we cannot actually make those choices
# independently, so we are just doing `linker` and keeping `useLLVM` for
# now.
linker =
/**/ if final.useLLVM or false then "lld"
else if final.isDarwin then "cctools"
# "bfd" and "gold" both come from GNU binutils. The existance of Gold
# is why we use the more obscure "bfd" and not "binutils" for this
# choice.
else "bfd";
extensions = {
sharedLibrary =
/**/ if final.isDarwin then ".dylib"
@ -118,7 +131,7 @@ rec {
else null;
# The canonical name for this attribute is darwinSdkVersion, but some
# platforms define the old name "sdkVer".
darwinSdkVersion = final.sdkVer or "10.12";
darwinSdkVersion = final.sdkVer or (if final.isAarch64 then "11.0" else "10.12");
darwinMinVersion = final.darwinSdkVersion;
darwinMinVersionVariable =
if final.isMacOS then "MACOSX_DEPLOYMENT_TARGET"

View file

@ -96,5 +96,5 @@ in {
embedded = filterDoubles predicates.isNone;
mesaPlatforms = ["i686-linux" "x86_64-linux" "x86_64-darwin" "armv5tel-linux" "armv6l-linux" "armv7l-linux" "armv7a-linux" "aarch64-linux" "powerpc64-linux" "powerpc64le-linux"];
mesaPlatforms = ["i686-linux" "x86_64-linux" "x86_64-darwin" "armv5tel-linux" "armv6l-linux" "armv7l-linux" "armv7a-linux" "aarch64-linux" "powerpc64-linux" "powerpc64le-linux" "aarch64-darwin"];
}

View file

@ -70,6 +70,15 @@ rec {
useAndroidPrebuilt = true;
};
aarch64-android = {
config = "aarch64-unknown-linux-android";
sdkVer = "30";
ndkVer = "21";
libc = "bionic";
useAndroidPrebuilt = false;
useLLVM = true;
};
scaleway-c1 = armv7l-hf-multiplatform // platforms.scaleway-c1;
pogoplug4 = {
@ -231,6 +240,12 @@ rec {
useiOSPrebuilt = true;
};
aarch64-darwin = {
config = "aarch64-apple-darwin";
xcodePlatform = "MacOSX";
platform = {};
};
#
# Windows
#

View file

@ -375,6 +375,13 @@ rec {
};
};
apple-m1 = {
gcc = {
arch = "armv8.3-a+crypto+sha2+aes+crc+fp16+lse+simd+ras+rdm+rcpc";
cpu = "apple-a13";
};
};
##
## MIPS
##
@ -495,7 +502,10 @@ rec {
else if lib.versionOlder version "6" then sheevaplug
else if lib.versionOlder version "7" then raspberrypi
else armv7l-hf-multiplatform
else if platform.isAarch64 then aarch64-multiplatform
else if platform.isAarch64 then
if platform.isDarwin then apple-m1
else aarch64-multiplatform
else if platform.isRiscV then riscv-multiplatform

View file

@ -171,7 +171,7 @@ rec {
On each release the first letter is bumped and a new animal is chosen
starting with that new letter.
*/
codeName = "Okapi";
codeName = "Porcupine";
/* Returns the current nixpkgs version suffix as string. */
versionSuffix =

View file

@ -1528,6 +1528,12 @@
githubId = 510553;
name = "Jos van Bakel";
};
c4605 = {
email = "bolasblack@gmail.com";
github = "bolasblack";
githubId = 382011;
name = "c4605";
};
caadar = {
email = "v88m@posteo.net";
github = "caadar";
@ -3663,6 +3669,12 @@
githubId = 10353047;
name = "Tobias Happ";
};
gfrascadorio = {
email = "gfrascadorio@tutanota.com";
github = "gfrascadorio";
githubId = 37602871;
name = "Galois";
};
ggpeti = {
email = "ggpeti@gmail.com";
github = "ggpeti";
@ -4375,16 +4387,6 @@
githubId = 41924494;
name = "Ivar";
};
ivegotasthma = {
email = "ivegotasthma@protonmail.com";
github = "ivegotasthma";
githubId = 2437675;
name = "John Doe";
keys = [{
longkeyid = "rsa4096/09AC52AEA87817A4";
fingerprint = "4008 2A5B 56A4 79B9 83CB 95FD 09AC 52AE A878 17A4";
}];
};
ixmatus = {
email = "parnell@digitalmentat.com";
github = "ixmatus";
@ -5200,6 +5202,12 @@
githubId = 546087;
name = "Kristoffer K. Føllesdal";
};
kho-dialga = {
email = "ivandashenyou@gmail.com";
github = "kho-dialga";
githubId = 55767703;
name = "Iván Brito";
};
khumba = {
email = "bog@khumba.net";
github = "khumba";
@ -5515,6 +5523,12 @@
}];
name = "Las Safin";
};
l3af = {
email = "L3afMeAlon3@gmail.com";
github = "L3afMe";
githubId = 72546287;
name = "L3af";
};
laikq = {
email = "gwen@quasebarth.de";
github = "laikq";
@ -5706,6 +5720,12 @@
githubId = 6652840;
name = "Jade";
};
lgcl = {
email = "dev@lgcl.de";
name = "Leon Vack";
github = "LogicalOverflow";
githubId = 5919957;
};
lheckemann = {
email = "git@sphalerite.org";
github = "lheckemann";
@ -6409,6 +6429,10 @@
githubId = 44469426;
name = "Matheus de Souza Pessanha";
email = "matheus_pessanha2001@outlook.com";
keys = [{
longkeyid = "rsa4096/6DFD656220A3B849";
fingerprint = "2D4D 488F 17FB FF75 664E C016 6DFD 6562 20A3 B849";
}];
};
meatcar = {
email = "nixpkgs@denys.me";
@ -6462,10 +6486,10 @@
email = "softs@metabarcoding.org";
name = "Celine Mercier";
};
metadark = {
kira-bruneau = {
email = "kira.bruneau@pm.me";
name = "Kira Bruneau";
github = "metadark";
github = "kira-bruneau";
githubId = 382041;
};
meutraa = {
@ -7469,6 +7493,12 @@
githubId = 1538622;
name = "Michael Reilly";
};
onixie = {
email = "onixie@gmail.com";
github = "onixie";
githubId = 817073;
name = "Yc. Shen";
};
onsails = {
email = "andrey@onsails.com";
github = "onsails";
@ -8093,6 +8123,12 @@
githubId = 406946;
name = "Valentin Lorentz";
};
proofofkeags = {
email = "keagan.mcclelland@gmail.com";
github = "ProofOfKeags";
githubId = 4033651;
name = "Keagan McClelland";
};
protoben = {
email = "protob3n@gmail.com";
github = "protoben";
@ -8695,6 +8731,12 @@
githubId = 221121;
name = "Robert P. Seaton";
};
rraval = {
email = "ronuk.raval@gmail.com";
github = "rraval";
githubId = 373566;
name = "Ronuk Raval";
};
rszibele = {
email = "richard@szibele.com";
github = "rszibele";
@ -9135,6 +9177,12 @@
githubId = 1443459;
name = "Sheena Artrip";
};
sheepforce = {
email = "phillip.seeber@googlemail.com";
github = "sheepforce";
githubId = 16844216;
name = "Phillip Seeber";
};
sheganinans = {
email = "sheganinans@gmail.com";
github = "sheganinans";
@ -9493,6 +9541,12 @@
githubId = 1699155;
name = "Steve Elliott";
};
stelcodes = {
email = "stel@stel.codes";
github = "stelcodes";
githubId = 22163194;
name = "Stel Abrego";
};
stephank = {
email = "nix@stephank.nl";
github = "stephank";

View file

@ -78,6 +78,15 @@ with lib.maintainers; {
scope = "Maintain Freedesktop.org packages for graphical desktop.";
};
gcc = {
members = [
synthetica
vcunat
ericson2314
];
scope = "Maintain GCC (GNU Compiler Collection) compilers";
};
golang = {
members = [
c00w

View file

@ -0,0 +1,13 @@
# Contributing to this manual {#chap-contributing}
The DocBook and CommonMark sources of NixOS' manual are in the [nixos/doc/manual](https://github.com/NixOS/nixpkgs/tree/master/nixos/doc/manual) subdirectory of the [Nixpkgs](https://github.com/NixOS/nixpkgs) repository.
You can quickly check your edits with the following:
```ShellSession
$ cd /path/to/nixpkgs
$ ./nixos/doc/manual/md-to-db.sh
$ nix-build nixos/release.nix -A manual.x86_64-linux
```
If the build succeeds, the manual will be in `./result/share/doc/nixos/index.html`.

View file

@ -1,22 +0,0 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-contributing">
<title>Contributing to this manual</title>
<para>
The DocBook sources of NixOS' manual are in the <filename
xlink:href="https://github.com/NixOS/nixpkgs/tree/master/nixos/doc/manual">
nixos/doc/manual</filename> subdirectory of the <link
xlink:href="https://github.com/NixOS/nixpkgs">Nixpkgs</link> repository.
</para>
<para>
You can quickly check your edits with the following:
</para>
<screen>
<prompt>$ </prompt>cd /path/to/nixpkgs
<prompt>$ </prompt>nix-build nixos/release.nix -A manual.x86_64-linux
</screen>
<para>
If the build succeeds, the manual will be in
<filename>./result/share/doc/nixos/index.html</filename>.
</para>
</chapter>

View file

@ -0,0 +1,18 @@
# Building Your Own NixOS CD {#sec-building-cd}
Building a NixOS CD is as easy as configuring your own computer. The idea is to use another module which will replace your `configuration.nix` to configure the system that would be installed on the CD.
Default CD/DVD configurations are available inside `nixos/modules/installer/cd-dvd`.
```ShellSession
$ git clone https://github.com/NixOS/nixpkgs.git
$ cd nixpkgs/nixos
$ nix-build -A config.system.build.isoImage -I nixos-config=modules/installer/cd-dvd/installation-cd-minimal.nix default.nix
```
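As a concrete sketch of such a module (the file name, the extra packages and the image name below are purely illustrative), one could write a `my-cd.nix` that reuses the minimal installer configuration and pass it via `-I nixos-config=`:

```nix
# my-cd.nix -- a hypothetical customized installer CD configuration
{ config, pkgs, lib, ... }:

{
  imports = [
    # Reuse the stock minimal installer CD configuration.
    <nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal.nix>
  ];

  # Extra tools available in the live environment.
  environment.systemPackages = with pkgs; [ git vim ];

  # Give the resulting image a recognizable name.
  isoImage.isoName = lib.mkForce "my-custom-nixos.iso";
}
```

It is then built the same way as above, e.g. `nix-build -A config.system.build.isoImage -I nixos-config=./my-cd.nix default.nix` from the `nixos/` directory.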
Before burning your CD/DVD, you can check the content of the image by mounting it anywhere, as suggested by the following command:
```ShellSession
# mount -o loop -t iso9660 ./result/iso/cd.iso /mnt/iso
```
If you want to customize your NixOS CD in more detail, or generate other kinds of images, you might want to check out [nixos-generators](https://github.com/nix-community/nixos-generators). This can also be a good starting point when you want to use Nix to build a 'minimal' image that doesn't include a NixOS installation.

View file

@ -1,33 +0,0 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-building-cd">
<title>Building Your Own NixOS CD</title>
<para>
Building a NixOS CD is as easy as configuring your own computer. The idea is
to use another module which will replace your
<filename>configuration.nix</filename> to configure the system that would be
installed on the CD.
</para>
<para>
Default CD/DVD configurations are available inside
<filename>nixos/modules/installer/cd-dvd</filename>.
<screen>
<prompt>$ </prompt>git clone https://github.com/NixOS/nixpkgs.git
<prompt>$ </prompt>cd nixpkgs/nixos
<prompt>$ </prompt>nix-build -A config.system.build.isoImage -I nixos-config=modules/installer/cd-dvd/installation-cd-minimal.nix default.nix</screen>
</para>
<para>
Before burning your CD/DVD, you can check the content of the image by
mounting anywhere like suggested by the following command:
<screen>
<prompt># </prompt>mount -o loop -t iso9660 ./result/iso/cd.iso /mnt/iso</screen>
</para>
<para>
If you want to customize your NixOS CD in more detail, or generate other kinds
of images, you might want to check out <link
xlink:href="https://github.com/nix-community/nixos-generators">nixos-generators</link>. This can also be a good starting point when you want to use Nix to build a
'minimal' image that doesn't include a NixOS installation.
</para>
</chapter>

View file

@ -13,8 +13,7 @@
<xi:include href="writing-modules.xml" />
<xi:include href="building-parts.xml" />
<xi:include href="writing-documentation.xml" />
<xi:include href="building-nixos.xml" />
<xi:include href="../from_md/development/building-nixos.chapter.xml" />
<xi:include href="nixos-tests.xml" />
<xi:include href="testing-installer.xml" />
<xi:include href="releases.xml" />
</part>

View file

@ -1,366 +0,0 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="ch-releases">
<title>Releases</title>
<section xml:id="release-process">
<title>Release process</title>
<para>
Going through an example of releasing NixOS 19.09:
</para>
<section xml:id="one-month-before-the-beta">
<title>One month before the beta</title>
<itemizedlist>
<listitem>
<para>
Create an announcement on <link xlink:href="https://discourse.nixos.org">Discourse</link> as a warning about upcoming beta <quote>feature freeze</quote> in a month. <link xlink:href="https://discourse.nixos.org/t/nixos-19-09-feature-freeze/3707">See this post as an example</link>.
</para>
</listitem>
<listitem>
<para>
Discuss with Eelco Dolstra and the community (via IRC, ML) about what will reach the deadline. Any issue or Pull Request targeting the release should be included in the release milestone.
</para>
</listitem>
<listitem>
<para>
Remove attributes that we know we will not be able to support, especially if there is a stable alternative. E.g. Check that our Linux kernels <link xlink:href="https://www.kernel.org/category/releases.html">projected end-of-life</link> are after our release projected end-of-life.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="at-beta-release-time">
<title>At beta release time</title>
<orderedlist>
<listitem>
<para>
From the master branch run:
</para>
<programlisting>
git checkout -b release-19.09
</programlisting>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixpkgs/commit/10e61bf5be57736035ec7a804cb0bf3d083bf2cf#diff-9c798092bac0caeb5c52d509be0ca263R69">Bump the <literal>system.defaultChannel</literal> attribute in <literal>nixos/modules/misc/version.nix</literal></link>
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixpkgs/commit/10e61bf5be57736035ec7a804cb0bf3d083bf2cf#diff-831e8d9748240fb23e6734fdc2a6d16eR15">Update <literal>versionSuffix</literal> in <literal>nixos/release.nix</literal></link>
</para>
</listitem>
</orderedlist>
<para>
To get the commit count, use the following command:
</para>
<programlisting>
git rev-list --count release-19.09
</programlisting>
<orderedlist>
<listitem>
<para>
Edit changelog at <literal>nixos/doc/manual/release-notes/rl-1909.xml</literal>.
</para>
<itemizedlist>
<listitem>
<para>
Get all new NixOS modules:
</para>
<programlisting>
git diff release-19.03..release-19.09 nixos/modules/module-list.nix | grep ^+
</programlisting>
</listitem>
<listitem>
<para>
Note systemd, kernel, glibc, desktop environment, and Nix upgrades.
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
Tag the release:
</para>
<programlisting>
git tag --annotate --message="Release 19.09-beta" 19.09-beta
git push upstream 19.09-beta
</programlisting>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixpkgs/commit/01268fda85b7eee4e462c873d8654f975067731f#diff-2bc0e46110b507d6d5a344264ef15adaR1">On the <literal>master</literal> branch, increment the <literal>.version</literal> file</link>
</para>
<programlisting>
echo -n "20.03" > .version
</programlisting>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixpkgs/commit/01268fda85b7eee4e462c873d8654f975067731f#diff-03f3d41b68f62079c55001f1a1c55c1dR137">Update <literal>codeName</literal> in <literal>lib/trivial.nix</literal></link> This will be the name for the next release.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixpkgs/commit/01268fda85b7eee4e462c873d8654f975067731f#diff-e7ee5ff686cdcc513ca089d6e5682587R11">Create a new release notes file for the upcoming release + 1</link>, in our case this is <literal>rl-2003.xml</literal>.
</para>
</listitem>
<listitem>
<para>
Contact the infrastructure team to create the necessary Hydra Jobsets.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixos-org-configurations/blob/master/channels.nix">Create a channel at https://nixos.org/channels by creating a PR to nixos-org-configurations, changing <literal>channels.nix</literal></link>
</para>
</listitem>
<listitem>
<para>
Get all Hydra jobsets for the release to have their first evaluation.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/13559">Create an issue for tracking Zero Hydra Failures progress. ZHF is an effort to get build failures down to zero.</link>
</para>
</listitem>
</orderedlist>
</section>
<section xml:id="during-beta">
<title>During Beta</title>
<itemizedlist>
<listitem>
<para>
Monitor the master branch for bugfixes and minor updates and cherry-pick them to the release branch.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="before-the-final-release">
<title>Before the final release</title>
<itemizedlist>
<listitem>
<para>
Re-check that the release notes are complete.
</para>
</listitem>
<listitem>
<para>
Release Nix (currently only Eelco Dolstra can do that). <link xlink:href="https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/installer/tools/nix-fallback-paths.nix">Make sure fallback is updated.</link>
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixpkgs/commit/40fd9ae3ac8048758abdcfc7d28a78b5f22fe97e">Update README.md with new stable NixOS version information.</link>
</para>
</listitem>
<listitem>
<para>
Change <literal>stableBranch</literal> to <literal>true</literal> in Hydra and wait for the channel to update.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="at-final-release-time">
<title>At final release time</title>
<orderedlist>
<listitem>
<para>
Update <xref linkend="sec-upgrading" /> section of the manual to match new stable release version.
</para>
</listitem>
<listitem>
<para>
Update <literal>rl-1909.xml</literal> with the release date.
</para>
</listitem>
<listitem>
<para>
Tag the final release
</para>
<programlisting>
git tag --annotate --message="Release 19.09" 19.09
git push upstream 19.09
</programlisting>
</listitem>
<listitem>
<para>
Update <link xlink:href="https://github.com/NixOS/nixos-homepage">nixos-homepage</link> for the release.
</para>
<orderedlist>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixos-homepage/blob/47ac3571c4d71e841fd4e6c6e1872e762b9c4942/Makefile#L1">Update <literal>NIXOS_SERIES</literal> in the <literal>Makefile</literal></link>.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixos-homepage/blob/47ac3571c4d71e841fd4e6c6e1872e762b9c4942/nixos-release.tt#L1">Update <literal>nixos-release.tt</literal> with the new NixOS version</link>.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixos-homepage/blob/47ac3571c4d71e841fd4e6c6e1872e762b9c4942/flake.nix#L10">Update the <literal>flake.nix</literal> input <literal>released-nixpkgs</literal> to 19.09</link>.
</para>
</listitem>
<listitem>
<para>
Run <literal>./update.sh</literal> (this updates flake.lock to updated channel).
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixos-homepage/blob/a5626c71c03a2dd69086564e56f1a230a2bb177a/logo/nixos-logo-19.09-loris-lores.png">Add a compressed version of the NixOS logo for 19.09</link>.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixos-homepage/commit/a5626c71c03a2dd69086564e56f1a230a2bb177a#diff-9cdc6434d3e4fd93a6e5bb0a531a7c71R5">Compose a news item for the website RSS feed</link>.
</para>
</listitem>
</orderedlist>
</listitem>
<listitem>
<para>
Create a new topic on <link xlink:href="https://discourse.nixos.org/">the Discourse instance</link> to announce the release.
</para>
</listitem>
</orderedlist>
<para>
You should include the following information:
</para>
<itemizedlist>
<listitem>
<para>
Number of commits for the release:
</para>
<programlisting>
bash git log release-19.03..release-19.09 --format=%an | wc -l
</programlisting>
</listitem>
<listitem>
<para>
Commits by contributor:
</para>
<programlisting>
git shortlog --summary --numbered release-19.03..release-19.09
</programlisting>
</listitem>
</itemizedlist>
<para>
Best to check how the previous post was formulated to see what needs to be included.
</para>
</section>
</section>
<section xml:id="release-management-team">
<title>Release Management Team</title>
<para>
For each release there are two release managers. After each release the release manager having managed two releases steps down and the release management team of the last release appoints a new release manager.
</para>
<para>
This makes sure a release management team always consists of one release manager who already has managed one release and one release manager being introduced to their role, making it easier to pass on knowledge and experience.
</para>
<para>
Release managers for the current NixOS release are tracked by GitHub team <link xlink:href="https://github.com/orgs/NixOS/teams/nixos-release-managers/members"><literal>@NixOS/nixos-release-managers</literal></link>.
</para>
<para>
A release managers role and responsibilities are:
</para>
<itemizedlist>
<listitem>
<para>
manage the release process
</para>
</listitem>
<listitem>
<para>
start discussions about features and changes for a given release
</para>
</listitem>
<listitem>
<para>
create a roadmap
</para>
</listitem>
<listitem>
<para>
release in cooperation with Eelco Dolstra
</para>
</listitem>
<listitem>
<para>
decide which bug fixes, features, etc… get backported after a release
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="release-schedule">
<title>Release schedule</title>
<informaltable>
<tgroup cols="2">
<colspec align="left" />
<colspec align="left" />
<thead>
<row>
<entry>
Date
</entry>
<entry>
Event
</entry>
</row>
</thead>
<tbody>
<row>
<entry>
2016-07-25
</entry>
<entry>
Send email to nix-dev about upcoming branch-off
</entry>
</row>
<row>
<entry>
2016-09-01
</entry>
<entry><literal>release-16.09</literal> branch and corresponding jobsets are created,
change freeze
</entry>
</row>
<row>
<entry>
2016-09-30
</entry>
<entry>
NixOS 16.09 released
</entry>
</row>
</tbody>
</tgroup>
</informaltable>
</section>
</chapter>

View file

@ -0,0 +1,5 @@
This directory is temporarily needed while we transition the manual to CommonMark. It stores the output of the ../md-to-db.sh script that converts CommonMark files back to DocBook.
We are choosing to convert the Markdown to DocBook at authoring time instead of manual building time, because we do not want the pandoc toolchain to become part of the NixOS closure.
Do not edit the DocBook files inside this directory or its subdirectories. Instead, edit the corresponding .md file in the normal manual directories, and run ../md-to-db.sh to update the file here.

View file

@ -0,0 +1,22 @@
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="chap-contributing">
<title>Contributing to this manual</title>
<para>
The DocBook and CommonMark sources of NixOS' manual are in the
<link xlink:href="https://github.com/NixOS/nixpkgs/tree/master/nixos/doc/manual">nixos/doc/manual</link>
subdirectory of the
<link xlink:href="https://github.com/NixOS/nixpkgs">Nixpkgs</link>
repository.
</para>
<para>
You can quickly check your edits with the following:
</para>
<programlisting>
$ cd /path/to/nixpkgs
$ ./nixos/doc/manual/md-to-db.sh
$ nix-build nixos/release.nix -A manual.x86_64-linux
</programlisting>
<para>
If the build succeeds, the manual will be in
<literal>./result/share/doc/nixos/index.html</literal>.
</para>
</chapter>

View file

@ -0,0 +1,33 @@
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-building-cd">
<title>Building Your Own NixOS CD</title>
<para>
Building a NixOS CD is as easy as configuring your own computer. The
idea is to use another module which will replace your
<literal>configuration.nix</literal> to configure the system that
would be installed on the CD.
</para>
<para>
Default CD/DVD configurations are available inside
<literal>nixos/modules/installer/cd-dvd</literal>
</para>
<programlisting>
$ git clone https://github.com/NixOS/nixpkgs.git
$ cd nixpkgs/nixos
$ nix-build -A config.system.build.isoImage -I nixos-config=modules/installer/cd-dvd/installation-cd-minimal.nix default.nix
</programlisting>
<para>
Before burning your CD/DVD, you can check the content of the image
by mounting it anywhere, as suggested by the following command:
</para>
<programlisting>
# mount -o loop -t iso9660 ./result/iso/cd.iso /mnt/iso
</programlisting>
<para>
If you want to customize your NixOS CD in more detail, or generate
other kinds of images, you might want to check out
<link xlink:href="https://github.com/nix-community/nixos-generators">nixos-generators</link>.
This can also be a good starting point when you want to use Nix to
build a <quote>minimal</quote> image that doesn't include a NixOS
installation.
</para>
</chapter>

View file

@ -0,0 +1,26 @@
<section xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="release-21.11">
<title>Release 21.11 (<quote>?</quote>, 2021.11/??)</title>
<para>
In addition to numerous new and upgraded packages, this release has
the following highlights:
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
Support is planned until the end of April 2022, handing over to
22.05.
</para>
</listitem>
</itemizedlist>
<section xml:id="backward-incompatibilities">
<title>Backward incompatibilities</title>
<itemizedlist spacing="compact">
<listitem>
<para>
The <literal>staticjinja</literal> package has been upgraded
from 1.0.4 to 2.0.0
</para>
</listitem>
</itemizedlist>
</section>
</section>

View file

@ -46,6 +46,12 @@
to increase the font size.
</para>
<para>
To install over a serial port, connect with <literal>115200n8</literal>
(e.g. <command>picocom -b 115200 /dev/ttyUSB0</command>). When the
bootloader lists boot entries, select the serial console boot entry.
</para>
<section xml:id="sec-installation-booting-networking">
<title>Networking in the installer</title>

View file

@ -19,6 +19,6 @@
<xi:include href="./generated/options-db.xml"
xpointer="configuration-variable-list" />
</appendix>
<xi:include href="contributing-to-this-manual.xml" />
<xi:include href="./from_md/contributing-to-this-manual.chapter.xml" />
<xi:include href="release-notes/release-notes.xml" />
</book>

View file

@ -0,0 +1,33 @@
#! /usr/bin/env nix-shell
#! nix-shell -I nixpkgs=channel:nixpkgs-unstable -i bash -p pandoc
# This script is temporarily needed while we transition the manual to
# CommonMark. It converts the .md files in the regular manual folder
# into DocBook files in the from_md folder.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
pushd "$DIR"
OUT="$DIR/from_md"
mapfile -t MD_FILES < <(find . -type f -regex '.*\.md$')
for mf in "${MD_FILES[@]}"; do
if [ "${mf: -11}" == ".section.md" ]; then
mkdir -p "$(dirname "$OUT/$mf")"
pandoc "$mf" -t docbook \
--extract-media=media \
-f markdown+smart \
| cat > "$OUT/${mf%".section.md"}.section.xml"
fi
if [ "${mf: -11}" == ".chapter.md" ]; then
mkdir -p "$(dirname "$OUT/$mf")"
pandoc "$mf" -t docbook \
--top-level-division=chapter \
--extract-media=media \
-f markdown+smart \
| cat > "$OUT/${mf%".chapter.md"}.chapter.xml"
fi
done
popd

View file

@ -8,6 +8,7 @@
This section lists the release notes for each stable version of NixOS and
current unstable revision.
</para>
<xi:include href="../from_md/release-notes/rl-2111.section.xml" />
<xi:include href="rl-2105.xml" />
<xi:include href="rl-2009.xml" />
<xi:include href="rl-2003.xml" />

View file

@ -100,6 +100,18 @@
Now nginx uses the zlib-ng library by default.
</para>
</listitem>
<listitem>
<para>
KDE Gear (formerly KDE Applications) is upgraded to 21.04, see its
<link xlink:href="https://kde.org/announcements/gear/21.04/">release
notes</link> for details.
</para>
<para>
The <code>kdeApplications</code> package set is now <code>kdeGear</code>,
in keeping with the new name. The old name remains for compatibility, but
it is deprecated.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://libreswan.org/">Libreswan</link> has been updated
@ -195,6 +207,12 @@
The <option>security.hideProcessInformation</option> module has been removed.
It was broken since the switch to cgroups-v2.
</para>
</listitem>
<listitem>
<para>
The <literal>linuxPackages.ati_drivers_x11</literal> kernel modules have been removed.
The drivers only supported kernels prior to 4.2, and thus have become obsolete.
</para>
</listitem>
<listitem>
<para>
@ -377,6 +395,15 @@
which is the new stable release. OpenAFS 1.6 was removed.
</para>
</listitem>
<listitem>
<para>
The WireGuard module gained a new option
<option>networking.wireguard.interfaces.&lt;name&gt;.peers.*.dynamicEndpointRefreshSeconds</option>
that implements refreshing the IP of DNS-based endpoints periodically
(which WireGuard itself
<link xlink:href="https://lists.zx2c4.com/pipermail/wireguard/2017-November/002028.html">cannot do</link>).
</para>
</listitem>
<listitem>
<para>
MariaDB has been updated to 10.5.
@ -409,7 +436,7 @@
</para>
<programlisting>
TMPDIR=$(mktemp -d)
slaptest -f /path/to/slapd.conf $TMPDIR
slaptest -f /path/to/slapd.conf -F $TMPDIR
slapcat -F $TMPDIR -n0 -H 'ldap:///???(!(objectClass=olcSchemaConfig))'
</programlisting>
<para>
@ -1101,6 +1128,19 @@ environment.systemPackages = [
This prevents NVRAM from filling up, which ensures the latest diagnostic data is always stored and alleviates problems with writing new boot configurations.
</para>
</listitem>
<listitem>
<para>
Nixpkgs now contains <link xlink:href="https://github.com/NixOS/nixpkgs/pull/118232">automatically packaged GNOME Shell extensions</link> from the <link xlink:href="https://extensions.gnome.org/">GNOME Extensions</link> portal. You can find them, filed by their UUID, under <literal>gnome38Extensions</literal> attribute for GNOME 3.38 and under <literal>gnome40Extensions</literal> for GNOME 40. Finally, the <literal>gnomeExtensions</literal> attribute contains extensions for the latest GNOME Shell version in Nixpkgs, listed under a more human-friendly name. The unqualified attribute scope also contains manually packaged extensions. Note that the automatically packaged extensions are provided for convenience and are not checked or guaranteed to work.
</para>
</listitem>
<listitem>
<para>
Erlang/OTP versions older than R21 have been dropped. The cuter package was removed as well, since it was purely an example of how to build a package,
and <literal>lfe_1_2</literal> was dropped because it could not build with R21+.
Moving forward, we expect to support only 3 yearly releases of OTP.
</para>
</listitem>
</itemizedlist>
</section>
</section>

View file

@ -0,0 +1,9 @@
# Release 21.11 (“?”, 2021.11/??) {#release-21.11}
In addition to numerous new and upgraded packages, this release has the following highlights:
* Support is planned until the end of April 2022, handing over to 22.05.
## Backward incompatibilities
* The `staticjinja` package has been upgraded from 1.0.4 to 2.0.0

View file

@ -316,8 +316,9 @@ class Machine:
start_command += "-cdrom " + args["cdrom"] + " "
if "usb" in args:
# https://github.com/qemu/qemu/blob/master/docs/usb2.txt
start_command += (
"-device piix3-usb-uhci -drive "
"-device usb-ehci -drive "
+ "id=usbdisk,file="
+ args["usb"]
+ ",if=none,readonly "

View file

@ -63,8 +63,7 @@ in
description = ''
On 64-bit systems, whether to support Direct Rendering for
32-bit applications (such as Wine). This is currently only
supported for the <literal>nvidia</literal> and
<literal>ati_unfree</literal> drivers, as well as
supported for the <literal>nvidia</literal> as well as
<literal>Mesa</literal>.
'';
};

View file

@ -1,40 +0,0 @@
# This module provides the proprietary ATI X11 / OpenGL drivers.
{ config, lib, pkgs, ... }:
with lib;
let
drivers = config.services.xserver.videoDrivers;
enabled = elem "ati_unfree" drivers;
ati_x11 = config.boot.kernelPackages.ati_drivers_x11;
in
{
config = mkIf enabled {
nixpkgs.config.xorg.abiCompat = "1.17";
services.xserver.drivers = singleton
{ name = "fglrx"; modules = [ ati_x11 ]; display = true; };
hardware.opengl.package = ati_x11;
hardware.opengl.package32 = pkgs.pkgsi686Linux.linuxPackages.ati_drivers_x11.override { libsOnly = true; kernel = null; };
hardware.opengl.setLdLibraryPath = true;
environment.systemPackages = [ ati_x11 ];
boot.extraModulePackages = [ ati_x11 ];
boot.blacklistedKernelModules = [ "radeon" ];
environment.etc.ati.source = "${ati_x11}/etc/ati";
};
}

View file

@ -24,6 +24,6 @@ in
};
meta = {
maintainers = with maintainers; [ metadark ];
maintainers = with maintainers; [ kira-bruneau ];
};
}

View file

@ -182,13 +182,29 @@ let
# Menu configuration
#
# Search using a "marker file"
search --set=root --file /EFI/nixos-installer-image
insmod gfxterm
insmod png
set gfxpayload=keep
set gfxmode=${concatStringsSep "," [
# GRUB will use the first valid mode listed here.
# `auto` will sometimes choose the smallest valid mode it detects.
# So instead we'll list a lot of possibly valid modes :/
#"3840x2160"
#"2560x1440"
"1920x1080"
"1366x768"
"1280x720"
"1024x768"
"800x600"
"auto"
]}
# Fonts can be loaded?
# (This font is assumed to always be provided as a fallback by NixOS)
if loadfont /EFI/boot/unicode.pf2; then
if loadfont (\$root)/EFI/boot/unicode.pf2; then
set with_fonts=true
fi
if [ "\$textmode" != "true" -a "\$with_fonts" == "true" ]; then
@ -212,11 +228,11 @@ let
${ # When there is a theme configured, use it, otherwise use the background image.
if config.isoImage.grubTheme != null then ''
# Sets theme.
set theme=/EFI/boot/grub-theme/theme.txt
set theme=(\$root)/EFI/boot/grub-theme/theme.txt
# Load theme fonts
$(find ${config.isoImage.grubTheme} -iname '*.pf2' -printf "loadfont /EFI/boot/grub-theme/%P\n")
$(find ${config.isoImage.grubTheme} -iname '*.pf2' -printf "loadfont (\$root)/EFI/boot/grub-theme/%P\n")
'' else ''
if background_image /EFI/boot/efi-background.png; then
if background_image (\$root)/EFI/boot/efi-background.png; then
# Black background means transparent background when there
# is a background image set... This seems undocumented :(
set color_normal=black/black
@ -239,6 +255,9 @@ let
} ''
mkdir -p $out/EFI/boot/
# Add a marker so GRUB can find the filesystem.
touch $out/EFI/nixos-installer-image
# ALWAYS required modules.
MODULES="fat iso9660 part_gpt part_msdos \
normal boot linux configfile loopback chain halt \
@ -294,12 +313,12 @@ let
${grubMenuCfg}
hiddenentry 'Text mode' --hotkey 't' {
loadfont /EFI/boot/unicode.pf2
loadfont (\$root)/EFI/boot/unicode.pf2
set textmode=true
terminal_output gfxterm console
}
hiddenentry 'GUI mode' --hotkey 'g' {
$(find ${config.isoImage.grubTheme} -iname '*.pf2' -printf "loadfont /EFI/boot/grub-theme/%P\n")
$(find ${config.isoImage.grubTheme} -iname '*.pf2' -printf "loadfont (\$root)/EFI/boot/grub-theme/%P\n")
set textmode=false
terminal_output gfxterm
}
@ -370,8 +389,10 @@ let
${lib.optionalString (refindBinary != null) ''
# GRUB apparently cannot do "chainloader" operations on "CD".
if [ "\$root" != "cd0" ]; then
# Force root to be the FAT partition
# Otherwise it breaks rEFInd's boot
search --set=root --no-floppy --fs-uuid 1234-5678
menuentry 'rEFInd' --class refind {
# \$root defaults to the drive the EFI is found on.
chainloader (\$root)/EFI/boot/${refindBinary}
}
fi
@ -403,7 +424,9 @@ let
mkdir ./boot
cp -p "${config.boot.kernelPackages.kernel}/${config.system.boot.loader.kernelFile}" \
"${config.system.build.initialRamdisk}/${config.system.boot.loader.initrdFile}" ./boot/
touch --date=@0 ./EFI ./boot
# Rewrite dates for everything in the FS
find . -exec touch --date=2000-01-01 {} +
usage_size=$(du -sb --apparent-size . | tr -cd '[:digit:]')
# Make the image 110% as big as the files need to make up for FAT overhead

View file

@ -76,7 +76,6 @@
./hardware/wooting.nix
./hardware/uinput.nix
./hardware/video/amdgpu-pro.nix
./hardware/video/ati.nix
./hardware/video/capture/mwprocapture.nix
./hardware/video/bumblebee.nix
./hardware/video/displaylink.nix
@ -239,6 +238,7 @@
./services/amqp/activemq/default.nix
./services/amqp/rabbitmq.nix
./services/audio/alsa.nix
./services/audio/botamusique.nix
./services/audio/jack.nix
./services/audio/icecast.nix
./services/audio/jmusicbot.nix
@ -806,6 +806,7 @@
./services/networking/smartdns.nix
./services/networking/smokeping.nix
./services/networking/softether.nix
./services/networking/solanum.nix
./services/networking/spacecookie.nix
./services/networking/spiped.nix
./services/networking/squid.nix

View file

@ -19,6 +19,7 @@ in
package = mkOption {
type = types.package;
default = pkgs.atop;
defaultText = "pkgs.atop";
description = ''
Which package to use for Atop.
'';
@ -36,6 +37,7 @@ in
package = mkOption {
type = types.package;
default = config.boot.kernelPackages.netatop;
defaultText = "config.boot.kernelPackages.netatop";
description = ''
Which package to use for netatop.
'';

View file

@ -31,6 +31,6 @@ in
};
meta = {
maintainers = with maintainers; [ metadark ];
maintainers = with maintainers; [ kira-bruneau ];
};
}

View file

@ -0,0 +1,114 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.botamusique;
format = pkgs.formats.ini {};
configFile = format.generate "botamusique.ini" cfg.settings;
in
{
meta.maintainers = with lib.maintainers; [ hexa ];
options.services.botamusique = {
enable = mkEnableOption "botamusique, a bot to play audio streams on mumble";
package = mkOption {
type = types.package;
default = pkgs.botamusique;
description = "The botamusique package to use.";
};
settings = mkOption {
type = with types; submodule {
freeformType = format.type;
options = {
server.host = mkOption {
type = types.str;
default = "localhost";
example = "mumble.example.com";
description = "Hostname of the mumble server to connect to.";
};
server.port = mkOption {
type = types.port;
default = 64738;
description = "Port of the mumble server to connect to.";
};
bot.username = mkOption {
type = types.str;
default = "botamusique";
description = "Name the bot should appear with.";
};
bot.comment = mkOption {
type = types.str;
default = "Hi, I'm here to play radio, local music or youtube/soundcloud music. Have fun!";
description = "Comment displayed for the bot.";
};
};
};
default = {};
description = ''
Your <filename>configuration.ini</filename> as a Nix attribute set. Look up
possible options in the <link xlink:href="https://github.com/azlux/botamusique/blob/master/configuration.example.ini">configuration.example.ini</link>.
'';
};
};
config = mkIf cfg.enable {
systemd.services.botamusique = {
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
unitConfig.Documentation = "https://github.com/azlux/botamusique/wiki";
environment.HOME = "/var/lib/botamusique";
serviceConfig = {
ExecStart = "${cfg.package}/bin/botamusique --config ${configFile}";
Restart = "always"; # the bot exits when the server connection is lost
# Hardening
CapabilityBoundingSet = [ "" ];
DynamicUser = true;
IPAddressDeny = [
"link-local"
"multicast"
];
LockPersonality = true;
MemoryDenyWriteExecute = true;
ProcSubset = "pid";
PrivateDevices = true;
PrivateUsers = true;
PrivateTmp = true;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProtectSystem = "strict";
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
];
StateDirectory = "botamusique";
SystemCallArchitectures = "native";
SystemCallFilter = [
"@system-service"
"~@privileged"
"~@resources"
];
UMask = "0077";
WorkingDirectory = "/var/lib/botamusique";
};
};
};
}

View file

@ -197,14 +197,14 @@ in {
])) (attrValues cfg.commands);
after = [ "zfs.target" ];
serviceConfig = {
ExecStartPre = (map (pool: lib.escapeShellArgs [
ExecStartPre = let
allowCmd = permissions: pool: lib.escapeShellArgs [
"+/run/booted-system/sw/bin/zfs" "allow"
cfg.user "hold,send" pool
]) (getPools "source")) ++
(map (pool: lib.escapeShellArgs [
"+/run/booted-system/sw/bin/zfs" "allow"
cfg.user "create,mount,receive,rollback" pool
]) (getPools "target"));
cfg.user (concatStringsSep "," permissions) pool
];
in
(map (allowCmd [ "hold" "send" "snapshot" "destroy" ]) (getPools "source")) ++
(map (allowCmd [ "create" "mount" "receive" "rollback" ]) (getPools "target"));
User = cfg.user;
Group = cfg.group;
};

View file

@ -48,8 +48,9 @@ let
cluster = "local";
user = name;
};
current-context = "local";
name = "local";
}];
current-context = "local";
});
caCert = secret "ca";

View file

@ -43,17 +43,15 @@ in
enable = mkEnableOption "the Firebird super server";
package = mkOption {
default = pkgs.firebirdSuper;
defaultText = "pkgs.firebirdSuper";
default = pkgs.firebird;
defaultText = "pkgs.firebird";
type = types.package;
/*
Example: <code>package = pkgs.firebirdSuper.override { icu =
pkgs.icu; };</code> which is not recommended for compatibility
reasons. See comments at the firebirdSuper derivation
*/
example = ''
<code>package = pkgs.firebird_3;</code>
'';
description = ''
Which firebird derivation to use.
Which Firebird package should be installed, e.g. <code>pkgs.firebird_3</code>.
For SuperServer, use an override: <code>pkgs.firebird_3.override { superServer = true; };</code>
'';
};
@ -74,7 +72,7 @@ in
};
baseDir = mkOption {
default = "/var/db/firebird"; # ubuntu is using /var/lib/firebird/2.1/data/.. ?
default = "/var/lib/firebird";
type = types.str;
description = ''
Location containing data/ and system/ directories.
@ -111,6 +109,14 @@ in
cp ${firebird}/security2.fdb "${systemDir}"
fi
if ! test -e "${systemDir}/security3.fdb"; then
cp ${firebird}/security3.fdb "${systemDir}"
fi
if ! test -e "${systemDir}/security4.fdb"; then
cp ${firebird}/security4.fdb "${systemDir}"
fi
chmod -R 700 "${dataDir}" "${systemDir}" /var/log/firebird
'';

View file

@ -13,13 +13,12 @@ in {
};
config = mkIf cfg.enable {
systemd.services.spacenavd = {
systemd.user.services.spacenavd = {
description = "Daemon for the Spacenavigator 6DOF mice by 3Dconnexion";
after = [ "syslog.target" ];
wantedBy = [ "graphical.target" ];
serviceConfig = {
ExecStart = "${pkgs.spacenavd}/bin/spacenavd -d -l syslog";
StandardError = "syslog";
};
};
};

View file

@ -31,11 +31,11 @@
<link linkend="opt-services.mailman.enable">enable</link> = true;
<link linkend="opt-services.mailman.serve.enable">serve.enable</link> = true;
<link linkend="opt-services.mailman.hyperkitty.enable">hyperkitty.enable</link> = true;
<link linkend="opt-services.mailman.hyperkitty.enable">webHosts</link> = ["lists.example.org"];
<link linkend="opt-services.mailman.hyperkitty.enable">siteOwner</link> = "mailman@example.org";
<link linkend="opt-services.mailman.webHosts">webHosts</link> = ["lists.example.org"];
<link linkend="opt-services.mailman.siteOwner">siteOwner</link> = "mailman@example.org";
};
<link linkend="opt-services.nginx.virtualHosts._name_.enableACME">services.nginx.virtualHosts."lists.example.org".enableACME</link> = true;
<link linkend="opt-services.mailman.hyperkitty.enable">networking.firewall.allowedTCPPorts</link> = [ 25 80 443 ];
<link linkend="opt-networking.firewall.allowedTCPPorts">networking.firewall.allowedTCPPorts</link> = [ 25 80 443 ];
}</programlisting>
</para>
<para>

View file

@ -726,6 +726,10 @@ in {
User = "matrix-synapse";
Group = "matrix-synapse";
WorkingDirectory = cfg.dataDir;
ExecStartPre = [ ("+" + (pkgs.writeShellScript "matrix-synapse-fix-permissions" ''
chown matrix-synapse:matrix-synapse ${cfg.dataDir}/homeserver.signing.key
chmod 0600 ${cfg.dataDir}/homeserver.signing.key
'')) ];
ExecStart = ''
${cfg.package}/bin/homeserver \
${ concatMapStringsSep "\n " (x: "--config-path ${x} \\") ([ configFile ] ++ cfg.extraConfigFiles) }
@ -733,6 +737,7 @@ in {
'';
ExecReload = "${pkgs.util-linux}/bin/kill -HUP $MAINPID";
Restart = "on-failure";
UMask = "0077";
};
};
};

View file

@ -386,6 +386,10 @@ let
List of relabel configurations.
'';
metric_relabel_configs = mkOpt (types.listOf promTypes.relabel_config) ''
List of metric relabel configurations.
'';
sample_limit = mkDefOpt types.int "0" ''
Per-scrape limit on number of scraped samples that will be accepted.
If more than this number of samples are present after metric relabelling

View file

@ -48,6 +48,7 @@ let
"node"
"openldap"
"openvpn"
"pihole"
"postfix"
"postgres"
"py-air-control"

View file

@ -0,0 +1,74 @@
{ config, lib, pkgs, options }:
with lib;
let
cfg = config.services.prometheus.exporters.pihole;
in
{
port = 9617;
extraOpts = {
apiToken = mkOption {
type = types.str;
default = "";
example = "580a770cb40511eb85290242ac130003580a770cb40511eb85290242ac130003";
description = ''
The pi-hole API token, which can be used instead of a password.
'';
};
interval = mkOption {
type = types.str;
default = "10s";
example = "30s";
description = ''
How often to scrape new data.
'';
};
password = mkOption {
type = types.str;
default = "";
example = "password";
description = ''
The password used to log in to pi-hole. An API token can be used instead.
'';
};
piholeHostname = mkOption {
type = types.str;
default = "pihole";
example = "127.0.0.1";
description = ''
Hostname or address of the pi-hole web interface.
'';
};
piholePort = mkOption {
type = types.port;
default = "80";
example = "443";
description = ''
The port the pi-hole web interface is reachable on.
'';
};
protocol = mkOption {
type = types.enum [ "http" "https" ];
default = "http";
example = "https";
description = ''
The protocol used to connect to pi-hole.
'';
};
};
serviceOpts = {
serviceConfig = {
ExecStart = ''
${pkgs.bash}/bin/bash -c "${pkgs.prometheus-pihole-exporter}/bin/pihole-exporter \
-interval ${cfg.interval} \
${optionalString (cfg.apiToken != "") "-pihole_api_token ${cfg.apiToken}"} \
-pihole_hostname ${cfg.piholeHostname} \
${optionalString (cfg.password != "") "-pihole_password ${cfg.password}"} \
-pihole_port ${toString cfg.piholePort} \
-pihole_protocol ${cfg.protocol} \
-port ${toString cfg.port}"
'';
};
};
}
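As a usage sketch (not part of this patch), the new exporter could be enabled from configuration.nix roughly as follows; the hostname and API token reuse the example values shown in the options above:
# Illustrative only; the enable option comes from the generic exporter framework.
services.prometheus.exporters.pihole = {
  enable = true;
  protocol = "http";
  piholeHostname = "127.0.0.1";
  apiToken = "580a770cb40511eb85290242ac130003580a770cb40511eb85290242ac130003";
};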

View file

@ -1,4 +1,4 @@
#! @shell@ -e
#! @runtimeShell@ -e
# skip this if there are no modems at all
if ! stat -t "@spoolAreaPath@"/etc/config.* >/dev/null 2>&1

View file

@ -3,7 +3,7 @@
let
inherit (lib.options) literalExample mkEnableOption mkOption;
inherit (lib.types) bool enum int lines attrsOf nullOr path str submodule;
inherit (lib.types) bool enum ints lines attrsOf nullOr path str submodule;
inherit (lib.modules) mkDefault mkIf mkMerge;
commonDescr = ''
@ -18,7 +18,6 @@ let
'';
str1 = lib.types.addCheck str (s: s!=""); # non-empty string
int1 = lib.types.addCheck int (i: i>0); # positive integer
configAttrType =
# Options in HylaFAX configuration files can be
@ -27,7 +26,7 @@ let
# This type definition resolves all
# those types into a list of strings.
let
inherit (lib.types) attrsOf coercedTo listOf;
inherit (lib.types) attrsOf coercedTo int listOf;
innerType = coercedTo bool (x: if x then "Yes" else "No")
(coercedTo int (toString) str);
in
@ -290,7 +289,7 @@ in
'';
};
faxcron.infoDays = mkOption {
type = int1;
type = ints.positive;
default = 30;
description = ''
Set the expiration time for data in the
@ -298,7 +297,7 @@ in
'';
};
faxcron.logDays = mkOption {
type = int1;
type = ints.positive;
default = 30;
description = ''
Set the expiration time for
@ -306,7 +305,7 @@ in
'';
};
faxcron.rcvDays = mkOption {
type = int1;
type = ints.positive;
default = 7;
description = ''
Set the expiration time for files in
@ -343,7 +342,7 @@ in
'';
};
faxqclean.doneqMinutes = mkOption {
type = int1;
type = ints.positive;
default = 15;
example = literalExample "24*60";
description = ''
@ -353,7 +352,7 @@ in
'';
};
faxqclean.docqMinutes = mkOption {
type = int1;
type = ints.positive;
default = 60;
example = literalExample "24*60";
description = ''

View file

@ -1,4 +1,4 @@
#! @shell@ -e
#! @runtimeShell@ -e
# The following lines create/update the HylaFAX spool directory:
# Subdirectories/files with persistent data are kept,
@ -80,7 +80,7 @@ touch clientlog faxcron.lastrun xferfaxlog
chown @faxuser@:@faxgroup@ clientlog faxcron.lastrun xferfaxlog
# create symlinks for frozen directories/files
lnsym --target-directory=. "@hylafax@"/spool/{COPYRIGHT,bin,config}
lnsym --target-directory=. "@hylafaxplus@"/spool/{COPYRIGHT,bin,config}
# create empty temporary directories
update --mode=0700 -d client dev status
@ -93,7 +93,7 @@ install -d "@spoolAreaPath@/etc"
cd "@spoolAreaPath@/etc"
# create symlinks to all files in template's etc
lnsym --target-directory=. "@hylafax@/spool/etc"/*
lnsym --target-directory=. "@hylafaxplus@/spool/etc"/*
# set LOCKDIR in setup.cache
sed --regexp-extended 's|^(UUCP_LOCKDIR=).*$|\1'"'@lockPath@'|g" --in-place setup.cache

View file

@ -13,11 +13,10 @@ let
# creates hylafax config file,
# makes sure "Include" is listed *first*
let
mkLines = conf:
(lib.concatLists
(lib.flip lib.mapAttrsToList conf
(k: map (v: "${k}: ${v}")
)));
mkLines = lib.flip lib.pipe [
(lib.mapAttrsToList (key: map (val: "${key}: ${val}")))
lib.concatLists
];
include = mkLines { Include = conf.Include or []; };
other = mkLines ( conf // { Include = []; } );
in
@ -48,13 +47,12 @@ let
name = "hylafax-setup-spool.sh";
src = ./spool.sh;
isExecutable = true;
inherit (pkgs.stdenv) shell;
hylafax = pkgs.hylafaxplus;
faxuser = "uucp";
faxgroup = "uucp";
lockPath = "/var/lock";
inherit globalConfigPath modemConfigPath;
inherit (cfg) sendmailPath spoolAreaPath userAccessFile;
inherit (pkgs) hylafaxplus runtimeShell;
};
waitFaxqScript = pkgs.substituteAll {
@ -64,8 +62,8 @@ let
src = ./faxq-wait.sh;
isExecutable = true;
timeoutSec = toString 10;
inherit (pkgs.stdenv) shell;
inherit (cfg) spoolAreaPath;
inherit (pkgs) runtimeShell;
};
sockets.hylafax-hfaxd = {
@ -108,8 +106,10 @@ let
PrivateDevices = true; # breaks /dev/tty...
PrivateNetwork = true;
PrivateTmp = true;
#ProtectClock = true; # breaks /dev/tty... (why?)
ProtectControlGroups = true;
#ProtectHome = true; # breaks custom spool dirs
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
#ProtectSystem = "strict"; # breaks custom spool dirs

View file

@ -115,6 +115,8 @@ in {
config = mkIf cfg.enable {
environment.etc."knot-resolver/kresd.conf".source = configFile; # not required
networking.resolvconf.useLocalResolver = mkDefault true;
users.users.knot-resolver =
{ isSystemUser = true;
group = "knot-resolver";

View file

@ -0,0 +1,101 @@
{ config, lib, pkgs, ... }:
let
inherit (lib) mkEnableOption mkIf mkOption types;
inherit (pkgs) solanum;
cfg = config.services.solanum;
configFile = pkgs.writeText "solanum.conf" cfg.config;
in
{
###### interface
options = {
services.solanum = {
enable = mkEnableOption "Solanum IRC daemon";
config = mkOption {
type = types.str;
default = ''
serverinfo {
name = "irc.example.com";
sid = "1ix";
description = "irc!";
vhost = "0.0.0.0";
vhost6 = "::";
};
listen {
host = "0.0.0.0";
port = 6667;
};
auth {
user = "*@*";
class = "users";
flags = exceed_limit;
};
channel {
default_split_user_count = 0;
};
'';
description = ''
Solanum IRC daemon configuration file.
See <link xlink:href="https://github.com/solanum-ircd/solanum/blob/main/doc/reference.conf"/> for all available options.
'';
};
openFilesLimit = mkOption {
type = types.int;
default = 1024;
description = ''
Maximum number of open files; this limits the number of client and server connections.
'';
};
motd = mkOption {
type = types.nullOr types.lines;
default = null;
description = ''
Solanum MOTD text.
Solanum will read its MOTD from <literal>/etc/solanum/ircd.motd</literal>.
If set, the value of this option will be written to this path.
'';
};
};
};
###### implementation
config = mkIf cfg.enable (lib.mkMerge [
{
systemd.services.solanum = {
description = "Solanum IRC daemon";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
ExecStart = "${solanum}/bin/solanum -foreground -logfile /dev/stdout -configfile ${configFile} -pidfile /run/solanum/ircd.pid";
DynamicUser = true;
User = "solanum";
StateDirectory = "solanum";
RuntimeDirectory = "solanum";
LimitNOFILE = "${toString cfg.openFilesLimit}";
};
};
}
(mkIf (cfg.motd != null) {
environment.etc."solanum/ircd.motd".text = cfg.motd;
})
]);
}
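As a usage sketch (not part of this patch), the module above could be enabled with the default configuration and a custom MOTD roughly like:
# Illustrative fragment for configuration.nix.
services.solanum = {
  enable = true;
  motd = ''
    Welcome to this Solanum instance!
  '';
};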

View file

@ -198,7 +198,32 @@ let
example = "demo.wireguard.io:12913";
type = with types; nullOr str;
description = ''Endpoint IP or hostname of the peer, followed by a colon,
and then a port number of the peer.'';
and then a port number of the peer.
Warning for endpoints with changing IPs:
The WireGuard kernel side cannot perform DNS resolution.
Thus DNS resolution is done once by the <literal>wg</literal> userspace
utility, when setting up WireGuard. Consequently, if the IP address
behind the name changes, WireGuard will not notice.
This is especially common for dynamic-DNS setups, but also applies to
any other DNS-based setup.
If you do not use IP endpoints, you likely want to set
<option>networking.wireguard.dynamicEndpointRefreshSeconds</option>
to refresh the IPs periodically.
'';
};
dynamicEndpointRefreshSeconds = mkOption {
default = 0;
example = 5;
type = with types; int;
description = ''
Re-execute the <literal>wg</literal> utility every this many seconds
in order to let WireGuard notice DNS / hostname changes for the endpoint.
Setting this to <literal>0</literal> disables the periodic re-execution.
'';
};
persistentKeepalive = mkOption {
@ -259,12 +284,18 @@ let
'';
};
generatePeerUnit = { interfaceName, interfaceCfg, peer }:
peerUnitServiceName = interfaceName: publicKey: dynamicRefreshEnabled:
let
keyToUnitName = replaceChars
[ "/" "-" " " "+" "=" ]
[ "-" "\\x2d" "\\x20" "\\x2b" "\\x3d" ];
unitName = keyToUnitName peer.publicKey;
unitName = keyToUnitName publicKey;
refreshSuffix = optionalString dynamicRefreshEnabled "-refresh";
in
"wireguard-${interfaceName}-peer-${unitName}${refreshSuffix}";
generatePeerUnit = { interfaceName, interfaceCfg, peer }:
let
psk =
if peer.presharedKey != null
then pkgs.writeText "wg-psk" peer.presharedKey
@ -273,7 +304,12 @@ let
dst = interfaceCfg.interfaceNamespace;
ip = nsWrap "ip" src dst;
wg = nsWrap "wg" src dst;
in nameValuePair "wireguard-${interfaceName}-peer-${unitName}"
dynamicRefreshEnabled = peer.dynamicEndpointRefreshSeconds != 0;
# We generate a different name (with a `-refresh` suffix) when `dynamicEndpointRefreshSeconds`
# is enabled, so that the same service never switches `Type` (`oneshot` vs `simple`);
# this makes scripting against these units more predictable.
serviceName = peerUnitServiceName interfaceName peer.publicKey dynamicRefreshEnabled;
in nameValuePair serviceName
{
description = "WireGuard Peer - ${interfaceName} - ${peer.publicKey}";
requires = [ "wireguard-${interfaceName}.service" ];
@ -283,36 +319,59 @@ let
environment.WG_ENDPOINT_RESOLUTION_RETRIES = "infinity";
path = with pkgs; [ iproute2 wireguard-tools ];
serviceConfig = {
serviceConfig =
if !dynamicRefreshEnabled
then
{
Type = "oneshot";
RemainAfterExit = true;
}
else
{
Type = "simple"; # re-executes 'wg' indefinitely
# Note that `Type = "oneshot"` services with `RemainAfterExit = true`
# cannot be used with systemd timers (see `man systemd.timer`),
# which is why `simple` with a loop is the best choice here.
# It also makes starting and stopping easiest.
};
script = let
wg_setup = "${wg} set ${interfaceName} peer ${peer.publicKey}" +
optionalString (psk != null) " preshared-key ${psk}" +
optionalString (peer.endpoint != null) " endpoint ${peer.endpoint}" +
optionalString (peer.persistentKeepalive != null) " persistent-keepalive ${toString peer.persistentKeepalive}" +
optionalString (peer.allowedIPs != []) " allowed-ips ${concatStringsSep "," peer.allowedIPs}";
wg_setup = concatStringsSep " " (
[ ''${wg} set ${interfaceName} peer "${peer.publicKey}"'' ]
++ optional (psk != null) ''preshared-key "${psk}"''
++ optional (peer.endpoint != null) ''endpoint "${peer.endpoint}"''
++ optional (peer.persistentKeepalive != null) ''persistent-keepalive "${toString peer.persistentKeepalive}"''
++ optional (peer.allowedIPs != []) ''allowed-ips "${concatStringsSep "," peer.allowedIPs}"''
);
route_setup =
optionalString interfaceCfg.allowedIPsAsRoutes
(concatMapStringsSep "\n"
(allowedIP:
"${ip} route replace ${allowedIP} dev ${interfaceName} table ${interfaceCfg.table}"
''${ip} route replace "${allowedIP}" dev "${interfaceName}" table "${interfaceCfg.table}"''
) peer.allowedIPs);
in ''
${wg_setup}
${route_setup}
${optionalString (peer.dynamicEndpointRefreshSeconds != 0) ''
# Re-execute 'wg' periodically to notice DNS / hostname changes.
# Note this will not time out on transient DNS failures, such as DNS names
# that temporarily cannot be resolved, because we have set 'WG_ENDPOINT_RESOLUTION_RETRIES=infinity'.
# Also note that 'wg' limits its maximum retry delay to 20 seconds as of writing.
while ${wg_setup}; do
sleep "${toString peer.dynamicEndpointRefreshSeconds}";
done
''}
'';
postStop = let
route_destroy = optionalString interfaceCfg.allowedIPsAsRoutes
(concatMapStringsSep "\n"
(allowedIP:
"${ip} route delete ${allowedIP} dev ${interfaceName} table ${interfaceCfg.table}"
''${ip} route delete "${allowedIP}" dev "${interfaceName}" table "${interfaceCfg.table}"''
) peer.allowedIPs);
in ''
${wg} set ${interfaceName} peer ${peer.publicKey} remove
${wg} set "${interfaceName}" peer "${peer.publicKey}" remove
${route_destroy}
'';
};
@ -348,23 +407,25 @@ let
${values.preSetup}
${ipPreMove} link add dev ${name} type wireguard
${optionalString (values.interfaceNamespace != null && values.interfaceNamespace != values.socketNamespace) "${ipPreMove} link set ${name} netns ${ns}"}
${ipPreMove} link add dev "${name}" type wireguard
${optionalString (values.interfaceNamespace != null && values.interfaceNamespace != values.socketNamespace) ''${ipPreMove} link set "${name}" netns "${ns}"''}
${concatMapStringsSep "\n" (ip:
"${ipPostMove} address add ${ip} dev ${name}"
''${ipPostMove} address add "${ip}" dev "${name}"''
) values.ips}
${wg} set ${name} private-key ${privKey} ${
optionalString (values.listenPort != null) " listen-port ${toString values.listenPort}"}
${concatStringsSep " " (
[ ''${wg} set "${name}" private-key "${privKey}"'' ]
++ optional (values.listenPort != null) ''listen-port "${toString values.listenPort}"''
)}
${ipPostMove} link set up dev ${name}
${ipPostMove} link set up dev "${name}"
${values.postSetup}
'';
postStop = ''
${ipPostMove} link del dev ${name}
${ipPostMove} link del dev "${name}"
${values.postShutdown}
'';
};
@ -374,7 +435,7 @@ let
nsList = filter (ns: ns != null) [ src dst ];
ns = last nsList;
in
if (length nsList > 0 && ns != "init") then "ip netns exec ${ns} ${cmd}" else cmd;
if (length nsList > 0 && ns != "init") then ''ip netns exec "${ns}" "${cmd}"'' else cmd;
in
{
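As a usage sketch (not part of this patch), the new per-peer refresh option could be combined with a DNS-based endpoint roughly like this; the key, key file path and hostname are placeholders:
# Illustrative fragment; option paths follow the peer options shown above.
networking.wireguard.interfaces.wg0 = {
  ips = [ "10.100.0.2/24" ];
  privateKeyFile = "/run/keys/wg0.key";          # placeholder path
  peers = [{
    publicKey = "<base64 peer public key>";      # placeholder
    allowedIPs = [ "10.100.0.0/24" ];
    endpoint = "vpn.dyndns.example.org:51820";   # DNS name whose IP may change
    dynamicEndpointRefreshSeconds = 60;          # re-run `wg set` every minute
  }];
};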

View file

@ -299,9 +299,8 @@ in
# Ensure essential files exist.
if [[ ! -f ${cfg.dataDir}/configs/znc.conf ]]; then
echo "No znc.conf file found in ${cfg.dataDir}. Creating one now."
cp --no-clobber ${cfg.configFile} ${cfg.dataDir}/configs/znc.conf
cp --no-preserve=ownership --no-clobber ${cfg.configFile} ${cfg.dataDir}/configs/znc.conf
chmod u+rw ${cfg.dataDir}/configs/znc.conf
chown ${cfg.user} ${cfg.dataDir}/configs/znc.conf
fi
if [[ ! -f ${cfg.dataDir}/znc.pem ]]; then

View file

@ -121,7 +121,6 @@ in {
EnvironmentFile = [ configFile ] ++ optional (cfg.environmentFile != null) cfg.environmentFile;
ExecStart = "${bitwarden_rs}/bin/bitwarden_rs";
LimitNOFILE = "1048576";
LimitNPROC = "64";
PrivateTmp = "true";
PrivateDevices = "true";
ProtectHome = "true";

View file

@ -54,6 +54,7 @@ in
frontendUrl = lib.mkOption {
type = lib.types.str;
apply = x: if lib.hasSuffix "/" x then x else x + "/";
example = "keycloak.example.com/auth";
description = ''
The public URL used as base for all frontend requests. Should
@ -84,20 +85,34 @@ in
'';
};
certificatePrivateKeyBundle = lib.mkOption {
sslCertificate = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
example = "/run/keys/ssl_cert";
description = ''
The path to a PEM formatted bundle of the private key and
certificate to use for TLS connections.
The path to a PEM formatted certificate to use for TLS/SSL
connections.
This should be a string, not a Nix path, since Nix paths are
copied into the world-readable Nix store.
'';
};
databaseType = lib.mkOption {
sslCertificateKey = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
example = "/run/keys/ssl_key";
description = ''
The path to a PEM formatted private key to use for TLS/SSL
connections.
This should be a string, not a Nix path, since Nix paths are
copied into the world-readable Nix store.
'';
};
database = {
type = lib.mkOption {
type = lib.types.enum [ "mysql" "postgresql" ];
default = "postgresql";
example = "mysql";
@ -106,7 +121,7 @@ in
'';
};
databaseHost = lib.mkOption {
host = lib.mkOption {
type = lib.types.str;
default = "localhost";
description = ''
@ -114,7 +129,7 @@ in
'';
};
databasePort =
port =
let
dbPorts = {
postgresql = 5432;
@ -123,22 +138,22 @@ in
in
lib.mkOption {
type = lib.types.port;
default = dbPorts.${cfg.databaseType};
default = dbPorts.${cfg.database.type};
description = ''
Port of the database to connect to.
'';
};
databaseUseSSL = lib.mkOption {
useSSL = lib.mkOption {
type = lib.types.bool;
default = cfg.databaseHost != "localhost";
default = cfg.database.host != "localhost";
description = ''
Whether the database connection should be secured by SSL /
TLS.
'';
};
databaseCaCert = lib.mkOption {
caCert = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
description = ''
@ -153,18 +168,18 @@ in
'';
};
databaseCreateLocally = lib.mkOption {
createLocally = lib.mkOption {
type = lib.types.bool;
default = true;
description = ''
Whether a database should be automatically created on the
local host. Set this to false if you plan on provisioning a
local database yourself. This has no effect if
services.keycloak.databaseHost is customized.
services.keycloak.database.host is customized.
'';
};
databaseUsername = lib.mkOption {
username = lib.mkOption {
type = lib.types.str;
default = "keycloak";
description = ''
@ -173,14 +188,14 @@ in
automatically provisioned.
To use this with a local database, set <xref
linkend="opt-services.keycloak.databaseCreateLocally" /> to
linkend="opt-services.keycloak.database.createLocally" /> to
<literal>false</literal> and create the database and user
manually. The database should be called
<literal>keycloak</literal>.
'';
};
databasePasswordFile = lib.mkOption {
passwordFile = lib.mkOption {
type = lib.types.path;
example = "/run/keys/db_password";
description = ''
@ -190,6 +205,7 @@ in
copied into the world-readable Nix store.
'';
};
};
package = lib.mkOption {
type = lib.types.package;
@ -261,12 +277,12 @@ in
config =
let
# We only want to create a database if we're actually going to connect to it.
databaseActuallyCreateLocally = cfg.databaseCreateLocally && cfg.databaseHost == "localhost";
createLocalPostgreSQL = databaseActuallyCreateLocally && cfg.databaseType == "postgresql";
createLocalMySQL = databaseActuallyCreateLocally && cfg.databaseType == "mysql";
databaseActuallyCreateLocally = cfg.database.createLocally && cfg.database.host == "localhost";
createLocalPostgreSQL = databaseActuallyCreateLocally && cfg.database.type == "postgresql";
createLocalMySQL = databaseActuallyCreateLocally && cfg.database.type == "mysql";
mySqlCaKeystore = pkgs.runCommandNoCC "mysql-ca-keystore" {} ''
${pkgs.jre}/bin/keytool -importcert -trustcacerts -alias MySQLCACert -file ${cfg.databaseCaCert} -keystore $out -storepass notsosecretpassword -noprompt
${pkgs.jre}/bin/keytool -importcert -trustcacerts -alias MySQLCACert -file ${cfg.database.caCert} -keystore $out -storepass notsosecretpassword -noprompt
'';
keycloakConfig' = builtins.foldl' lib.recursiveUpdate {
@ -282,11 +298,11 @@ in
};
"subsystem=datasources"."data-source=KeycloakDS" = {
max-pool-size = "20";
user-name = if databaseActuallyCreateLocally then "keycloak" else cfg.databaseUsername;
user-name = if databaseActuallyCreateLocally then "keycloak" else cfg.database.username;
password = "@db-password@";
};
} [
(lib.optionalAttrs (cfg.databaseType == "postgresql") {
(lib.optionalAttrs (cfg.database.type == "postgresql") {
"subsystem=datasources" = {
"jdbc-driver=postgresql" = {
driver-module-name = "org.postgresql";
@ -294,16 +310,16 @@ in
driver-xa-datasource-class-name = "org.postgresql.xa.PGXADataSource";
};
"data-source=KeycloakDS" = {
connection-url = "jdbc:postgresql://${cfg.databaseHost}:${builtins.toString cfg.databasePort}/keycloak";
connection-url = "jdbc:postgresql://${cfg.database.host}:${builtins.toString cfg.database.port}/keycloak";
driver-name = "postgresql";
"connection-properties=ssl".value = lib.boolToString cfg.databaseUseSSL;
} // (lib.optionalAttrs (cfg.databaseCaCert != null) {
"connection-properties=sslrootcert".value = cfg.databaseCaCert;
"connection-properties=ssl".value = lib.boolToString cfg.database.useSSL;
} // (lib.optionalAttrs (cfg.database.caCert != null) {
"connection-properties=sslrootcert".value = cfg.database.caCert;
"connection-properties=sslmode".value = "verify-ca";
});
};
})
(lib.optionalAttrs (cfg.databaseType == "mysql") {
(lib.optionalAttrs (cfg.database.type == "mysql") {
"subsystem=datasources" = {
"jdbc-driver=mysql" = {
driver-module-name = "com.mysql";
@ -311,22 +327,22 @@ in
driver-class-name = "com.mysql.jdbc.Driver";
};
"data-source=KeycloakDS" = {
connection-url = "jdbc:mysql://${cfg.databaseHost}:${builtins.toString cfg.databasePort}/keycloak";
connection-url = "jdbc:mysql://${cfg.database.host}:${builtins.toString cfg.database.port}/keycloak";
driver-name = "mysql";
"connection-properties=useSSL".value = lib.boolToString cfg.databaseUseSSL;
"connection-properties=requireSSL".value = lib.boolToString cfg.databaseUseSSL;
"connection-properties=verifyServerCertificate".value = lib.boolToString cfg.databaseUseSSL;
"connection-properties=useSSL".value = lib.boolToString cfg.database.useSSL;
"connection-properties=requireSSL".value = lib.boolToString cfg.database.useSSL;
"connection-properties=verifyServerCertificate".value = lib.boolToString cfg.database.useSSL;
"connection-properties=characterEncoding".value = "UTF-8";
valid-connection-checker-class-name = "org.jboss.jca.adapters.jdbc.extensions.mysql.MySQLValidConnectionChecker";
validate-on-match = true;
exception-sorter-class-name = "org.jboss.jca.adapters.jdbc.extensions.mysql.MySQLExceptionSorter";
} // (lib.optionalAttrs (cfg.databaseCaCert != null) {
} // (lib.optionalAttrs (cfg.database.caCert != null) {
"connection-properties=trustCertificateKeyStoreUrl".value = "file:${mySqlCaKeystore}";
"connection-properties=trustCertificateKeyStorePassword".value = "notsosecretpassword";
});
};
})
(lib.optionalAttrs (cfg.certificatePrivateKeyBundle != null) {
(lib.optionalAttrs (cfg.sslCertificate != null && cfg.sslCertificateKey != null) {
"socket-binding-group=standard-sockets"."socket-binding=https".port = cfg.httpsPort;
"core-service=management"."security-realm=UndertowRealm"."server-identity=ssl" = {
keystore-path = "/run/keycloak/ssl/certificate_private_key_bundle.p12";
@ -537,7 +553,9 @@ in
jbossCliScript = pkgs.writeText "jboss-cli-script" (mkJbossScript keycloakConfig');
keycloakConfig = pkgs.runCommandNoCC "keycloak-config" {} ''
keycloakConfig = pkgs.runCommandNoCC "keycloak-config" {
nativeBuildInputs = [ cfg.package ];
} ''
export JBOSS_BASE_DIR="$(pwd -P)";
export JBOSS_MODULEPATH="${cfg.package}/modules";
export JBOSS_LOG_DIR="$JBOSS_BASE_DIR/log";
@ -547,11 +565,11 @@ in
mkdir -p {deployments,ssl}
"${cfg.package}/bin/standalone.sh"&
standalone.sh&
attempt=1
max_attempts=30
while ! ${cfg.package}/bin/jboss-cli.sh --connect ':read-attribute(name=server-state)'; do
while ! jboss-cli.sh --connect ':read-attribute(name=server-state)'; do
if [[ "$attempt" == "$max_attempts" ]]; then
echo "ERROR: Could not connect to Keycloak after $attempt attempts! Failing.." >&2
exit 1
@ -561,7 +579,7 @@ in
(( attempt++ ))
done
${cfg.package}/bin/jboss-cli.sh --connect --file=${jbossCliScript} --echo-command
jboss-cli.sh --connect --file=${jbossCliScript} --echo-command
cp configuration/standalone.xml $out
'';
@ -570,8 +588,8 @@ in
assertions = [
{
assertion = (cfg.databaseUseSSL && cfg.databaseType == "postgresql") -> (cfg.databaseCaCert != null);
message = "A CA certificate must be specified (in 'services.keycloak.databaseCaCert') when PostgreSQL is used with SSL";
assertion = (cfg.database.useSSL && cfg.database.type == "postgresql") -> (cfg.database.caCert != null);
message = "A CA certificate must be specified (in 'services.keycloak.database.caCert') when PostgreSQL is used with SSL";
}
];
@ -581,6 +599,7 @@ in
after = [ "postgresql.service" ];
before = [ "keycloak.service" ];
bindsTo = [ "postgresql.service" ];
path = [ config.services.postgresql.package ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
@ -588,13 +607,15 @@ in
Group = "postgres";
};
script = ''
set -eu
set -o errexit -o pipefail -o nounset -o errtrace
shopt -s inherit_errexit
PSQL=${config.services.postgresql.package}/bin/psql
create_role="$(mktemp)"
trap 'rm -f "$create_role"' ERR EXIT
db_password="$(<'${cfg.databasePasswordFile}')"
$PSQL -tAc "SELECT 1 FROM pg_roles WHERE rolname='keycloak'" | grep -q 1 || $PSQL -tAc "CREATE ROLE keycloak WITH LOGIN PASSWORD '$db_password' CREATEDB"
$PSQL -tAc "SELECT 1 FROM pg_database WHERE datname = 'keycloak'" | grep -q 1 || $PSQL -tAc 'CREATE DATABASE "keycloak" OWNER "keycloak"'
echo "CREATE ROLE keycloak WITH LOGIN PASSWORD '$(<'${cfg.database.passwordFile}')' CREATEDB" > "$create_role"
psql -tAc "SELECT 1 FROM pg_roles WHERE rolname='keycloak'" | grep -q 1 || psql -tA --file="$create_role"
psql -tAc "SELECT 1 FROM pg_database WHERE datname = 'keycloak'" | grep -q 1 || psql -tAc 'CREATE DATABASE "keycloak" OWNER "keycloak"'
'';
};
@ -602,6 +623,7 @@ in
after = [ "mysql.service" ];
before = [ "keycloak.service" ];
bindsTo = [ "mysql.service" ];
path = [ config.services.mysql.package ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
@ -609,13 +631,14 @@ in
Group = config.services.mysql.group;
};
script = ''
set -eu
set -o errexit -o pipefail -o nounset -o errtrace
shopt -s inherit_errexit
db_password="$(<'${cfg.databasePasswordFile}')"
db_password="$(<'${cfg.database.passwordFile}')"
( echo "CREATE USER IF NOT EXISTS 'keycloak'@'localhost' IDENTIFIED BY '$db_password';"
echo "CREATE DATABASE keycloak CHARACTER SET utf8 COLLATE utf8_unicode_ci;"
echo "GRANT ALL PRIVILEGES ON keycloak.* TO 'keycloak'@'localhost';"
) | ${config.services.mysql.package}/bin/mysql -N
) | mysql -N
'';
};
@ -634,6 +657,8 @@ in
bindsTo = databaseServices;
wantedBy = [ "multi-user.target" ];
path = with pkgs; [
cfg.package
openssl
replace-secret
];
environment = {
@ -644,14 +669,21 @@ in
serviceConfig = {
ExecStartPre = let
startPreFullPrivileges = ''
set -eu
set -o errexit -o pipefail -o nounset -o errtrace
shopt -s inherit_errexit
install -T -m 0400 -o keycloak -g keycloak '${cfg.databasePasswordFile}' /run/keycloak/secrets/db_password
'' + lib.optionalString (cfg.certificatePrivateKeyBundle != null) ''
install -T -m 0400 -o keycloak -g keycloak '${cfg.certificatePrivateKeyBundle}' /run/keycloak/secrets/ssl_cert_pk_bundle
umask u=rwx,g=,o=
install -T -m 0400 -o keycloak -g keycloak '${cfg.database.passwordFile}' /run/keycloak/secrets/db_password
'' + lib.optionalString (cfg.sslCertificate != null && cfg.sslCertificateKey != null) ''
install -T -m 0400 -o keycloak -g keycloak '${cfg.sslCertificate}' /run/keycloak/secrets/ssl_cert
install -T -m 0400 -o keycloak -g keycloak '${cfg.sslCertificateKey}' /run/keycloak/secrets/ssl_key
'';
startPre = ''
set -eu
set -o errexit -o pipefail -o nounset -o errtrace
shopt -s inherit_errexit
umask u=rwx,g=,o=
install -m 0600 ${cfg.package}/standalone/configuration/*.properties /run/keycloak/configuration
install -T -m 0600 ${keycloakConfig} /run/keycloak/configuration/standalone.xml
@ -659,11 +691,14 @@ in
replace-secret '@db-password@' '/run/keycloak/secrets/db_password' /run/keycloak/configuration/standalone.xml
export JAVA_OPTS=-Djboss.server.config.user.dir=/run/keycloak/configuration
${cfg.package}/bin/add-user-keycloak.sh -u admin -p '${cfg.initialAdminPassword}'
'' + lib.optionalString (cfg.certificatePrivateKeyBundle != null) ''
add-user-keycloak.sh -u admin -p '${cfg.initialAdminPassword}'
'' + lib.optionalString (cfg.sslCertificate != null && cfg.sslCertificateKey != null) ''
pushd /run/keycloak/ssl/
cat /run/keycloak/secrets/ssl_cert_pk_bundle <(echo) /etc/ssl/certs/ca-certificates.crt > allcerts.pem
${pkgs.openssl}/bin/openssl pkcs12 -export -in /run/keycloak/secrets/ssl_cert_pk_bundle -chain \
cat /run/keycloak/secrets/ssl_cert <(echo) \
/run/keycloak/secrets/ssl_key <(echo) \
/etc/ssl/certs/ca-certificates.crt \
> allcerts.pem
openssl pkcs12 -export -in /run/keycloak/secrets/ssl_cert -inkey /run/keycloak/secrets/ssl_key -chain \
-name "${cfg.frontendUrl}" -out certificate_private_key_bundle.p12 \
-CAfile allcerts.pem -passout pass:notsosecretpassword
popd
@ -697,4 +732,5 @@ in
};
meta.doc = ./keycloak.xml;
meta.maintainers = [ lib.maintainers.talyz ];
}
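As a usage sketch (not part of this patch), the renamed options could be set from configuration.nix roughly as below; the URL and key paths are placeholders, and database.* replaces the old flat database* options:
# Illustrative fragment only.
services.keycloak = {
  enable = true;
  frontendUrl = "https://keycloak.example.com/auth";
  sslCertificate = "/run/keys/ssl_cert";
  sslCertificateKey = "/run/keys/ssl_key";
  database = {
    type = "postgresql";
    passwordFile = "/run/keys/db_password";
  };
};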

View file

@ -41,31 +41,31 @@
<productname>PostgreSQL</productname> or
<productname>MySQL</productname>. Which one is used can be
configured in <xref
linkend="opt-services.keycloak.databaseType" />. The selected
linkend="opt-services.keycloak.database.type" />. The selected
database will automatically be enabled and a database and role
created unless <xref
linkend="opt-services.keycloak.databaseHost" /> is changed from
linkend="opt-services.keycloak.database.host" /> is changed from
its default of <literal>localhost</literal> or <xref
linkend="opt-services.keycloak.databaseCreateLocally" /> is set
linkend="opt-services.keycloak.database.createLocally" /> is set
to <literal>false</literal>.
</para>
<para>
External database access can also be configured by setting
<xref linkend="opt-services.keycloak.databaseHost" />, <xref
linkend="opt-services.keycloak.databaseUsername" />, <xref
linkend="opt-services.keycloak.databaseUseSSL" /> and <xref
linkend="opt-services.keycloak.databaseCaCert" /> as
<xref linkend="opt-services.keycloak.database.host" />, <xref
linkend="opt-services.keycloak.database.username" />, <xref
linkend="opt-services.keycloak.database.useSSL" /> and <xref
linkend="opt-services.keycloak.database.caCert" /> as
appropriate. Note that you need to manually create a database
called <literal>keycloak</literal> and allow the configured
database user full access to it.
</para>
<para>
<xref linkend="opt-services.keycloak.databasePasswordFile" />
<xref linkend="opt-services.keycloak.database.passwordFile" />
must be set to the path to a file containing the password used
to log in to the database. If <xref linkend="opt-services.keycloak.databaseHost" />
and <xref linkend="opt-services.keycloak.databaseCreateLocally" />
to log in to the database. If <xref linkend="opt-services.keycloak.database.host" />
and <xref linkend="opt-services.keycloak.database.createLocally" />
are kept at their defaults, the database role
<literal>keycloak</literal> with that password is provisioned
on the local database instance.
@ -115,17 +115,17 @@
</para>
<para>
For HTTPS support, a TLS certificate and private key is
required. They should be <link
HTTPS support requires a TLS/SSL certificate and a private key,
both <link
xlink:href="https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail">PEM
formatted</link> and concatenated into a single file. The path
to this file should be configured in
<xref linkend="opt-services.keycloak.certificatePrivateKeyBundle" />.
formatted</link>. Their paths should be set through <xref
linkend="opt-services.keycloak.sslCertificate" /> and <xref
linkend="opt-services.keycloak.sslCertificateKey" />.
</para>
<warning>
<para>
The path should be provided as a string, not a Nix path,
The paths should be provided as strings, not Nix paths,
since Nix paths are copied into the world readable Nix store.
</para>
</warning>
@ -195,8 +195,9 @@ services.keycloak = {
<link linkend="opt-services.keycloak.initialAdminPassword">initialAdminPassword</link> = "e6Wcm0RrtegMEHl"; # change on first login
<link linkend="opt-services.keycloak.frontendUrl">frontendUrl</link> = "https://keycloak.example.com/auth";
<link linkend="opt-services.keycloak.forceBackendUrlToFrontendUrl">forceBackendUrlToFrontendUrl</link> = true;
<link linkend="opt-services.keycloak.certificatePrivateKeyBundle">certificatePrivateKeyBundle</link> = "/run/keys/ssl_cert";
<link linkend="opt-services.keycloak.databasePasswordFile">databasePasswordFile</link> = "/run/keys/db_password";
<link linkend="opt-services.keycloak.sslCertificate">sslCertificate</link> = "/run/keys/ssl_cert";
<link linkend="opt-services.keycloak.sslCertificateKey">sslCertificateKey</link> = "/run/keys/ssl_key";
<link linkend="opt-services.keycloak.database.passwordFile">database.passwordFile</link> = "/run/keys/db_password";
};
</programlisting>
</para>

View file

@ -448,10 +448,10 @@ in {
join pg_namespace s on s.oid = c.relnamespace \
where s.nspname not in ('pg_catalog', 'pg_toast', 'information_schema') \
and s.nspname not like 'pg_temp%';" | sed -n 3p` -eq 0 ]; then
SAFETY_ASSURED=1 rake db:schema:load
rake db:seed
SAFETY_ASSURED=1 rails db:schema:load
rails db:seed
else
rake db:migrate
rails db:migrate
fi
'';
path = [ cfg.package pkgs.postgresql ];

View file

@ -9,6 +9,7 @@ let
# Disable automatically generating desktop icon
noDesktopIcon=true
noBackup=${cfg.noBackup}
[Network]
# host setting is relevant only for web deployments - set the host on which the server will listen
@ -28,7 +29,7 @@ in
type = types.str;
default = "/var/lib/trilium";
description = ''
The directory storing the nodes database and the configuration.
The directory storing the notes database and the configuration.
'';
};
@ -40,6 +41,14 @@ in
'';
};
noBackup = mkOption {
type = types.bool;
default = false;
description = ''
Disable periodic database backups.
'';
};
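As a usage sketch (not part of this patch), and assuming the services.trilium-server attribute path used by this module, the new option could be set roughly like:
# Illustrative fragment; disables the periodic database backups.
services.trilium-server = {
  enable = true;
  noBackup = true;
};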
host = mkOption {
type = types.str;
default = "127.0.0.1";
@ -85,7 +94,7 @@ in
config = lib.mkIf cfg.enable (lib.mkMerge [
{
meta.maintainers = with lib.maintainers; [ ];
meta.maintainers = with lib.maintainers; [ fliegendewurst ];
users.groups.trilium = {};
users.users.trilium = {

View file

@ -230,13 +230,13 @@ let
defaultListen =
if vhost.listen != [] then vhost.listen
else ((optionals hasSSL (
else optionals (hasSSL || vhost.rejectSSL) (
singleton { addr = "0.0.0.0"; port = 443; ssl = true; }
++ optional enableIPv6 { addr = "[::]"; port = 443; ssl = true; }
)) ++ optionals (!onlySSL) (
) ++ optionals (!onlySSL) (
singleton { addr = "0.0.0.0"; port = 80; ssl = false; }
++ optional enableIPv6 { addr = "[::]"; port = 80; ssl = false; }
));
);
hostListen =
if vhost.forceSSL
@ -303,6 +303,9 @@ let
${optionalString (hasSSL && vhost.sslTrustedCertificate != null) ''
ssl_trusted_certificate ${vhost.sslTrustedCertificate};
''}
${optionalString vhost.rejectSSL ''
ssl_reject_handshake on;
''}
${mkBasicAuth vhostName vhost}
@ -771,20 +774,27 @@ in
}
{
assertion = all (conf: with conf;
!(addSSL && (onlySSL || enableSSL)) &&
!(forceSSL && (onlySSL || enableSSL)) &&
!(addSSL && forceSSL)
assertion = all (host: with host;
count id [ addSSL (onlySSL || enableSSL) forceSSL rejectSSL ] <= 1
) (attrValues virtualHosts);
message = ''
Options services.nginx.service.virtualHosts.<name>.addSSL,
services.nginx.virtualHosts.<name>.onlySSL and services.nginx.virtualHosts.<name>.forceSSL
are mutually exclusive.
services.nginx.virtualHosts.<name>.onlySSL,
services.nginx.virtualHosts.<name>.forceSSL and
services.nginx.virtualHosts.<name>.rejectSSL are mutually exclusive.
'';
}
{
assertion = all (conf: !(conf.enableACME && conf.useACMEHost != null)) (attrValues virtualHosts);
assertion = any (host: host.rejectSSL) (attrValues virtualHosts) -> versionAtLeast cfg.package.version "1.19.4";
message = ''
services.nginx.virtualHosts.<name>.rejectSSL requires nginx version
1.19.4 or above; see the documentation for services.nginx.package.
'';
}
{
assertion = all (host: !(host.enableACME && host.useACMEHost != null)) (attrValues virtualHosts);
message = ''
Options services.nginx.service.virtualHosts.<name>.enableACME and
services.nginx.virtualHosts.<name>.useACMEHost are mutually exclusive.

View file

@ -118,6 +118,18 @@ with lib;
'';
};
rejectSSL = mkOption {
type = types.bool;
default = false;
description = ''
Whether to listen for and reject all HTTPS connections to this vhost. Useful in
<link linkend="opt-services.nginx.virtualHosts._name_.default">default</link>
server blocks to avoid serving the certificate for another vhost. Uses the
<literal>ssl_reject_handshake</literal> directive available in nginx versions
1.19.4 and above.
'';
};
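As a usage sketch (not part of this patch), a catch-all default server could use the new option to refuse TLS handshakes for unknown names instead of serving another vhost's certificate (requires nginx 1.19.4+, per the assertion added above):
# Illustrative fragment for configuration.nix.
services.nginx.virtualHosts."_" = {
  default = true;
  rejectSSL = true;
};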
sslCertificate = mkOption {
type = types.path;
example = "/var/host.cert";

View file

@ -128,6 +128,7 @@ in
cinnamon-session
cinnamon-desktop
cinnamon-menus
cinnamon-translations
# utils needed by some scripts
killall
@ -137,6 +138,9 @@ in
# cinnamon-killer-daemon: provided by cinnamon-common
gnome.networkmanagerapplet # session requirement - also nm-applet not needed
# For a polkit authentication agent
polkit_gnome
# packages
nemo
cinnamon-control-center

View file

@ -58,8 +58,8 @@ in
# Link some extra directories in /run/current-system/software/share
environment.pathsToLink = [ "/share" ];
# virtual file systems support for PCManFM-QT
services.gvfs.enable = true;
services.gvfs.package = pkgs.gvfs;
services.upower.enable = config.powerManagement.enable;
};

View file

@ -8,7 +8,7 @@ let
cfg = xcfg.desktopManager.plasma5;
libsForQt5 = pkgs.plasma5Packages;
inherit (libsForQt5) kdeApplications kdeFrameworks plasma5;
inherit (libsForQt5) kdeGear kdeFrameworks plasma5;
inherit (pkgs) writeText;
pulseaudio = config.hardware.pulseaudio;
@ -213,7 +213,7 @@ in
environment.systemPackages =
with libsForQt5;
with plasma5; with kdeApplications; with kdeFrameworks;
with plasma5; with kdeGear; with kdeFrameworks;
[
frameworkintegration
kactivities
@ -316,6 +316,7 @@ in
++ lib.optionals config.hardware.bluetooth.enable [ bluedevil bluez-qt pkgs.openobex pkgs.obexftp ]
++ lib.optional config.networking.networkmanager.enable plasma-nm
++ lib.optional config.hardware.pulseaudio.enable plasma-pa
++ lib.optional config.services.pipewire.pulse.enable plasma-pa
++ lib.optional config.powerManagement.enable powerdevil
++ lib.optional config.services.colord.enable pkgs.colord-kde
++ lib.optionals config.services.samba.enable [ kdenetwork-filesharing pkgs.samba ]

View file

@ -140,24 +140,27 @@ let
umount /crypt-ramfs 2>/dev/null
'';
openCommand = name': { name, device, header, keyFile, keyFileSize, keyFileOffset, allowDiscards, yubikey, gpgCard, fido2, fallbackToPassword, preOpenCommands, postOpenCommands,... }: assert name' == name;
openCommand = name: dev: assert name == dev.name;
let
csopen = "cryptsetup luksOpen ${device} ${name} ${optionalString allowDiscards "--allow-discards"} ${optionalString (header != null) "--header=${header}"}";
cschange = "cryptsetup luksChangeKey ${device} ${optionalString (header != null) "--header=${header}"}";
csopen = "cryptsetup luksOpen ${dev.device} ${dev.name}"
+ optionalString dev.allowDiscards " --allow-discards"
+ optionalString dev.bypassWorkqueues " --perf-no_read_workqueue --perf-no_write_workqueue"
+ optionalString (dev.header != null) " --header=${dev.header}";
cschange = "cryptsetup luksChangeKey ${dev.device} ${optionalString (dev.header != null) "--header=${dev.header}"}";
in ''
# Wait for luksRoot (and optionally keyFile and/or header) to appear, e.g.
# if on a USB drive.
wait_target "device" ${device} || die "${device} is unavailable"
wait_target "device" ${dev.device} || die "${dev.device} is unavailable"
${optionalString (header != null) ''
wait_target "header" ${header} || die "${header} is unavailable"
${optionalString (dev.header != null) ''
wait_target "header" ${dev.header} || die "${dev.header} is unavailable"
''}
do_open_passphrase() {
local passphrase
while true; do
echo -n "Passphrase for ${device}: "
echo -n "Passphrase for ${dev.device}: "
passphrase=
while true; do
if [ -e /crypt-ramfs/passphrase ]; then
@ -166,7 +169,7 @@ let
break
else
# ask cryptsetup-askpass
echo -n "${device}" > /crypt-ramfs/device
echo -n "${dev.device}" > /crypt-ramfs/device
# and try reading it from /dev/console with a timeout
IFS= read -t 1 -r passphrase
@ -182,7 +185,7 @@ let
fi
fi
done
echo -n "Verifying passphrase for ${device}..."
echo -n "Verifying passphrase for ${dev.device}..."
echo -n "$passphrase" | ${csopen} --key-file=-
if [ $? == 0 ]; then
echo " - success"
@ -202,13 +205,13 @@ let
# LUKS
open_normally() {
${if (keyFile != null) then ''
if wait_target "key file" ${keyFile}; then
${csopen} --key-file=${keyFile} \
${optionalString (keyFileSize != null) "--keyfile-size=${toString keyFileSize}"} \
${optionalString (keyFileOffset != null) "--keyfile-offset=${toString keyFileOffset}"}
${if (dev.keyFile != null) then ''
if wait_target "key file" ${dev.keyFile}; then
${csopen} --key-file=${dev.keyFile} \
${optionalString (dev.keyFileSize != null) "--keyfile-size=${toString dev.keyFileSize}"} \
${optionalString (dev.keyFileOffset != null) "--keyfile-offset=${toString dev.keyFileOffset}"}
else
${if fallbackToPassword then "echo" else "die"} "${keyFile} is unavailable"
${if dev.fallbackToPassword then "echo" else "die"} "${dev.keyFile} is unavailable"
echo " - failing back to interactive password prompt"
do_open_passphrase
fi
@ -217,7 +220,7 @@ let
''}
}
${optionalString (luks.yubikeySupport && (yubikey != null)) ''
${optionalString (luks.yubikeySupport && (dev.yubikey != null)) ''
# YubiKey
rbtohex() {
( od -An -vtx1 | tr -d ' \n' )
@ -243,16 +246,16 @@ let
local new_response
local new_k_luks
mount -t ${yubikey.storage.fsType} ${yubikey.storage.device} /crypt-storage || \
mount -t ${dev.yubikey.storage.fsType} ${dev.yubikey.storage.device} /crypt-storage || \
die "Failed to mount YubiKey salt storage device"
salt="$(cat /crypt-storage${yubikey.storage.path} | sed -n 1p | tr -d '\n')"
iterations="$(cat /crypt-storage${yubikey.storage.path} | sed -n 2p | tr -d '\n')"
salt="$(cat /crypt-storage${dev.yubikey.storage.path} | sed -n 1p | tr -d '\n')"
iterations="$(cat /crypt-storage${dev.yubikey.storage.path} | sed -n 2p | tr -d '\n')"
challenge="$(echo -n $salt | openssl-wrap dgst -binary -sha512 | rbtohex)"
response="$(ykchalresp -${toString yubikey.slot} -x $challenge 2>/dev/null)"
response="$(ykchalresp -${toString dev.yubikey.slot} -x $challenge 2>/dev/null)"
for try in $(seq 3); do
${optionalString yubikey.twoFactor ''
${optionalString dev.yubikey.twoFactor ''
echo -n "Enter two-factor passphrase: "
k_user=
while true; do
@ -278,9 +281,9 @@ let
''}
if [ ! -z "$k_user" ]; then
k_luks="$(echo -n $k_user | pbkdf2-sha512 ${toString yubikey.keyLength} $iterations $response | rbtohex)"
k_luks="$(echo -n $k_user | pbkdf2-sha512 ${toString dev.yubikey.keyLength} $iterations $response | rbtohex)"
else
k_luks="$(echo | pbkdf2-sha512 ${toString yubikey.keyLength} $iterations $response | rbtohex)"
k_luks="$(echo | pbkdf2-sha512 ${toString dev.yubikey.keyLength} $iterations $response | rbtohex)"
fi
echo -n "$k_luks" | hextorb | ${csopen} --key-file=-
@ -302,7 +305,7 @@ let
[ "$opened" == false ] && die "Maximum authentication errors reached"
echo -n "Gathering entropy for new salt (please enter random keys to generate entropy if this blocks for long)..."
for i in $(seq ${toString yubikey.saltLength}); do
for i in $(seq ${toString dev.yubikey.saltLength}); do
byte="$(dd if=/dev/random bs=1 count=1 2>/dev/null | rbtohex)";
new_salt="$new_salt$byte";
echo -n .
@ -310,25 +313,25 @@ let
echo "ok"
new_iterations="$iterations"
${optionalString (yubikey.iterationStep > 0) ''
new_iterations="$(($new_iterations + ${toString yubikey.iterationStep}))"
${optionalString (dev.yubikey.iterationStep > 0) ''
new_iterations="$(($new_iterations + ${toString dev.yubikey.iterationStep}))"
''}
new_challenge="$(echo -n $new_salt | openssl-wrap dgst -binary -sha512 | rbtohex)"
new_response="$(ykchalresp -${toString yubikey.slot} -x $new_challenge 2>/dev/null)"
new_response="$(ykchalresp -${toString dev.yubikey.slot} -x $new_challenge 2>/dev/null)"
if [ ! -z "$k_user" ]; then
new_k_luks="$(echo -n $k_user | pbkdf2-sha512 ${toString yubikey.keyLength} $new_iterations $new_response | rbtohex)"
new_k_luks="$(echo -n $k_user | pbkdf2-sha512 ${toString dev.yubikey.keyLength} $new_iterations $new_response | rbtohex)"
else
new_k_luks="$(echo | pbkdf2-sha512 ${toString yubikey.keyLength} $new_iterations $new_response | rbtohex)"
new_k_luks="$(echo | pbkdf2-sha512 ${toString dev.yubikey.keyLength} $new_iterations $new_response | rbtohex)"
fi
echo -n "$new_k_luks" | hextorb > /crypt-ramfs/new_key
echo -n "$k_luks" | hextorb | ${cschange} --key-file=- /crypt-ramfs/new_key
if [ $? == 0 ]; then
echo -ne "$new_salt\n$new_iterations" > /crypt-storage${yubikey.storage.path}
echo -ne "$new_salt\n$new_iterations" > /crypt-storage${dev.yubikey.storage.path}
else
echo "Warning: Could not update LUKS key, current challenge persists!"
fi
@ -338,7 +341,7 @@ let
}
open_with_hardware() {
if wait_yubikey ${toString yubikey.gracePeriod}; then
if wait_yubikey ${toString dev.yubikey.gracePeriod}; then
do_open_yubikey
else
echo "No YubiKey found, falling back to non-YubiKey open procedure"
@ -347,7 +350,7 @@ let
}
''}
${optionalString (luks.gpgSupport && (gpgCard != null)) ''
${optionalString (luks.gpgSupport && (dev.gpgCard != null)) ''
do_open_gpg_card() {
# Make all of these local to this function
@ -355,12 +358,12 @@ let
local pin
local opened
gpg --import /gpg-keys/${device}/pubkey.asc > /dev/null 2> /dev/null
gpg --import /gpg-keys/${dev.device}/pubkey.asc > /dev/null 2> /dev/null
gpg --card-status > /dev/null 2> /dev/null
for try in $(seq 3); do
echo -n "PIN for GPG Card associated with device ${device}: "
echo -n "PIN for GPG Card associated with device ${dev.device}: "
pin=
while true; do
if [ -e /crypt-ramfs/passphrase ]; then
@ -382,8 +385,8 @@ let
fi
fi
done
echo -n "Verifying passphrase for ${device}..."
echo -n "$pin" | gpg -q --batch --passphrase-fd 0 --pinentry-mode loopback -d /gpg-keys/${device}/cryptkey.gpg 2> /dev/null | ${csopen} --key-file=- > /dev/null 2> /dev/null
echo -n "Verifying passphrase for ${dev.device}..."
echo -n "$pin" | gpg -q --batch --passphrase-fd 0 --pinentry-mode loopback -d /gpg-keys/${dev.device}/cryptkey.gpg 2> /dev/null | ${csopen} --key-file=- > /dev/null 2> /dev/null
if [ $? == 0 ]; then
echo " - success"
${if luks.reusePassphrases then ''
@ -403,7 +406,7 @@ let
}
open_with_hardware() {
if wait_gpgcard ${toString gpgCard.gracePeriod}; then
if wait_gpgcard ${toString dev.gpgCard.gracePeriod}; then
do_open_gpg_card
else
echo "No GPG Card found, falling back to normal open procedure"
@ -412,15 +415,15 @@ let
}
''}
${optionalString (luks.fido2Support && (fido2.credential != null)) ''
${optionalString (luks.fido2Support && (dev.fido2.credential != null)) ''
open_with_hardware() {
local passphrase
${if fido2.passwordLess then ''
${if dev.fido2.passwordLess then ''
export passphrase=""
'' else ''
read -rsp "FIDO2 salt for ${device}: " passphrase
read -rsp "FIDO2 salt for ${dev.device}: " passphrase
echo
''}
${optionalString (lib.versionOlder kernelPackages.kernel.version "5.4") ''
@ -428,7 +431,7 @@ let
echo "Please move your mouse to create needed randomness."
''}
echo "Waiting for your FIDO2 device..."
fido2luks open ${device} ${name} ${fido2.credential} --await-dev ${toString fido2.gracePeriod} --salt string:$passphrase
fido2luks open ${dev.device} ${dev.name} ${dev.fido2.credential} --await-dev ${toString dev.fido2.gracePeriod} --salt string:$passphrase
if [ $? -ne 0 ]; then
echo "No FIDO2 key found, falling back to normal open procedure"
open_normally
@ -437,16 +440,16 @@ let
''}
# commands to run right before we mount our device
${preOpenCommands}
${dev.preOpenCommands}
${if (luks.yubikeySupport && (yubikey != null)) || (luks.gpgSupport && (gpgCard != null)) || (luks.fido2Support && (fido2.credential != null)) then ''
${if (luks.yubikeySupport && (dev.yubikey != null)) || (luks.gpgSupport && (dev.gpgCard != null)) || (luks.fido2Support && (dev.fido2.credential != null)) then ''
open_with_hardware
'' else ''
open_normally
''}
# commands to run right after we mounted our device
${postOpenCommands}
${dev.postOpenCommands}
'';
askPass = pkgs.writeScriptBin "cryptsetup-askpass" ''
@ -621,6 +624,17 @@ in
'';
};
bypassWorkqueues = mkOption {
default = false;
type = types.bool;
description = ''
Whether to bypass dm-crypt's internal read and write workqueues.
Enabling this should improve performance on SSDs; see
<link xlink:href="https://wiki.archlinux.org/index.php/Dm-crypt/Specialties#Disable_workqueue_for_increased_solid_state_drive_(SSD)_performance">here</link>
for more information. Needs Linux 5.9 or later.
'';
};
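As a usage sketch (not part of this patch), the new option could be enabled for a LUKS-encrypted SSD roughly like this; the UUID is a placeholder:
# Illustrative fragment; requires Linux 5.9+, per the assertion added below.
boot.initrd.luks.devices.root = {
  device = "/dev/disk/by-uuid/<uuid>";   # placeholder
  allowDiscards = true;
  bypassWorkqueues = true;
};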
fallbackToPassword = mkOption {
default = false;
type = types.bool;
@ -833,6 +847,11 @@ in
{ assertion = !(luks.fido2Support && luks.yubikeySupport);
message = "FIDO2 and YubiKey may not be used at the same time.";
}
{ assertion = any (dev: dev.bypassWorkqueues) (attrValues luks.devices)
-> versionAtLeast kernelPackages.kernel.version "5.9";
message = "boot.initrd.luks.devices.<name>.bypassWorkqueues is not supported for kernels older than 5.9";
}
];
# actually, sbp2 driver is the one enabling the DMA attack, but this needs to be tested

View file

@ -321,6 +321,7 @@ in
RemainAfterExit = true;
};
unitConfig = {
ConditionPathIsMountPoint = "!/sys/fs/pstore";
ConditionVirtualization = "!container";
DefaultDependencies = false; # needed to prevent a cycle
};

View file

@ -11,9 +11,10 @@ let
auth_unix_rw = "polkit"
${cfg.extraConfig}
'';
ovmfFilePrefix = if pkgs.stdenv.isAarch64 then "AAVMF" else "OVMF";
qemuConfigFile = pkgs.writeText "qemu.conf" ''
${optionalString cfg.qemuOvmf ''
nvram = ["/run/libvirt/nix-ovmf/OVMF_CODE.fd:/run/libvirt/nix-ovmf/OVMF_VARS.fd"]
nvram = [ "/run/libvirt/nix-ovmf/${ovmfFilePrefix}_CODE.fd:/run/libvirt/nix-ovmf/${ovmfFilePrefix}_VARS.fd" ]
''}
${optionalString (!cfg.qemuRunAsRoot) ''
user = "qemu-libvirtd"
@ -206,8 +207,8 @@ in {
done
${optionalString cfg.qemuOvmf ''
ln -s --force ${pkgs.OVMF.fd}/FV/OVMF_CODE.fd /run/${dirName}/nix-ovmf/
ln -s --force ${pkgs.OVMF.fd}/FV/OVMF_VARS.fd /run/${dirName}/nix-ovmf/
ln -s --force ${pkgs.OVMF.fd}/FV/${ovmfFilePrefix}_CODE.fd /run/${dirName}/nix-ovmf/
ln -s --force ${pkgs.OVMF.fd}/FV/${ovmfFilePrefix}_VARS.fd /run/${dirName}/nix-ovmf/
''}
'';

View file

@ -428,7 +428,7 @@ let
extraVeths = {};
additionalCapabilities = [];
ephemeral = false;
timeoutStartSec = "15s";
timeoutStartSec = "1min";
allowedDevices = [];
hostAddress = null;
hostAddress6 = null;
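As a usage sketch (not part of this patch), the new 1min default could be overridden for a declarative container that is slow to start; the container name and inner config are placeholders:
# Illustrative fragment for configuration.nix.
containers.webapp = {
  autoStart = true;
  timeoutStartSec = "5min";
  config = { ... }: {
    services.nginx.enable = true;
  };
};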

View file

@ -47,11 +47,12 @@ in
boot = handleTestOn ["x86_64-linux"] ./boot.nix {}; # syslinux is unsupported on aarch64
boot-stage1 = handleTest ./boot-stage1.nix {};
borgbackup = handleTest ./borgbackup.nix {};
botamusique = handleTest ./botamusique.nix {};
buildbot = handleTest ./buildbot.nix {};
buildkite-agents = handleTest ./buildkite-agents.nix {};
caddy = handleTest ./caddy.nix {};
cadvisor = handleTestOn ["x86_64-linux"] ./cadvisor.nix {};
cage = handleTestOn ["x86_64-linux"] ./cage.nix {};
cage = handleTest ./cage.nix {};
cagebreak = handleTest ./cagebreak.nix {};
calibre-web = handleTest ./calibre-web.nix {};
cassandra_2_1 = handleTest ./cassandra.nix { testPackage = pkgs.cassandra_2_1; };
@ -384,6 +385,7 @@ in
snapcast = handleTest ./snapcast.nix {};
snapper = handleTest ./snapper.nix {};
sogo = handleTest ./sogo.nix {};
solanum = handleTest ./solanum.nix {};
solr = handleTest ./solr.nix {};
sonarr = handleTest ./sonarr.nix {};
spacecookie = handleTest ./spacecookie.nix {};

View file

@ -14,6 +14,9 @@ let assertions = rec {
'';
unit = name: state: ''
with subtest("Unit ${name} should be ${state}"):
if "${state}" == "active":
machine.wait_for_unit("${name}")
else:
machine.require_unit_state("${name}", "${state}")
'';
version = ''
@ -44,9 +47,19 @@ let assertions = rec {
if present then
unit "atop.service" "active"
+ ''
with subtest("atop.service should have written some data to /var/log/atop"):
with subtest("atop.service should write some data to /var/log/atop"):
def has_data_files(last: bool) -> bool:
files = int(machine.succeed("ls -1 /var/log/atop | wc -l"))
assert files > 0, "Expected at least 1 data file"
if files == 0:
machine.log("Did not find at least one 1 data file")
if not last:
machine.log("Will retry...")
return False
return True
with machine.nested("Waiting for data files"):
retry(has_data_files)
'' else unit "atop.service" "inactive";
atopRotateTimer = present:
unit "atop-rotate.timer" (if present then "active" else "inactive");
@ -55,11 +68,21 @@ let assertions = rec {
unit "atopacct.service" "active"
+ ''
with subtest("atopacct.service should enable process accounting"):
machine.succeed("test -f /run/pacct_source")
machine.wait_until_succeeds("test -f /run/pacct_source")
with subtest("atopacct.service should write data to /run/pacct_shadow.d"):
def has_data_files(last: bool) -> bool:
files = int(machine.succeed("ls -1 /run/pacct_shadow.d | wc -l"))
assert files >= 1, "Expected at least 1 pacct_shadow.d file"
if files == 0:
machine.log("Did not find at least one 1 data file")
if not last:
machine.log("Will retry...")
return False
return True
with machine.nested("Waiting for data files"):
retry(has_data_files)
'' else unit "atopacct.service" "inactive";
netatop = present:
if present then

View file

@ -0,0 +1,47 @@
import ./make-test-python.nix ({ pkgs, lib, ...} :
{
name = "botamusique";
meta.maintainers = with lib.maintainers; [ hexa ];
nodes = {
machine = { config, ... }: {
services.murmur = {
enable = true;
registerName = "NixOS tests";
};
services.botamusique = {
enable = true;
settings = {
server = {
channel = "NixOS tests";
};
bot = {
version = false;
auto_check_update = false;
};
};
};
};
};
testScript = ''
start_all()
machine.wait_for_unit("murmur.service")
machine.wait_for_unit("botamusique.service")
machine.sleep(10)
machine.wait_until_succeeds(
"journalctl -u murmur.service -e | grep -q '<1:botamusique(-1)> Authenticated'"
)
with subtest("Check systemd hardening"):
output = machine.execute("systemctl show botamusique.service")[1]
machine.log(output)
output = machine.execute("systemd-analyze security botamusique.service")[1]
machine.log(output)
'';
})

View file

@ -18,10 +18,8 @@ import ./make-test-python.nix ({ pkgs, ...} :
};
virtualisation.memorySize = 1024;
# Need to switch to a different VGA card / GPU driver because Cage segfaults with the default one (std):
# machine # [ 14.355893] .cage-wrapped[736]: segfault at 20 ip 00007f035fa0d8c7 sp 00007ffce9e4a2f0 error 4 in libwlroots.so.8[7f035fa07000+5a000]
# machine # [ 14.358108] Code: 4f a8 ff ff eb aa 0f 1f 44 00 00 c3 0f 1f 80 00 00 00 00 41 54 49 89 f4 55 31 ed 53 48 89 fb 48 8d 7f 18 48 8d 83 b8 00 00 00 <80> 7f 08 00 75 0d 48 83 3f 00 0f 85 91 00 00 00 48 89 fd 48 83 c7
virtualisation.qemu.options = [ "-vga virtio" ];
# Need to switch to a different GPU driver than the default one (-vga std) so that Cage can launch:
virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ];
};
enableOCR = true;

View file

@ -36,8 +36,8 @@ in
environment.systemPackages = [ pkgs.cagebreak pkgs.wayland-utils ];
virtualisation.memorySize = 1024;
# Need to switch to a different VGA card / GPU driver than the default one (std) so that Cagebreak can launch:
virtualisation.qemu.options = [ "-vga virtio" ];
# Need to switch to a different GPU driver than the default one (-vga std) so that Cagebreak can launch:
virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ];
};
enableOCR = true;

View file

@ -20,6 +20,20 @@ import ./make-test-python.nix ({ pkgs, ... }: {
docker.wait_for_unit("sockets.target")
with subtest("includeStorePath"):
with subtest("assumption"):
docker.succeed("${examples.helloOnRoot} | docker load")
docker.succeed("set -euo pipefail; docker run --rm hello | grep -i hello")
docker.succeed("docker image rm hello:latest")
with subtest("includeStorePath = false; breaks example"):
docker.succeed("${examples.helloOnRootNoStore} | docker load")
docker.fail("set -euo pipefail; docker run --rm hello | grep -i hello")
docker.succeed("docker image rm hello:latest")
with subtest("includeStorePath = false; works with mounted store"):
docker.succeed("${examples.helloOnRootNoStore} | docker load")
docker.succeed("set -euo pipefail; docker run --rm --volume ${builtins.storeDir}:${builtins.storeDir}:ro hello | grep -i hello")
docker.succeed("docker image rm hello:latest")
with subtest("Ensure Docker images use a stable date by default"):
docker.succeed(
"docker load --input='${examples.bash}'"

View file

@ -3,7 +3,8 @@
# client using their Keycloak login.
let
frontendUrl = "http://keycloak/auth";
certs = import ./common/acme/server/snakeoil-certs.nix;
frontendUrl = "https://${certs.domain}/auth";
initialAdminPassword = "h4IhoJFnt2iQIR9";
keycloakTest = import ./make-test-python.nix (
@ -17,12 +18,27 @@ let
nodes = {
keycloak = { ... }: {
virtualisation.memorySize = 1024;
security.pki.certificateFiles = [
certs.ca.cert
];
networking.extraHosts = ''
127.0.0.1 ${certs.domain}
'';
services.keycloak = {
enable = true;
inherit frontendUrl databaseType initialAdminPassword;
databaseUsername = "bogus";
databasePasswordFile = pkgs.writeText "dbPassword" "wzf6vOCbPp6cqTH";
inherit frontendUrl initialAdminPassword;
sslCertificate = certs.${certs.domain}.cert;
sslCertificateKey = certs.${certs.domain}.key;
database = {
type = databaseType;
username = "bogus";
passwordFile = pkgs.writeText "dbPassword" "wzf6vOCbPp6cqTH";
};
};
environment.systemPackages = with pkgs; [
xmlstarlet
libtidy

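The hunk above replaces the flat databaseType / databaseUsername / databasePasswordFile options with a nested database attribute set and adds sslCertificate / sslCertificateKey alongside an HTTPS frontendUrl. A minimal sketch of a standalone configuration using the new layout, assuming a PostgreSQL database; the option names are taken from the diff, every value is a placeholder.

# Sketch of the restructured services.keycloak options used above;
# domain, passwords and key paths are placeholders.
{
  services.keycloak = {
    enable = true;
    frontendUrl = "https://keycloak.example.com/auth";
    initialAdminPassword = "changeme";              # placeholder
    sslCertificate = "/run/keys/keycloak.crt";      # placeholder path
    sslCertificateKey = "/run/keys/keycloak.key";   # placeholder path
    database = {
      type = "postgresql";
      username = "keycloak";
      passwordFile = "/run/keys/keycloak-db-password";
    };
  };
}
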
View file

@ -33,14 +33,22 @@ in {
autosnap = true;
};
datasets."pool/test".useTemplate = [ "test" ];
datasets."pool/sanoid".useTemplate = [ "test" ];
extraArgs = [ "--verbose" ];
};
services.syncoid = {
enable = true;
sshKey = "/var/lib/syncoid/id_ecdsa";
commonArgs = [ "--no-sync-snap" ];
commands."pool/test".target = "root@target:pool/test";
commands = {
# Sync snapshot taken by sanoid
"pool/sanoid" = {
target = "root@target:pool/sanoid";
extraArgs = [ "--no-sync-snap" ];
};
# Take snapshot and sync
"pool/syncoid".target = "root@target:pool/syncoid";
};
};
};
target = { ... }: {
@ -54,18 +62,19 @@ in {
testScript = ''
source.succeed(
"mkdir /tmp/mnt",
"mkdir /mnt",
"parted --script /dev/vdb -- mklabel msdos mkpart primary 1024M -1s",
"udevadm settle",
"zpool create pool /dev/vdb1",
"zfs create -o mountpoint=legacy pool/test",
"mount -t zfs pool/test /tmp/mnt",
"zpool create pool -R /mnt /dev/vdb1",
"zfs create pool/sanoid",
"zfs create pool/syncoid",
"udevadm settle",
)
target.succeed(
"mkdir /mnt",
"parted --script /dev/vdb -- mklabel msdos mkpart primary 1024M -1s",
"udevadm settle",
"zpool create pool /dev/vdb1",
"zpool create pool -R /mnt /dev/vdb1",
"udevadm settle",
)
@ -76,16 +85,15 @@ in {
"chown -R syncoid:syncoid /var/lib/syncoid/",
)
source.succeed("touch /tmp/mnt/test.txt")
# Take snapshot with sanoid
source.succeed("touch /mnt/pool/sanoid/test.txt")
source.systemctl("start --wait sanoid.service")
# Sync snapshots
target.wait_for_open_port(22)
source.succeed("touch /mnt/pool/syncoid/test.txt")
source.systemctl("start --wait syncoid.service")
target.succeed(
"mkdir /tmp/mnt",
"zfs set mountpoint=legacy pool/test",
"mount -t zfs pool/test /tmp/mnt",
)
target.succeed("cat /tmp/mnt/test.txt")
target.succeed("cat /mnt/pool/sanoid/test.txt")
target.succeed("cat /mnt/pool/syncoid/test.txt")
'';
})

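The change above drops the global commonArgs and instead passes --no-sync-snap only for the dataset whose snapshots sanoid already creates, while the second dataset lets syncoid take its own snapshot. A minimal sketch of that per-command layout outside the test, with placeholder pool names, key path and SSH target:

# Sketch of the per-command syncoid layout exercised above;
# pool names, the key path and the SSH target are placeholders.
{
  services.syncoid = {
    enable = true;
    sshKey = "/var/lib/syncoid/id_ecdsa";
    commands = {
      # Replicate snapshots that sanoid already created:
      "pool/sanoid" = {
        target = "root@backup:pool/sanoid";
        extraArgs = [ "--no-sync-snap" ];
      };
      # Let syncoid take its own snapshot before sending:
      "pool/syncoid".target = "root@backup:pool/syncoid";
    };
  };
}
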
View file

@ -0,0 +1,89 @@
let
clients = [
"ircclient1"
"ircclient2"
];
server = "solanum";
ircPort = 6667;
channel = "nixos-cat";
iiDir = "/tmp/irc";
in
import ./make-test-python.nix ({ pkgs, lib, ... }: {
name = "solanum";
nodes = {
"${server}" = {
networking.firewall.allowedTCPPorts = [ ircPort ];
services.solanum = {
enable = true;
};
};
} // lib.listToAttrs (builtins.map (client: lib.nameValuePair client {
imports = [
./common/user-account.nix
];
systemd.services.ii = {
requires = [ "network.target" ];
wantedBy = [ "default.target" ];
serviceConfig = {
Type = "simple";
ExecStartPre = "mkdir -p ${iiDir}";
ExecStart = ''
${lib.getBin pkgs.ii}/bin/ii -n ${client} -s ${server} -i ${iiDir}
'';
User = "alice";
};
};
}) clients);
testScript =
let
msg = client: "Hello, my name is ${client}";
clientScript = client: [
''
${client}.wait_for_unit("network.target")
${client}.systemctl("start ii")
${client}.wait_for_unit("ii")
${client}.wait_for_file("${iiDir}/${server}/out")
''
# wait until first PING from server arrives before joining,
# so we don't try it too early
''
${client}.wait_until_succeeds("grep 'PING' ${iiDir}/${server}/out")
''
# join ${channel}
''
${client}.succeed("echo '/j #${channel}' > ${iiDir}/${server}/in")
${client}.wait_for_file("${iiDir}/${server}/#${channel}/in")
''
# send a greeting
''
${client}.succeed(
"echo '${msg client}' > ${iiDir}/${server}/#${channel}/in"
)
''
# check that all greetings arrived on all clients
] ++ builtins.map (other: ''
${client}.succeed(
"grep '${msg other}$' ${iiDir}/${server}/#${channel}/out"
)
'') clients;
# foldl', but requires a non-empty list instead of a start value
reduce = f: list:
builtins.foldl' f (builtins.head list) (builtins.tail list);
in ''
start_all()
${server}.systemctl("status solanum")
${server}.wait_for_open_port(${toString ircPort})
# run clientScript for all clients so that every list
# entry is executed by every client before advancing
# to the next one.
'' + lib.concatStrings
(reduce
(lib.zipListsWith (cs: c: cs + c))
(builtins.map clientScript clients));
})

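The reduce / zipListsWith combination at the end of the test interleaves the per-client scripts step by step instead of running one client's whole script before the next. A small, self-contained evaluation sketch of that interleaving; the strings are illustrative stand-ins for the script snippets.

# Illustration of the interleaving done by reduce + zipListsWith above;
# the strings stand in for the per-client script snippets.
let
  lib = (import <nixpkgs> { }).lib;
  reduce = f: list: builtins.foldl' f (builtins.head list) (builtins.tail list);
  scripts = [
    [ "c1-step1\n" "c1-step2\n" ]   # clientScript "ircclient1"
    [ "c2-step1\n" "c2-step2\n" ]   # clientScript "ircclient2"
  ];
in
  lib.concatStrings (reduce (lib.zipListsWith (cs: c: cs + c)) scripts)
  # evaluates to "c1-step1\nc2-step1\nc1-step2\nc2-step2\n"

Every client therefore sends its greeting before any client starts checking for the others' greetings, which is why the test interleaves rather than concatenates the scripts.
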
View file

@ -42,8 +42,8 @@ import ./make-test-python.nix ({ pkgs, lib, ...} :
programs.gnupg.agent.enable = true;
virtualisation.memorySize = 1024;
# Need to switch to a different VGA card / GPU driver than the default one (std) so that Sway can launch:
virtualisation.qemu.options = [ "-vga virtio" ];
# Need to switch to a different GPU driver than the default one (-vga std) so that Sway can launch:
virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ];
};
enableOCR = true;

View file

@ -8,7 +8,7 @@
, curl
, dbus
, doxygen
, ffmpeg_3
, ffmpeg
, fftw
, fftwSinglePrec
, flac
@ -56,13 +56,13 @@
}:
stdenv.mkDerivation rec {
pname = "ardour";
version = "6.5";
version = "6.7";
# don't fetch releases from the GitHub mirror, they are broken
src = fetchgit {
url = "git://git.ardour.org/ardour/ardour.git";
rev = version;
sha256 = "0sd38hchyr16biq9hcxha4ljy3pf0yhcgn90i5zfqcznnc57ildx";
sha256 = "19jc29fjwgvqbg3gnmy50mrz8mh5x4nwddglasvwx83nc87qwllx";
};
patches = [
@ -89,7 +89,7 @@ stdenv.mkDerivation rec {
cppunit
curl
dbus
ffmpeg_3
ffmpeg
fftw
fftwSinglePrec
flac
@ -148,8 +148,8 @@ stdenv.mkDerivation rec {
sed 's|/usr/include/libintl.h|${glibc.dev}/include/libintl.h|' -i wscript
patchShebangs ./tools/
substituteInPlace libs/ardour/video_tools_paths.cc \
--replace 'ffmpeg_exe = X_("");' 'ffmpeg_exe = X_("${ffmpeg_3}/bin/ffmpeg");' \
--replace 'ffprobe_exe = X_("");' 'ffprobe_exe = X_("${ffmpeg_3}/bin/ffprobe");'
--replace 'ffmpeg_exe = X_("");' 'ffmpeg_exe = X_("${ffmpeg}/bin/ffmpeg");' \
--replace 'ffprobe_exe = X_("");' 'ffprobe_exe = X_("${ffmpeg}/bin/ffprobe");'
'';
postInstall = ''

View file

@ -1,8 +1,49 @@
{ lib, mkDerivation, fetchFromGitHub, fetchpatch, boost, cmake, chromaprint, gettext, gst_all_1, liblastfm
, qtbase, qtx11extras, qttools
, taglib, fftw, glew, qjson, sqlite, libgpod, libplist, usbmuxd, libmtp
, libpulseaudio, gvfs, libcdio, libechonest, libspotify, pcre, projectm, protobuf
, qca2, pkg-config, sparsehash, config, makeWrapper, gst_plugins }:
{ lib
, mkDerivation
, fetchFromGitHub
, fetchpatch
, boost
, cmake
, chromaprint
, gettext
, gst_all_1
, liblastfm
, qtbase
, qtx11extras
, qttools
, taglib
, fftw
, glew
, qjson
, sqlite
, libgpod
, libplist
, usbmuxd
, libmtp
, libpulseaudio
, gvfs
, libcdio
, libechonest
, libspotify
, pcre
, projectm
, protobuf
, qca2
, pkg-config
, sparsehash
, config
, makeWrapper
, gst_plugins
, util-linux
, libunwind
, libselinux
, elfutils
, libsepol
, orc
, alsaLib
}:
let
withIpod = config.clementine.ipod or false;
@ -22,9 +63,26 @@ let
patches = [
./clementine-spotify-blob.patch
(fetchpatch {
# "short-term" fix for execution on wayland (1.4.0rc1-131-g2179027a6)
# for https://github.com/clementine-player/Clementine/issues/6587
url = "https://github.com/clementine-player/Clementine/commit/2179027a6d97530c857e43be873baacd696ff332.patch";
sha256 = "0344bfcyvjim5ph8w4km6zkg96rj5g9ybp9x14qgyw2gkdksimn6";
})
];
nativeBuildInputs = [ cmake pkg-config makeWrapper ];
nativeBuildInputs = [
cmake
pkg-config
makeWrapper
util-linux
libunwind
libselinux
elfutils
libsepol
orc
];
buildInputs = [
boost
@ -48,6 +106,8 @@ let
qttools
sqlite
taglib
alsaLib
]
++ lib.optionals (withIpod) [ libgpod libplist usbmuxd ]
++ lib.optionals (withMTP) [ libmtp ]
@ -132,4 +192,5 @@ let
};
};
in free
in
free

View file

@ -1,13 +1,22 @@
{ lib, stdenv, fetchurl, libogg }:
{ lib, stdenv, fetchurl, fetchpatch, libogg }:
stdenv.mkDerivation rec {
name = "flac-1.3.3";
pname = "flac";
version = "1.3.3";
src = fetchurl {
url = "http://downloads.xiph.org/releases/flac/${name}.tar.xz";
url = "http://downloads.xiph.org/releases/flac/${pname}-${version}.tar.xz";
sha256 = "0j0p9sf56a2fm2hkjnf7x3py5ir49jyavg4q5zdyd7bcf6yq4gi1";
};
patches = [
(fetchpatch {
name = "CVE-2020-0499.patch";
url = "https://github.com/xiph/flac/commit/2e7931c27eb15e387da440a37f12437e35b22dd4.patch";
sha256 = "160qzq9ms5addz7sx06pnyjjkqrffr54r4wd8735vy4x008z71ah";
})
];
buildInputs = [ libogg ];
#doCheck = true; # takes lots of time

View file

@ -13,13 +13,13 @@
stdenv.mkDerivation rec {
pname = "ft2-clone";
version = "1.46";
version = "1.47";
src = fetchFromGitHub {
owner = "8bitbubsy";
repo = "ft2-clone";
rev = "v${version}";
sha256 = "sha256-Y6FgIbNCsxnM/B2bEB7oufBjU1BnBYaz7/oysWttIOc=";
sha256 = "sha256-KLHJROOtRPtGHBYEMByY7LG6FY4vES6WndCiz7okan8=";
};
# Adapt the linux-only CMakeLists to darwin (more reliable than make-macos.sh)

View file

@ -55,7 +55,6 @@ in stdenv.mkDerivation {
gsettings-desktop-schemas
] ++ gst_plugins;
enableParallelBuilding = true;
postInstall = ''
glib-compile-schemas "$out"/share/glib-2.0/schemas
'';

View file

@ -0,0 +1,37 @@
{ lib
, fetchFromGitLab
, rustPlatform
, pkg-config
, clang
, libclang
, glib
, gtk4
, pipewire
}:
rustPlatform.buildRustPackage rec {
pname = "helvum";
version = "0.2.0";
src = fetchFromGitLab {
domain = "gitlab.freedesktop.org";
owner = "ryuukyu";
repo = pname;
rev = version;
sha256 = "sha256-sQ4epL3QNOLHuR/dr/amHgiaxV/1SWeb3eijnjAAR3w=";
};
cargoSha256 = "sha256-uNTSU06Fz/ud04K40e98rb7o/uAht0DsiJOXeHX72vw=";
nativeBuildInputs = [ clang pkg-config ];
buildInputs = [ glib gtk4 pipewire ];
LIBCLANG_PATH = "${libclang.lib}/lib";
meta = with lib; {
description = "A GTK patchbay for pipewire";
homepage = "https://gitlab.freedesktop.org/ryuukyu/helvum";
license = licenses.gpl3Only;
maintainers = with maintainers; [ fufexan ];
};
}

Some files were not shown because too many files have changed in this diff.