Project import generated by Copybara.

GitOrigin-RevId: 536fe36e23ab0fc8b7f35c24603422eee9fc17a2
Author: Default email (2021-02-05 18:12:51 +01:00)
parent f55f861e17
commit ae91cbe6cc
10137 changed files with 93303 additions and 73740 deletions


@@ -47,27 +47,17 @@ indent_style = space
 insert_final_newline = unset
 trim_trailing_whitespace = unset

-[*.{key,ovpn}]
+[*.{asc,key,ovpn}]
 insert_final_newline = unset
 end_of_line = unset
+trim_trailing_whitespace = unset

 [*.lock]
 indent_size = unset

-[deps.nix]
-insert_final_newline = unset
-
-[pkgs/tools/networking/dd-agent/*-deps.nix]
-insert_final_newline = unset
-
 [eggs.nix]
 trim_trailing_whitespace = unset

-[gemset.nix]
-insert_final_newline = unset
-
-[node-{composition,packages,packages-generated}.nix]
-insert_final_newline = unset
-
 [nixos/modules/services/networking/ircd-hybrid/*.{conf,in}]
 trim_trailing_whitespace = unset

@@ -92,15 +82,6 @@ insert_final_newline = unset
 indent_style = unset
 trim_trailing_whitespace = unset

-[pkgs/development/mobile/androidenv/generated/{addons,packages}.nix]
-trim_trailing_whitespace = unset
-
-[pkgs/development/node-packages/composition.nix]
-insert_final_newline = unset
-
-[pkgs/development/{perl-modules,ocaml-modules,tools/ocaml}/**]
-indent_style = unset
-
 [pkgs/servers/dict/wordnet_structures.py]
 trim_trailing_whitespace = unset


@@ -76,6 +76,7 @@
 /pkgs/development/interpreters/python @FRidh
 /pkgs/development/python-modules @FRidh @jonringer
 /doc/languages-frameworks/python.section.md @FRidh
+/pkgs/development/tools/poetry2nix @adisbladis

 # Haskell
 /pkgs/development/compilers/ghc @cdepillabout


@@ -37,7 +37,7 @@ under the terms of [COPYING](../COPYING), which is an MIT-like license.
 * Not start with the package name.
 * Not have a period at the end.
 * `meta.license` must be set and fit the upstream license.
-  * If there is no upstream license, `meta.license` should default to `stdenv.lib.licenses.unfree`.
+  * If there is no upstream license, `meta.license` should default to `lib.licenses.unfree`.
 * `meta.maintainers` must be set.

 See the nixpkgs manual for more details on [standard meta-attributes](https://nixos.org/nixpkgs/manual/#sec-standard-meta-attributes) and on how to [submit changes to nixpkgs](https://nixos.org/nixpkgs/manual/#chap-submitting-changes).
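For orientation, here is a minimal sketch of a `meta` fragment that satisfies the checklist above (the description, homepage and maintainer name are made up for illustration):

```nix
meta = with lib; {
  # Starts with a capital letter, does not repeat the package name, no trailing period
  description = "Command-line tool for frobnicating widgets";
  homepage = "https://example.org/frobnicate";
  # Must match the upstream license; licenses.unfree is only the fallback when upstream has none
  license = licenses.mit;
  maintainers = with maintainers; [ someMaintainer ];
};
```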


@@ -16,5 +16,5 @@ jobs:
 -X POST \
 -H "Accept: application/vnd.github.v3+json" \
 -H "Authorization: token $GITHUB_TOKEN" \
--d '{"state": "failure", "target_url": " ", "description": "This failed status will be cleared when ofborg finishes eval.", "context": "Wait for ofborg"}' \
+-d '{"state": "pending", "target_url": " ", "description": "This pending status will be cleared when ofborg starts eval.", "context": "Wait for ofborg"}' \
 "https://api.github.com/repos/NixOS/nixpkgs/statuses/${{ github.event.pull_request.head.sha }}"


@@ -178,6 +178,12 @@ args.stdenv.mkDerivation (args // {
 </programlisting>
    </para>
   </listitem>
+  <listitem>
+   <para>
+    Arguments should be listed in the order they are used, with the
+    exception of <varname>lib</varname>, which always goes first.
+   </para>
+  </listitem>
   <listitem>
    <para>
     Prefer using the top-level <varname>lib</varname> over its alias
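To illustrate the argument-ordering convention added above, a small sketch of a package header (the package and its inputs are hypothetical): `lib` comes first, and the remaining arguments follow in the order they are used in the body.

```nix
{ lib, stdenv, fetchurl, zlib }:

stdenv.mkDerivation rec {
  pname = "example";
  version = "1.0";
  src = fetchurl {
    url = "https://example.org/example-${version}.tar.gz";
    sha256 = lib.fakeSha256; # placeholder hash
  };
  buildInputs = [ zlib ];
  meta.license = lib.licenses.mit;
}
```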


@@ -12,7 +12,7 @@ xlink:href="https://github.com/NixOS/nixpkgs/tree/master/doc">doc</filename> sub
 <screen>
 <prompt>$ </prompt>cd /path/to/nixpkgs/doc
 <prompt>$ </prompt>nix-shell
-<prompt>[nix-shell]$ </prompt>make
+<prompt>[nix-shell]$ </prompt>make $makeFlags
 </screen>
 <para>
  If you experience problems, run <command>make debug</command> to help understand the docbook errors.


@@ -1711,4 +1711,43 @@ recursiveUpdate
  </example>
 </section>

+<section xml:id="function-library-lib.attrsets.cartesianProductOfSets">
+ <title><function>lib.attrsets.cartesianProductOfSets</function></title>
+ <subtitle><literal>cartesianProductOfSets :: AttrSet -> [ AttrSet ]</literal>
+ </subtitle>
+ <xi:include href="./locations.xml" xpointer="lib.attrsets.cartesianProductOfSets" />
+ <para>
+  Return the cartesian product of attribute set value combinations.
+ </para>
+ <variablelist>
+  <varlistentry>
+   <term>
+    <varname>set</varname>
+   </term>
+   <listitem>
+    <para>
+     An attribute set with attributes that carry lists of values.
+    </para>
+   </listitem>
+  </varlistentry>
+ </variablelist>
+ <example xml:id="function-library-lib.attrsets.cartesianProductOfSets-example">
+  <title>Creating the cartesian product of a list of attribute values</title>
+  <programlisting><![CDATA[
+cartesianProductOfSets { a = [ 1 2 ]; b = [ 10 20 ]; }
+=> [
+     { a = 1; b = 10; }
+     { a = 1; b = 20; }
+     { a = 2; b = 10; }
+     { a = 2; b = 20; }
+   ]
+]]></programlisting>
+ </example>
+</section>
 </section>


@@ -46,7 +46,7 @@ depend: standard-library
 More information can be found in the [official Agda documentation on library management](https://agda.readthedocs.io/en/v2.6.1/tools/package-system.html).

 ## Compiling Agda

-Agda modules can be compiled with the `--compile` flag. A version of `ghc` with `ieee` is made available to the Agda program via the `--with-compiler` flag.
+Agda modules can be compiled with the `--compile` flag. A version of `ghc` with `ieee754` is made available to the Agda program via the `--with-compiler` flag.
 This can be overridden by a different version of `ghc` as follows:

 ```


@@ -42,8 +42,8 @@ It also takes other standard `mkDerivation` attributes, they are added as such,
 Here is a simple package example. It is a pure Coq library, thus it depends on Coq. It builds on the Mathematical Components library, thus it also takes some `mathcomp` derivations as `extraBuildInputs`.

 ```nix
-{ coq, mkCoqDerivation, mathcomp, mathcomp-finmap, mathcomp-bigenough,
-  lib, version ? null }:
+{ lib, mkCoqDerivation, version ? null
+, coq, mathcomp, mathcomp-finmap, mathcomp-bigenough }:
 with lib; mkCoqDerivation {
   /* namePrefix leads to e.g. `name = coq8.11-mathcomp1.11-multinomials-1.5.2` */
   namePrefix = [ "coq" "mathcomp" ];


@@ -60,7 +60,7 @@ See the `zlib` example:
     stdenv = pkgs.emscriptenStdenv;
   }).overrideDerivation
     (old: rec {
-      buildInputs = old.buildInputs ++ [ pkgconfig ];
+      buildInputs = old.buildInputs ++ [ pkg-config ];
       # we need to reset this setting!
       NIX_CFLAGS_COMPILE="";
       configurePhase = ''
@@ -117,8 +117,8 @@ This `xmlmirror` example features a emscriptenPackage which is defined completel
   xmlmirror = pkgs.buildEmscriptenPackage rec {
     name = "xmlmirror";

-    buildInputs = [ pkgconfig autoconf automake libtool gnumake libxml2 nodejs openjdk json_c ];
-    nativeBuildInputs = [ pkgconfig zlib ];
+    buildInputs = [ pkg-config autoconf automake libtool gnumake libxml2 nodejs openjdk json_c ];
+    nativeBuildInputs = [ pkg-config zlib ];

     src = pkgs.fetchgit {
       url = "https://gitlab.com/odfplugfest/xmlmirror.git";


@@ -69,11 +69,11 @@ prelude
 As an example of how a Nix expression for an Idris package can be created, here is the one for `idrisPackages.yaml`:

 ```nix
-{ build-idris-package
+{ lib
+, build-idris-package
 , fetchFromGitHub
 , contrib
 , lightyear
-, lib
 }:

 build-idris-package {
   name = "yaml";
@@ -94,11 +94,11 @@ build-idris-package {
     sha256 = "1g4pi0swmg214kndj85hj50ccmckni7piprsxfdzdfhg87s0avw7";
   };

-  meta = {
+  meta = with lib; {
     description = "Idris YAML lib";
     homepage = "https://github.com/Heather/Idris.Yaml";
-    license = lib.licenses.mit;
-    maintainers = [ lib.maintainers.brainrape ];
+    license = licenses.mit;
+    maintainers = [ maintainers.brainrape ];
   };
 }
 ```


@@ -116,7 +116,7 @@ The first step will be to build the Maven project as a fixed-output derivation i
 > Traditionally the Maven repository is at `~/.m2/repository`. We will override this to be the `$out` directory.

 ```nix
-{ stdenv, lib, maven }:
+{ lib, stdenv, maven }:
 stdenv.mkDerivation {
   name = "maven-repository";
   buildInputs = [ maven ];
@@ -168,7 +168,7 @@ If your package uses _SNAPSHOT_ dependencies or _version ranges_; there is a str
 Regardless of which strategy is chosen above, the step to build the derivation is the same.

 ```nix
-{ stdenv, lib, maven, callPackage }:
+{ stdenv, maven, callPackage }:
 # pick a repository derivation, here we will use buildMaven
 let repository = callPackage ./build-maven-repository.nix { };
 in stdenv.mkDerivation rec {
@@ -222,7 +222,7 @@ We will read the Maven repository and flatten it to a single list. This list wil
 We make sure to provide this classpath to the `makeWrapper`.

 ```nix
-{ stdenv, lib, maven, callPackage, makeWrapper, jre }:
+{ stdenv, maven, callPackage, makeWrapper, jre }:
 let
   repository = callPackage ./build-maven-repository.nix { };
 in stdenv.mkDerivation rec {
@@ -298,7 +298,7 @@ Main-Class: Main
 We will modify the derivation above to add a symlink to our repository so that it's accessible to our JAR during the `installPhase`.

 ```nix
-{ stdenv, lib, maven, callPackage, makeWrapper, jre }:
+{ stdenv, maven, callPackage, makeWrapper, jre }:
 # pick a repository derivation, here we will use buildMaven
 let repository = callPackage ./build-maven-repository.nix { };
 in stdenv.mkDerivation rec {


@@ -32,11 +32,11 @@ buildDunePackage rec {
   propagatedBuildInputs = [ bigstringaf result ];
   doCheck = true;

-  meta = {
+  meta = with lib; {
     homepage = "https://github.com/inhabitedtype/angstrom";
     description = "OCaml parser combinators built for speed and memory efficiency";
-    license = lib.licenses.bsd3;
-    maintainers = with lib.maintainers; [ sternenseemann ];
+    license = licenses.bsd3;
+    maintainers = with maintainers; [ sternenseemann ];
   };
 }
 ```


@@ -110,7 +110,7 @@ ClassC3Componentised = buildPerlPackage rec {
 On Darwin, if a script has too many `-Idir` flags in its first line (its “shebang line”), it will not run. This can be worked around by calling the `shortenPerlShebang` function from the `postInstall` phase:

 ```nix
-{ stdenv, lib, buildPerlPackage, fetchurl, shortenPerlShebang }:
+{ lib, stdenv, buildPerlPackage, fetchurl, shortenPerlShebang }:

 ImageExifTool = buildPerlPackage {
   pname = "Image-ExifTool";


@@ -610,6 +610,10 @@ Using the example above, the analogous pytestCheckHook usage would be:
     "download"
     "update"
   ];
+
+  disabledTestFiles = [
+    "tests/test_failing.py"
+  ];
 ```

 This is especially useful when tests need to be conditionally disabled,
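For context, a minimal sketch of how these attributes sit in a package built with `pytestCheckHook` (the package name and the disabled tests are hypothetical):

```nix
{ lib, buildPythonPackage, fetchPypi, pytestCheckHook }:

buildPythonPackage rec {
  pname = "example";
  version = "1.0.0";
  src = fetchPypi {
    inherit pname version;
    sha256 = lib.fakeSha256; # placeholder hash
  };
  checkInputs = [ pytestCheckHook ];
  # Skip individual tests by name and whole files by path
  disabledTests = [ "download" "update" ];
  disabledTestFiles = [ "tests/test_failing.py" ];
}
```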


@@ -8,7 +8,7 @@ There are primarily two problems which the Qt infrastructure is designed to addr
 ```{=docbook}
 <programlisting>
-{ mkDerivation, lib, qtbase }: <co xml:id='qt-default-nix-co-1' />
+{ mkDerivation, qtbase }: <co xml:id='qt-default-nix-co-1' />

 mkDerivation { <co xml:id='qt-default-nix-co-2' />
   pname = "myapp";
@@ -92,32 +92,43 @@ mkDerivation {
 }
 ```

 ## Adding a library to Nixpkgs

-Add a Qt library to all-packages.nix by adding it to the collection inside `mkLibsForQt5`. This ensures that the library is built with every available version of Qt as needed.
+Qt libraries are added to `qt5-packages.nix` and are made available for every Qt
+version supported.

-### Example Adding a Qt library to all-packages.nix {#qt-library-all-packages-nix}
+### Example adding a Qt library {#qt-library-all-packages-nix}
+
+The following represents the contents of `qt5-packages.nix`.

 ```
 {
   # ...
-  mkLibsForQt5 = self: with self; {
-    # ...
-    mylib = callPackage ../path/to/mylib {};
-  };
+  mylib = callPackage ../path/to/mylib {};
   # ...
 }
 ```

 ## Adding an application to Nixpkgs

-Add a Qt application to *all-packages.nix* using `libsForQt5.callPackage` instead of the usual `callPackage`. The former ensures that all dependencies are built with the same version of Qt.
+Applications that use Qt are also added to `qt5-packages.nix`. An alias is added
+in the top-level `all-packages.nix` pointing to the package with the desired Qt5 version.

-### Example Adding a QT application to all-packages.nix {#qt-application-all-packages-nix}
+### Example adding a Qt application {#qt-application-all-packages-nix}

-```nix
+The following represents the contents of `qt5-packages.nix`.
+
+```
 {
   # ...
-  myapp = libsForQt5.callPackage ../path/to/myapp/ {};
+  myapp = callPackage ../path/to/myapp {};
+  # ...
+}
+```
+
+The following represents the contents of `all-packages.nix`.
+
+```
+{
+  # ...
+  myapp = libsForQt5.myapp;
   # ...
 }


@@ -32,14 +32,12 @@ However, if you'd like to add a file to your project source to make the
 environment available for other contributors, you can create a `default.nix`
 file like so:

 ```nix
-let
-  pkgs = import <nixpkgs> {};
-  stdenv = pkgs.stdenv;
-in with pkgs; {
+with import <nixpkgs> {};
+{
   myProject = stdenv.mkDerivation {
     name = "myProject";
     version = "1";

-    src = if pkgs.lib.inNixShell then null else nix;
+    src = if lib.inNixShell then null else nix;

     buildInputs = with rPackages; [
       R


@@ -232,7 +232,7 @@ If you want to package a specific version, you can use the standard Gemfile synt
 Now you can also make a `default.nix` that looks like this:

 ```nix
-{ lib, bundlerApp }:
+{ bundlerApp }:

 bundlerApp {
   pname = "mdl";


@@ -19,6 +19,8 @@ or use Mozilla's [Rust nightlies overlay](#using-the-rust-nightlies-overlay).
 Rust applications are packaged by using the `buildRustPackage` helper from `rustPlatform`:

 ```
+{ lib, rustPlatform }:
+
 rustPlatform.buildRustPackage rec {
   pname = "ripgrep";
   version = "12.1.1";
@@ -226,8 +228,6 @@ source code in a reproducible way. If it is missing or out-of-date one can use
 the `cargoPatches` attribute to update or add it.

 ```
-{ lib, rustPlatform, fetchFromGitHub }:
-
 rustPlatform.buildRustPackage rec {
   (...)
   cargoPatches = [
@@ -263,7 +263,7 @@ Now, the file produced by the call to `carnix`, called `hello.nix`, looks like:
 ```
 # Generated by carnix 0.6.5: carnix -o hello.nix --src ./. Cargo.lock --standalone
-{ lib, stdenv, buildRustCrate, fetchgit }:
+{ stdenv, buildRustCrate, fetchgit }:
 let kernel = stdenv.buildPlatform.parsed.kernel.name;
 # ... (content skipped)
 in
@@ -292,7 +292,7 @@ following nix file:
 ```
 # Generated by carnix 0.6.5: carnix -o hello.nix --src ./. Cargo.lock --standalone
-{ lib, stdenv, buildRustCrate, fetchgit }:
+{ stdenv, buildRustCrate, fetchgit }:
 let kernel = stdenv.buildPlatform.parsed.kernel.name;
 # ... (content skipped)
 in
@@ -480,7 +480,7 @@ stdenv.mkDerivation {
     rustc cargo

     # Example Build-time Additional Dependencies
-    pkgconfig
+    pkg-config
   ];
   buildInputs = [
     # Example Run-time Additional Dependencies
@@ -522,7 +522,7 @@ stdenv.mkDerivation {
     latest.rustChannels.nightly.rust

     # Add some extra dependencies from `pkgs`
-    pkgconfig openssl
+    pkg-config openssl
   ];

   # Set Environment Variables
@@ -567,12 +567,13 @@ in the `~/.config/nixpkgs/overlays` directory.
 Add the following to your `configuration.nix`, `home-configuration.nix`, `shell.nix`, or similar:

 ```
-nixpkgs = {
+{ pkgs ? import <nixpkgs> {
   overlays = [
     (import (builtins.fetchTarball https://github.com/mozilla/nixpkgs-mozilla/archive/master.tar.gz))
     # Further overlays go here
   ];
 };
+};
 ```

 Note that this will fetch the latest overlay version when rebuilding your system.


@@ -1,5 +1,3 @@
-{ pkgs ? import ../. {} }:
-(import ./default.nix {}).overrideAttrs (x: {
-  buildInputs = x.buildInputs ++ [ pkgs.xmloscopy pkgs.ruby ];
-})
+{ pkgs ? import ../. { } }:
+(import ./default.nix { }).overrideAttrs
+  (x: { buildInputs = (x.buildInputs or [ ]) ++ [ pkgs.xmloscopy pkgs.ruby ]; })


@@ -291,5 +291,40 @@ stdenv.mkDerivation {
 }
 </programlisting>
 </section>
+<section xml:id="sec-overlays-alternatives-mpi">
+ <title>Switching the MPI implementation</title>
+ <para>
+  All programs that are built with
+  <link xlink:href="https://en.wikipedia.org/wiki/Message_Passing_Interface">MPI</link>
+  support use the generic attribute <varname>mpi</varname>
+  as an input. At the moment Nixpkgs natively provides two different
+  MPI implementations:
+  <itemizedlist>
+   <listitem>
+    <para>
+     <link xlink:href="https://www.open-mpi.org/">Open MPI</link>
+     (default), attribute name <varname>openmpi</varname>
+    </para>
+   </listitem>
+   <listitem>
+    <para>
+     <link xlink:href="https://www.mpich.org/">MPICH</link>,
+     attribute name <varname>mpich</varname>
+    </para>
+   </listitem>
+  </itemizedlist>
+ </para>
+ <para>
+  To provide MPI enabled applications that use <literal>MPICH</literal>, instead
+  of the default <literal>Open MPI</literal>, simply use the following overlay:
+ </para>
+<programlisting>
+self: super:
+{
+  mpi = self.mpich;
+}
+</programlisting>
+</section>
 </section>
 </chapter>
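As a complement to the overlay above, a minimal sketch of a package that consumes the generic `mpi` attribute (the package itself is hypothetical); with the overlay applied it is built against MPICH instead of the default Open MPI:

```nix
{ lib, stdenv, fetchurl, mpi }:

stdenv.mkDerivation rec {
  pname = "mpi-example";
  version = "1.0";
  src = fetchurl {
    url = "https://example.org/mpi-example-${version}.tar.gz";
    sha256 = lib.fakeSha256; # placeholder hash
  };
  # mpi resolves to openmpi by default, or to mpich under the overlay shown above
  buildInputs = [ mpi ];
}
```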


@@ -183,6 +183,24 @@ rec {
     else
       [];

+  /* Return the cartesian product of attribute set value combinations.
+     Example:
+       cartesianProductOfSets { a = [ 1 2 ]; b = [ 10 20 ]; }
+       => [
+            { a = 1; b = 10; }
+            { a = 1; b = 20; }
+            { a = 2; b = 10; }
+            { a = 2; b = 20; }
+          ]
+  */
+  cartesianProductOfSets = attrsOfLists:
+    lib.foldl' (listOfAttrs: attrName:
+      concatMap (attrs:
+        map (listValue: attrs // { ${attrName} = listValue; }) attrsOfLists.${attrName}
+      ) listOfAttrs
+    ) [{}] (attrNames attrsOfLists);
+
   /* Utility function that creates a {name, value} pair as expected by
      builtins.listToAttrs.
@@ -493,5 +511,4 @@ rec {
   zipWithNames = zipAttrsWithNames;
   zip = builtins.trace
     "lib.zip is deprecated, use lib.zipAttrsWith instead" zipAttrsWith;
-
 }
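A small usage sketch outside the diff (the attribute names are invented, and `lib` is assumed to already contain the new function): because each attribute's list is folded in turn, the result enumerates every combination, which is convenient for building test or build matrices.

```nix
let
  inherit (import <nixpkgs> { }) lib; # assumes a nixpkgs checkout that includes cartesianProductOfSets
  matrix = lib.cartesianProductOfSets {
    os = [ "linux" "darwin" ];
    arch = [ "x86_64" "aarch64" ];
  };
in
  map ({ os, arch }: "${arch}-${os}") matrix
  # => [ "x86_64-linux" "x86_64-darwin" "aarch64-linux" "aarch64-darwin" ]
```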


@@ -148,6 +148,28 @@ rec {
   /* A combination of `traceVal` and `traceSeqN`. */
   traceValSeqN = traceValSeqNFn id;

+  /* Trace the input and output of a function `f` named `name`,
+     both down to `depth`.
+
+     This is useful for adding around a function call,
+     to see the before/after of values as they are transformed.
+
+     Example:
+       traceFnSeqN 2 "id" (x: x) { a.b.c = 3; }
+       trace: { fn = "id"; from = { a.b = {}; }; to = { a.b = {}; }; }
+       => { a.b.c = 3; }
+  */
+  traceFnSeqN = depth: name: f: v:
+    let res = f v;
+    in lib.traceSeqN
+        (depth + 1)
+        {
+          fn = name;
+          from = v;
+          to = res;
+        }
+        res;
+
   # -- TESTING --
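A brief usage sketch (the traced function is arbitrary): wrapping a call site with `traceFnSeqN` prints the argument and the result on the trace output and then returns the result unchanged.

```nix
# Prints: trace: { fn = "keep-big"; from = [ 1 2 3 4 ]; to = [ 3 4 ]; }
# and evaluates to [ 3 4 ]
lib.traceFnSeqN 2 "keep-big" (lib.filter (x: x > 2)) [ 1 2 3 4 ]
```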


@@ -78,7 +78,7 @@ let
     zipAttrsWithNames zipAttrsWith zipAttrs recursiveUpdateUntil
     recursiveUpdate matchAttrs overrideExisting getOutput getBin
     getLib getDev getMan chooseDevOutputs zipWithNames zip
-    recurseIntoAttrs dontRecurseIntoAttrs;
+    recurseIntoAttrs dontRecurseIntoAttrs cartesianProductOfSets;
   inherit (self.lists) singleton forEach foldr fold foldl foldl' imap0 imap1
     concatMap flatten remove findSingle findFirst any all count
     optional optionals toList range partition zipListsWith zipLists
@@ -130,7 +130,7 @@ let
     assertMsg assertOneOf;
   inherit (self.debug) addErrorContextToAttrs traceIf traceVal traceValFn
     traceXMLVal traceXMLValMarked traceSeq traceSeqN traceValSeq
-    traceValSeqFn traceValSeqN traceValSeqNFn traceShowVal
+    traceValSeqFn traceValSeqN traceValSeqNFn traceFnSeqN traceShowVal
     traceShowValMarked showVal traceCall traceCall2 traceCall3
     traceValIfNot runTests testAllTrue traceCallXml attrNamesToStr;
   inherit (self.misc) maybeEnv defaultMergeArg defaultMerge foldArgs


@@ -87,7 +87,7 @@ lib.mapAttrs (n: v: v // { shortName = n; }) {

   beerware = spdx {
     spdxId = "Beerware";
-    fullName = ''Beerware License'';
+    fullName = "Beerware License";
   };

   blueOak100 = spdx {
@@ -100,6 +100,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) {
     fullName = "BSD Zero Clause License";
   };

+  bsd1 = spdx {
+    spdxId = "BSD-1-Clause";
+    fullName = "BSD 1-Clause License";
+  };
+
   bsd2 = spdx {
     spdxId = "BSD-2-Clause";
     fullName = ''BSD 2-clause "Simplified" License'';
@@ -107,7 +112,7 @@ lib.mapAttrs (n: v: v // { shortName = n; }) {

   bsd2Patent = spdx {
     spdxId = "BSD-2-Clause-Patent";
-    fullName = ''BSD-2-Clause Plus Patent License'';
+    fullName = "BSD-2-Clause Plus Patent License";
   };

   bsd3 = spdx {


@@ -629,7 +629,9 @@ rec {
        crossLists (x:y: "${toString x}${toString y}") [[1 2] [3 4]]
        => [ "13" "14" "23" "24" ]
   */
-  crossLists = f: foldl (fs: args: concatMap (f: map f args) fs) [f];
+  crossLists = builtins.trace
+    "lib.crossLists is deprecated, use lib.cartesianProductOfSets instead"
+    (f: foldl (fs: args: concatMap (f: map f args) fs) [f]);

   /* Remove duplicate elements from the list. O(n^2) complexity.
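Following the deprecation message, a migration sketch for the docstring example above: the same strings can be produced with `cartesianProductOfSets` by giving the positions explicit names.

```nix
# Old (now emits a deprecation trace):
#   crossLists (x: y: "${toString x}${toString y}") [ [1 2] [3 4] ]
# Equivalent replacement:
map ({ x, y }: "${toString x}${toString y}")
  (lib.cartesianProductOfSets { x = [ 1 2 ]; y = [ 3 4 ]; })
# => [ "13" "14" "23" "24" ]
```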


@@ -895,7 +895,7 @@ rec {
       fromOpt = getAttrFromPath from options;
       toOf = attrByPath to
         (abort "Renaming error: option `${showOption to}' does not exist.");
-      toType = let opt = attrByPath to {} options; in opt.type or null;
+      toType = let opt = attrByPath to {} options; in opt.type or (types.submodule {});
     in
     {
       options = setAttrByPath from (mkOption {


@@ -1,7 +1,7 @@
 { lib }:

 rec {
-  # platform.gcc.arch to its features (as in /proc/cpuinfo)
+  # gcc.arch to its features (as in /proc/cpuinfo)
   features = {
     default = [ ];
     # x86_64 Intel


@@ -24,8 +24,6 @@ rec {
       # Either of these can be losslessly-extracted from `parsed` iff parsing succeeds.
       system = parse.doubleFromSystem final.parsed;
       config = parse.tripleFromSystem final.parsed;
-      # Just a guess, based on `system`
-      platform = platforms.select final;
       # Determine whether we are compatible with the provided CPU
       isCompatible = platform: parse.isCompatible final.parsed.cpu platform.parsed.cpu;
       # Derived meta-data
@@ -79,12 +77,23 @@ rec {
       };

       isStatic = final.isWasm || final.isRedox;

-      kernelArch =
+      # Just a guess, based on `system`
+      inherit
+        ({
+          linux-kernel = args.linux-kernel or {};
+          gcc = args.gcc or {};
+          rustc = args.rust or {};
+        } // platforms.select final)
+        linux-kernel gcc rustc;
+
+      linuxArch =
         if final.isAarch32 then "arm"
         else if final.isAarch64 then "arm64"
-        else if final.isx86_32 then "x86"
-        else if final.isx86_64 then "x86"
+        else if final.isx86_32 then "i386"
+        else if final.isx86_64 then "x86_64"
         else if final.isMips then "mips"
+        else if final.isPower then "powerpc"
+        else if final.isRiscV then "riscv"
         else final.parsed.cpu.name;

       qemuArch =
@@ -129,7 +138,7 @@ rec {
         else throw "Don't know how to run ${final.config} executables.";

   } // mapAttrs (n: v: v final.parsed) inspect.predicates
-    // mapAttrs (n: v: v final.platform.gcc.arch or "default") architectures.predicates
+    // mapAttrs (n: v: v final.gcc.arch or "default") architectures.predicates
     // args;
   in assert final.useAndroidPrebuilt -> final.isAndroid;
      assert lib.foldl
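As an illustration of how the renamed attribute is consumed (the derivation is a throwaway demo, not taken from the diff), `linuxArch` can be read off the elaborated host platform, for example to pass `ARCH=` to a kernel-style build:

```nix
{ stdenv }:

stdenv.mkDerivation {
  name = "linux-arch-demo";
  dontUnpack = true;
  # linuxArch maps the platform to the kernel's ARCH name, e.g. x86_64, arm64 or powerpc
  installPhase = ''
    echo "ARCH=${stdenv.hostPlatform.linuxArch}" > $out
  '';
}
```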


@@ -24,6 +24,7 @@ let
     "x86_64-redox"

+    "powerpc64-linux"
     "powerpc64le-linux"

     "riscv32-linux" "riscv64-linux"
@@ -72,7 +73,7 @@ in {
   darwin = filterDoubles predicates.isDarwin;
   freebsd = filterDoubles predicates.isFreeBSD;
   # Should be better, but MinGW is unclear.
-  gnu = filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnu; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabi; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabihf; });
+  gnu = filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnu; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabi; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabihf; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.elfv1; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.elfv2; });
   illumos = filterDoubles predicates.isSunOS;
   linux = filterDoubles predicates.isLinux;
   netbsd = filterDoubles predicates.isNetBSD;
@@ -85,5 +86,5 @@ in {
   embedded = filterDoubles predicates.isNone;

-  mesaPlatforms = ["i686-linux" "x86_64-linux" "x86_64-darwin" "armv5tel-linux" "armv6l-linux" "armv7l-linux" "armv7a-linux" "aarch64-linux" "powerpc64le-linux"];
+  mesaPlatforms = ["i686-linux" "x86_64-linux" "x86_64-darwin" "armv5tel-linux" "armv6l-linux" "armv7l-linux" "armv7a-linux" "aarch64-linux" "powerpc64-linux" "powerpc64le-linux"];
 }


@@ -7,7 +7,6 @@ let
riscv = bits: { riscv = bits: {
config = "riscv${bits}-unknown-linux-gnu"; config = "riscv${bits}-unknown-linux-gnu";
platform = platforms.riscv-multiplatform;
}; };
in in
@@ -17,84 +16,81 @@ rec {
# #
powernv = { powernv = {
config = "powerpc64le-unknown-linux-gnu"; config = "powerpc64le-unknown-linux-gnu";
platform = platforms.powernv;
}; };
musl-power = { musl-power = {
config = "powerpc64le-unknown-linux-musl"; config = "powerpc64le-unknown-linux-musl";
platform = platforms.powernv; };
ppc64-elfv1 = {
config = "powerpc64-unknown-linux-elfv1";
};
ppc64-elfv2 = {
config = "powerpc64-unknown-linux-elfv2";
};
ppc64 = ppc64-elfv2; # default to modern elfv2
ppc64-musl = {
config = "powerpc64-unknown-linux-musl";
gcc = { abi = "elfv2"; }; # for gcc configuration
}; };
sheevaplug = { sheevaplug = {
config = "armv5tel-unknown-linux-gnueabi"; config = "armv5tel-unknown-linux-gnueabi";
platform = platforms.sheevaplug; } // platforms.sheevaplug;
};
raspberryPi = { raspberryPi = {
config = "armv6l-unknown-linux-gnueabihf"; config = "armv6l-unknown-linux-gnueabihf";
platform = platforms.raspberrypi; } // platforms.raspberrypi;
};
remarkable1 = { remarkable1 = {
config = "armv7l-unknown-linux-gnueabihf"; config = "armv7l-unknown-linux-gnueabihf";
platform = platforms.zero-gravitas; } // platforms.zero-gravitas;
};
remarkable2 = { remarkable2 = {
config = "armv7l-unknown-linux-gnueabihf"; config = "armv7l-unknown-linux-gnueabihf";
platform = platforms.zero-sugar; } // platforms.zero-sugar;
};
armv7l-hf-multiplatform = { armv7l-hf-multiplatform = {
config = "armv7l-unknown-linux-gnueabihf"; config = "armv7l-unknown-linux-gnueabihf";
platform = platforms.armv7l-hf-multiplatform;
}; };
aarch64-multiplatform = { aarch64-multiplatform = {
config = "aarch64-unknown-linux-gnu"; config = "aarch64-unknown-linux-gnu";
platform = platforms.aarch64-multiplatform;
}; };
armv7a-android-prebuilt = { armv7a-android-prebuilt = {
config = "armv7a-unknown-linux-androideabi"; config = "armv7a-unknown-linux-androideabi";
sdkVer = "29"; sdkVer = "29";
ndkVer = "21"; ndkVer = "21";
platform = platforms.armv7a-android;
useAndroidPrebuilt = true; useAndroidPrebuilt = true;
}; } // platforms.armv7a-android;
aarch64-android-prebuilt = { aarch64-android-prebuilt = {
config = "aarch64-unknown-linux-android"; config = "aarch64-unknown-linux-android";
sdkVer = "29"; sdkVer = "29";
ndkVer = "21"; ndkVer = "21";
platform = platforms.aarch64-multiplatform;
useAndroidPrebuilt = true; useAndroidPrebuilt = true;
}; };
scaleway-c1 = armv7l-hf-multiplatform // rec { scaleway-c1 = armv7l-hf-multiplatform // platforms.scaleway-c1;
platform = platforms.scaleway-c1;
inherit (platform.gcc) fpu;
};
pogoplug4 = { pogoplug4 = {
config = "armv5tel-unknown-linux-gnueabi"; config = "armv5tel-unknown-linux-gnueabi";
platform = platforms.pogoplug4; } // platforms.pogoplug4;
};
ben-nanonote = { ben-nanonote = {
config = "mipsel-unknown-linux-uclibc"; config = "mipsel-unknown-linux-uclibc";
platform = platforms.ben_nanonote; } // platforms.ben_nanonote;
};
fuloongminipc = { fuloongminipc = {
config = "mipsel-unknown-linux-gnu"; config = "mipsel-unknown-linux-gnu";
platform = platforms.fuloong2f_n32; } // platforms.fuloong2f_n32;
};
muslpi = raspberryPi // { muslpi = raspberryPi // {
config = "armv6l-unknown-linux-musleabihf"; config = "armv6l-unknown-linux-musleabihf";
}; };
aarch64-multiplatform-musl = aarch64-multiplatform // { aarch64-multiplatform-musl = {
config = "aarch64-unknown-linux-musl"; config = "aarch64-unknown-linux-musl";
}; };
@@ -110,13 +106,11 @@ rec {
riscv64-embedded = { riscv64-embedded = {
config = "riscv64-none-elf"; config = "riscv64-none-elf";
libc = "newlib"; libc = "newlib";
platform = platforms.riscv-multiplatform;
}; };
riscv32-embedded = { riscv32-embedded = {
config = "riscv32-none-elf"; config = "riscv32-none-elf";
libc = "newlib"; libc = "newlib";
platform = platforms.riscv-multiplatform;
}; };
mmix = { mmix = {
@@ -136,13 +130,11 @@ rec {
vc4 = { vc4 = {
config = "vc4-elf"; config = "vc4-elf";
libc = "newlib"; libc = "newlib";
platform = {};
}; };
or1k = { or1k = {
config = "or1k-elf"; config = "or1k-elf";
libc = "newlib"; libc = "newlib";
platform = {};
}; };
arm-embedded = { arm-embedded = {
@@ -152,6 +144,12 @@ rec {
armhf-embedded = { armhf-embedded = {
config = "arm-none-eabihf"; config = "arm-none-eabihf";
libc = "newlib"; libc = "newlib";
# GCC8+ does not build without this
# (https://www.mail-archive.com/gcc-bugs@gcc.gnu.org/msg552339.html):
gcc = {
arch = "armv5t";
fpu = "vfp";
};
}; };
aarch64-embedded = { aarch64-embedded = {
@@ -200,41 +198,37 @@ rec {
iphone64 = { iphone64 = {
config = "aarch64-apple-ios"; config = "aarch64-apple-ios";
# config = "aarch64-apple-darwin14"; # config = "aarch64-apple-darwin14";
sdkVer = "13.2"; sdkVer = "14.3";
xcodeVer = "11.3.1"; xcodeVer = "12.3";
xcodePlatform = "iPhoneOS"; xcodePlatform = "iPhoneOS";
useiOSPrebuilt = true; useiOSPrebuilt = true;
platform = {};
}; };
iphone32 = { iphone32 = {
config = "armv7a-apple-ios"; config = "armv7a-apple-ios";
# config = "arm-apple-darwin10"; # config = "arm-apple-darwin10";
sdkVer = "13.2"; sdkVer = "14.3";
xcodeVer = "11.3.1"; xcodeVer = "12.3";
xcodePlatform = "iPhoneOS"; xcodePlatform = "iPhoneOS";
useiOSPrebuilt = true; useiOSPrebuilt = true;
platform = {};
}; };
iphone64-simulator = { iphone64-simulator = {
config = "x86_64-apple-ios"; config = "x86_64-apple-ios";
# config = "x86_64-apple-darwin14"; # config = "x86_64-apple-darwin14";
sdkVer = "13.2"; sdkVer = "14.3";
xcodeVer = "11.3.1"; xcodeVer = "12.3";
xcodePlatform = "iPhoneSimulator"; xcodePlatform = "iPhoneSimulator";
useiOSPrebuilt = true; useiOSPrebuilt = true;
platform = {};
}; };
iphone32-simulator = { iphone32-simulator = {
config = "i686-apple-ios"; config = "i686-apple-ios";
# config = "i386-apple-darwin11"; # config = "i386-apple-darwin11";
sdkVer = "13.2"; sdkVer = "14.3";
xcodeVer = "11.3.1"; xcodeVer = "12.3";
xcodePlatform = "iPhoneSimulator"; xcodePlatform = "iPhoneSimulator";
useiOSPrebuilt = true; useiOSPrebuilt = true;
platform = {};
}; };
# #
@@ -245,7 +239,6 @@ rec {
mingw32 = { mingw32 = {
config = "i686-w64-mingw32"; config = "i686-w64-mingw32";
libc = "msvcrt"; # This distinguishes the mingw (non posix) toolchain libc = "msvcrt"; # This distinguishes the mingw (non posix) toolchain
platform = {};
}; };
# 64 bit mingw-w64 # 64 bit mingw-w64
@@ -253,7 +246,6 @@ rec {
# That's the triplet they use in the mingw-w64 docs. # That's the triplet they use in the mingw-w64 docs.
config = "x86_64-w64-mingw32"; config = "x86_64-w64-mingw32";
libc = "msvcrt"; # This distinguishes the mingw (non posix) toolchain libc = "msvcrt"; # This distinguishes the mingw (non posix) toolchain
platform = {};
}; };
# BSDs # BSDs
@@ -275,6 +267,5 @@ rec {
# Ghcjs # Ghcjs
ghcjs = { ghcjs = {
config = "js-unknown-ghcjs"; config = "js-unknown-ghcjs";
platform = {};
}; };
} }


@@ -337,10 +337,18 @@ rec {
           The "gnu" ABI is ambiguous on 32-bit ARM. Use "gnueabi" or "gnueabihf" instead.
         '';
       }
+      { assertion = platform: platform.system != "powerpc64-linux";
+        message = ''
+          The "gnu" ABI is ambiguous on big-endian 64-bit PPC. Use "elfv1" or "elfv2" instead.
+        '';
+      }
     ];
   };
   gnuabi64 = { abi = "64"; };
+  elfv1 = { abi = "elfv1"; };
+  elfv2 = { abi = "elfv2"; };

   musleabi = { float = "soft"; };
   musleabihf = { float = "hard"; };
   musl = {};
@@ -444,6 +452,7 @@ rec {
         if lib.versionAtLeast (parsed.cpu.version or "0") "6"
         then abis.gnueabihf
         else abis.gnueabi
+      else if cpu == "powerpc64" then abis.elfv2
       else abis.gnu
     else abis.unknown;
   };


@@ -1,39 +1,36 @@
{ lib }: { lib }:
rec { rec {
pcBase = { pc = {
name = "pc"; linux-kernel = {
kernelBaseConfig = "defconfig"; name = "pc";
# Build whatever possible as a module, if not stated in the extra config.
kernelAutoModules = true; baseConfig = "defconfig";
kernelTarget = "bzImage"; # Build whatever possible as a module, if not stated in the extra config.
autoModules = true;
target = "bzImage";
};
}; };
pc64 = pcBase // { kernelArch = "x86_64"; }; pc_simplekernel = lib.recursiveUpdate pc {
linux-kernel.autoModules = false;
pc32 = pcBase // { kernelArch = "i386"; };
pc32_simplekernel = pc32 // {
kernelAutoModules = false;
};
pc64_simplekernel = pc64 // {
kernelAutoModules = false;
}; };
powernv = { powernv = {
name = "PowerNV"; linux-kernel = {
kernelArch = "powerpc"; name = "PowerNV";
kernelBaseConfig = "powernv_defconfig";
kernelTarget = "zImage"; baseConfig = "powernv_defconfig";
kernelInstallTarget = "install"; target = "zImage";
kernelFile = "vmlinux"; installTarget = "install";
kernelAutoModules = true; file = "vmlinux";
# avoid driver/FS trouble arising from unusual page size autoModules = true;
kernelExtraConfig = '' # avoid driver/FS trouble arising from unusual page size
PPC_64K_PAGES n extraConfig = ''
PPC_4K_PAGES y PPC_64K_PAGES n
IPV6 y PPC_4K_PAGES y
''; IPV6 y
'';
};
}; };
## ##
@@ -41,18 +38,12 @@ rec {
## ##
pogoplug4 = { pogoplug4 = {
name = "pogoplug4"; linux-kernel = {
name = "pogoplug4";
gcc = { baseConfig = "multi_v5_defconfig";
arch = "armv5te"; autoModules = false;
}; extraConfig = ''
kernelMajor = "2.6";
kernelBaseConfig = "multi_v5_defconfig";
kernelArch = "arm";
kernelAutoModules = false;
kernelExtraConfig =
''
# Ubi for the mtd # Ubi for the mtd
MTD_UBI y MTD_UBI y
UBIFS_FS y UBIFS_FS y
@@ -62,138 +53,144 @@ rec {
UBIFS_FS_ZLIB y UBIFS_FS_ZLIB y
UBIFS_FS_DEBUG n UBIFS_FS_DEBUG n
''; '';
kernelMakeFlags = [ "LOADADDR=0x8000" ]; makeFlags = [ "LOADADDR=0x8000" ];
kernelTarget = "uImage"; target = "uImage";
# TODO reenable once manual-config's config actually builds a .dtb and this is checked to be working # TODO reenable once manual-config's config actually builds a .dtb and this is checked to be working
#kernelDTB = true; #DTB = true;
};
gcc = {
arch = "armv5te";
};
}; };
sheevaplug = { sheevaplug = {
name = "sheevaplug"; linux-kernel = {
kernelMajor = "2.6"; name = "sheevaplug";
kernelBaseConfig = "multi_v5_defconfig";
kernelArch = "arm";
kernelAutoModules = false;
kernelExtraConfig = ''
BLK_DEV_RAM y
BLK_DEV_INITRD y
BLK_DEV_CRYPTOLOOP m
BLK_DEV_DM m
DM_CRYPT m
MD y
REISERFS_FS m
BTRFS_FS m
XFS_FS m
JFS_FS m
EXT4_FS m
USB_STORAGE_CYPRESS_ATACB m
# mv cesa requires this sw fallback, for mv-sha1 baseConfig = "multi_v5_defconfig";
CRYPTO_SHA1 y autoModules = false;
# Fast crypto extraConfig = ''
CRYPTO_TWOFISH y BLK_DEV_RAM y
CRYPTO_TWOFISH_COMMON y BLK_DEV_INITRD y
CRYPTO_BLOWFISH y BLK_DEV_CRYPTOLOOP m
CRYPTO_BLOWFISH_COMMON y BLK_DEV_DM m
DM_CRYPT m
MD y
REISERFS_FS m
BTRFS_FS m
XFS_FS m
JFS_FS m
EXT4_FS m
USB_STORAGE_CYPRESS_ATACB m
IP_PNP y # mv cesa requires this sw fallback, for mv-sha1
IP_PNP_DHCP y CRYPTO_SHA1 y
NFS_FS y # Fast crypto
ROOT_NFS y CRYPTO_TWOFISH y
TUN m CRYPTO_TWOFISH_COMMON y
NFS_V4 y CRYPTO_BLOWFISH y
NFS_V4_1 y CRYPTO_BLOWFISH_COMMON y
NFS_FSCACHE y
NFSD m
NFSD_V2_ACL y
NFSD_V3 y
NFSD_V3_ACL y
NFSD_V4 y
NETFILTER y
IP_NF_IPTABLES y
IP_NF_FILTER y
IP_NF_MATCH_ADDRTYPE y
IP_NF_TARGET_LOG y
IP_NF_MANGLE y
IPV6 m
VLAN_8021Q m
CIFS y IP_PNP y
CIFS_XATTR y IP_PNP_DHCP y
CIFS_POSIX y NFS_FS y
CIFS_FSCACHE y ROOT_NFS y
CIFS_ACL y TUN m
NFS_V4 y
NFS_V4_1 y
NFS_FSCACHE y
NFSD m
NFSD_V2_ACL y
NFSD_V3 y
NFSD_V3_ACL y
NFSD_V4 y
NETFILTER y
IP_NF_IPTABLES y
IP_NF_FILTER y
IP_NF_MATCH_ADDRTYPE y
IP_NF_TARGET_LOG y
IP_NF_MANGLE y
IPV6 m
VLAN_8021Q m
WATCHDOG y CIFS y
WATCHDOG_CORE y CIFS_XATTR y
ORION_WATCHDOG m CIFS_POSIX y
CIFS_FSCACHE y
CIFS_ACL y
ZRAM m WATCHDOG y
NETCONSOLE m WATCHDOG_CORE y
ORION_WATCHDOG m
# Disable OABI to have seccomp_filter (required for systemd) ZRAM m
# https://github.com/raspberrypi/firmware/issues/651 NETCONSOLE m
OABI_COMPAT n
# Fail to build # Disable OABI to have seccomp_filter (required for systemd)
DRM n # https://github.com/raspberrypi/firmware/issues/651
SCSI_ADVANSYS n OABI_COMPAT n
USB_ISP1362_HCD n
SND_SOC n
SND_ALI5451 n
FB_SAVAGE n
SCSI_NSP32 n
ATA_SFF n
SUNGEM n
IRDA n
ATM_HE n
SCSI_ACARD n
BLK_DEV_CMD640_ENHANCED n
FUSE_FS m # Fail to build
DRM n
SCSI_ADVANSYS n
USB_ISP1362_HCD n
SND_SOC n
SND_ALI5451 n
FB_SAVAGE n
SCSI_NSP32 n
ATA_SFF n
SUNGEM n
IRDA n
ATM_HE n
SCSI_ACARD n
BLK_DEV_CMD640_ENHANCED n
# systemd uses cgroups FUSE_FS m
CGROUPS y
# Latencytop # systemd uses cgroups
LATENCYTOP y CGROUPS y
# Ubi for the mtd # Latencytop
MTD_UBI y LATENCYTOP y
UBIFS_FS y
UBIFS_FS_XATTR y
UBIFS_FS_ADVANCED_COMPR y
UBIFS_FS_LZO y
UBIFS_FS_ZLIB y
UBIFS_FS_DEBUG n
# Kdb, for kernel troubles # Ubi for the mtd
KGDB y MTD_UBI y
KGDB_SERIAL_CONSOLE y UBIFS_FS y
KGDB_KDB y UBIFS_FS_XATTR y
''; UBIFS_FS_ADVANCED_COMPR y
kernelMakeFlags = [ "LOADADDR=0x0200000" ]; UBIFS_FS_LZO y
kernelTarget = "uImage"; UBIFS_FS_ZLIB y
kernelDTB = true; # Beyond 3.10 UBIFS_FS_DEBUG n
# Kdb, for kernel troubles
KGDB y
KGDB_SERIAL_CONSOLE y
KGDB_KDB y
'';
makeFlags = [ "LOADADDR=0x0200000" ];
target = "uImage";
DTB = true; # Beyond 3.10
};
gcc = { gcc = {
arch = "armv5te"; arch = "armv5te";
}; };
}; };
raspberrypi = { raspberrypi = {
name = "raspberrypi"; linux-kernel = {
kernelMajor = "2.6"; name = "raspberrypi";
kernelBaseConfig = "bcm2835_defconfig";
kernelDTB = true; baseConfig = "bcm2835_defconfig";
kernelArch = "arm"; DTB = true;
kernelAutoModules = true; autoModules = true;
kernelPreferBuiltin = true; preferBuiltin = true;
kernelExtraConfig = '' extraConfig = ''
# Disable OABI to have seccomp_filter (required for systemd) # Disable OABI to have seccomp_filter (required for systemd)
# https://github.com/raspberrypi/firmware/issues/651 # https://github.com/raspberrypi/firmware/issues/651
OABI_COMPAT n OABI_COMPAT n
''; '';
kernelTarget = "zImage"; target = "zImage";
};
gcc = { gcc = {
arch = "armv6"; arch = "armv6";
fpu = "vfp"; fpu = "vfp";
@@ -204,13 +201,15 @@ rec {
raspberrypi2 = armv7l-hf-multiplatform; raspberrypi2 = armv7l-hf-multiplatform;
zero-gravitas = { zero-gravitas = {
name = "zero-gravitas"; linux-kernel = {
kernelBaseConfig = "zero-gravitas_defconfig"; name = "zero-gravitas";
kernelArch = "arm";
# kernelTarget verified by checking /boot on reMarkable 1 device baseConfig = "zero-gravitas_defconfig";
kernelTarget = "zImage"; # Target verified by checking /boot on reMarkable 1 device
kernelAutoModules = false; target = "zImage";
kernelDTB = true; autoModules = false;
DTB = true;
};
gcc = { gcc = {
fpu = "neon"; fpu = "neon";
cpu = "cortex-a9"; cpu = "cortex-a9";
@@ -218,13 +217,15 @@ rec {
}; };
zero-sugar = { zero-sugar = {
name = "zero-sugar"; linux-kernel = {
kernelBaseConfig = "zero-sugar_defconfig"; name = "zero-sugar";
kernelArch = "arm";
kernelDTB = true; baseConfig = "zero-sugar_defconfig";
kernelAutoModules = false; DTB = true;
kernelPreferBuiltin = true; autoModules = false;
kernelTarget = "zImage"; preferBuiltin = true;
target = "zImage";
};
gcc = { gcc = {
cpu = "cortex-a7"; cpu = "cortex-a7";
fpu = "neon-vfpv4"; fpu = "neon-vfpv4";
@@ -232,7 +233,7 @@ rec {
}; };
}; };
scaleway-c1 = armv7l-hf-multiplatform // { scaleway-c1 = lib.recursiveUpdate armv7l-hf-multiplatform {
gcc = { gcc = {
cpu = "cortex-a9"; cpu = "cortex-a9";
fpu = "vfpv3"; fpu = "vfpv3";
@@ -240,13 +241,11 @@ rec {
}; };
utilite = { utilite = {
name = "utilite"; linux-kernel = {
kernelMajor = "2.6"; name = "utilite";
kernelBaseConfig = "multi_v7_defconfig"; maseConfig = "multi_v7_defconfig";
kernelArch = "arm"; autoModules = false;
kernelAutoModules = false; extraConfig = ''
kernelExtraConfig =
''
# Ubi for the mtd # Ubi for the mtd
MTD_UBI y MTD_UBI y
UBIFS_FS y UBIFS_FS y
@@ -256,35 +255,37 @@ rec {
UBIFS_FS_ZLIB y UBIFS_FS_ZLIB y
UBIFS_FS_DEBUG n UBIFS_FS_DEBUG n
''; '';
kernelMakeFlags = [ "LOADADDR=0x10800000" ]; makeFlags = [ "LOADADDR=0x10800000" ];
kernelTarget = "uImage"; target = "uImage";
kernelDTB = true; DTB = true;
};
gcc = { gcc = {
cpu = "cortex-a9"; cpu = "cortex-a9";
fpu = "neon"; fpu = "neon";
}; };
}; };
guruplug = sheevaplug // { guruplug = lib.recursiveUpdate sheevaplug {
# Define `CONFIG_MACH_GURUPLUG' (see # Define `CONFIG_MACH_GURUPLUG' (see
# <http://kerneltrap.org/mailarchive/git-commits-head/2010/5/19/33618>) # <http://kerneltrap.org/mailarchive/git-commits-head/2010/5/19/33618>)
# and other GuruPlug-specific things. Requires the `guruplug-defconfig' # and other GuruPlug-specific things. Requires the `guruplug-defconfig'
# patch. # patch.
linux-kernel.baseConfig = "guruplug_defconfig";
kernelBaseConfig = "guruplug_defconfig";
}; };
beaglebone = armv7l-hf-multiplatform // { beaglebone = lib.recursiveUpdate armv7l-hf-multiplatform {
name = "beaglebone"; linux-kernel = {
kernelBaseConfig = "bb.org_defconfig"; name = "beaglebone";
kernelAutoModules = false; baseConfig = "bb.org_defconfig";
kernelExtraConfig = ""; # TBD kernel config autoModules = false;
kernelTarget = "zImage"; extraConfig = ""; # TBD kernel config
target = "zImage";
};
}; };
# https://developer.android.com/ndk/guides/abis#v7a # https://developer.android.com/ndk/guides/abis#v7a
armv7a-android = { armv7a-android = {
name = "armeabi-v7a"; linux-kernel.name = "armeabi-v7a";
gcc = { gcc = {
arch = "armv7-a"; arch = "armv7-a";
float-abi = "softfp"; float-abi = "softfp";
@@ -293,30 +294,31 @@ rec {
}; };
armv7l-hf-multiplatform = { armv7l-hf-multiplatform = {
name = "armv7l-hf-multiplatform"; linux-kernel = {
kernelMajor = "2.6"; # Using "2.6" enables 2.6 kernel syscalls in glibc. name = "armv7l-hf-multiplatform";
kernelBaseConfig = "multi_v7_defconfig"; Major = "2.6"; # Using "2.6" enables 2.6 kernel syscalls in glibc.
kernelArch = "arm"; baseConfig = "multi_v7_defconfig";
kernelDTB = true; DTB = true;
kernelAutoModules = true; autoModules = true;
kernelPreferBuiltin = true; PreferBuiltin = true;
kernelTarget = "zImage"; target = "zImage";
kernelExtraConfig = '' extraConfig = ''
# Serial port for Raspberry Pi 3. Upstream forgot to add it to the ARMv7 defconfig. # Serial port for Raspberry Pi 3. Upstream forgot to add it to the ARMv7 defconfig.
SERIAL_8250_BCM2835AUX y SERIAL_8250_BCM2835AUX y
SERIAL_8250_EXTENDED y SERIAL_8250_EXTENDED y
SERIAL_8250_SHARE_IRQ y SERIAL_8250_SHARE_IRQ y
# Fix broken sunxi-sid nvmem driver. # Fix broken sunxi-sid nvmem driver.
TI_CPTS y TI_CPTS y
# Hangs ODROID-XU4 # Hangs ODROID-XU4
ARM_BIG_LITTLE_CPUIDLE n ARM_BIG_LITTLE_CPUIDLE n
# Disable OABI to have seccomp_filter (required for systemd) # Disable OABI to have seccomp_filter (required for systemd)
# https://github.com/raspberrypi/firmware/issues/651 # https://github.com/raspberrypi/firmware/issues/651
OABI_COMPAT n OABI_COMPAT n
''; '';
};
gcc = { gcc = {
# Some table about fpu flags: # Some table about fpu flags:
# http://community.arm.com/servlet/JiveServlet/showImage/38-1981-3827/blogentry-103749-004812900+1365712953_thumb.png # http://community.arm.com/servlet/JiveServlet/showImage/38-1981-3827/blogentry-103749-004812900+1365712953_thumb.png
@@ -341,35 +343,35 @@ rec {
}; };
aarch64-multiplatform = { aarch64-multiplatform = {
name = "aarch64-multiplatform"; linux-kernel = {
kernelMajor = "2.6"; # Using "2.6" enables 2.6 kernel syscalls in glibc. name = "aarch64-multiplatform";
kernelBaseConfig = "defconfig"; baseConfig = "defconfig";
kernelArch = "arm64"; DTB = true;
kernelDTB = true; autoModules = true;
kernelAutoModules = true; preferBuiltin = true;
kernelPreferBuiltin = true; extraConfig = ''
kernelExtraConfig = '' # Raspberry Pi 3 stuff. Not needed for s >= 4.10.
# Raspberry Pi 3 stuff. Not needed for kernels >= 4.10. ARCH_BCM2835 y
ARCH_BCM2835 y BCM2835_MBOX y
BCM2835_MBOX y BCM2835_WDT y
BCM2835_WDT y RASPBERRYPI_FIRMWARE y
RASPBERRYPI_FIRMWARE y RASPBERRYPI_POWER y
RASPBERRYPI_POWER y SERIAL_8250_BCM2835AUX y
SERIAL_8250_BCM2835AUX y SERIAL_8250_EXTENDED y
SERIAL_8250_EXTENDED y SERIAL_8250_SHARE_IRQ y
SERIAL_8250_SHARE_IRQ y
# Cavium ThunderX stuff. # Cavium ThunderX stuff.
PCI_HOST_THUNDER_ECAM y PCI_HOST_THUNDER_ECAM y
# Nvidia Tegra stuff. # Nvidia Tegra stuff.
PCI_TEGRA y PCI_TEGRA y
# The default (=y) forces us to have the XHCI firmware available in initrd, # The default (=y) forces us to have the XHCI firmware available in initrd,
# which our initrd builder can't currently do easily. # which our initrd builder can't currently do easily.
USB_XHCI_TEGRA m USB_XHCI_TEGRA m
''; '';
kernelTarget = "Image"; target = "Image";
};
gcc = { gcc = {
arch = "armv8-a"; arch = "armv8-a";
}; };
@@ -380,9 +382,9 @@ rec {
## ##
ben_nanonote = { ben_nanonote = {
name = "ben_nanonote"; linux-kernel = {
kernelMajor = "2.6"; name = "ben_nanonote";
kernelArch = "mips"; };
gcc = { gcc = {
arch = "mips32"; arch = "mips32";
float = "soft"; float = "soft";
@@ -390,76 +392,76 @@ rec {
}; };
fuloong2f_n32 = { fuloong2f_n32 = {
name = "fuloong2f_n32"; linux-kernel = {
kernelMajor = "2.6"; name = "fuloong2f_n32";
kernelBaseConfig = "lemote2f_defconfig"; baseConfig = "lemote2f_defconfig";
kernelArch = "mips"; autoModules = false;
kernelAutoModules = false; extraConfig = ''
kernelExtraConfig = '' MIGRATION n
MIGRATION n COMPACTION n
COMPACTION n
# nixos mounts some cgroup # nixos mounts some cgroup
CGROUPS y CGROUPS y
BLK_DEV_RAM y BLK_DEV_RAM y
BLK_DEV_INITRD y BLK_DEV_INITRD y
BLK_DEV_CRYPTOLOOP m BLK_DEV_CRYPTOLOOP m
BLK_DEV_DM m BLK_DEV_DM m
DM_CRYPT m DM_CRYPT m
MD y MD y
REISERFS_FS m REISERFS_FS m
EXT4_FS m EXT4_FS m
USB_STORAGE_CYPRESS_ATACB m USB_STORAGE_CYPRESS_ATACB m
IP_PNP y IP_PNP y
IP_PNP_DHCP y IP_PNP_DHCP y
IP_PNP_BOOTP y IP_PNP_BOOTP y
NFS_FS y NFS_FS y
ROOT_NFS y ROOT_NFS y
TUN m TUN m
NFS_V4 y NFS_V4 y
NFS_V4_1 y NFS_V4_1 y
NFS_FSCACHE y NFS_FSCACHE y
NFSD m NFSD m
NFSD_V2_ACL y NFSD_V2_ACL y
NFSD_V3 y NFSD_V3 y
NFSD_V3_ACL y NFSD_V3_ACL y
NFSD_V4 y NFSD_V4 y
# Fail to build # Fail to build
DRM n DRM n
SCSI_ADVANSYS n SCSI_ADVANSYS n
USB_ISP1362_HCD n USB_ISP1362_HCD n
SND_SOC n SND_SOC n
SND_ALI5451 n SND_ALI5451 n
FB_SAVAGE n FB_SAVAGE n
SCSI_NSP32 n SCSI_NSP32 n
ATA_SFF n ATA_SFF n
SUNGEM n SUNGEM n
IRDA n IRDA n
ATM_HE n ATM_HE n
SCSI_ACARD n SCSI_ACARD n
BLK_DEV_CMD640_ENHANCED n BLK_DEV_CMD640_ENHANCED n
FUSE_FS m FUSE_FS m
# Needed for udev >= 150 # Needed for udev >= 150
SYSFS_DEPRECATED_V2 n SYSFS_DEPRECATED_V2 n
VGA_CONSOLE n VGA_CONSOLE n
VT_HW_CONSOLE_BINDING y VT_HW_CONSOLE_BINDING y
SERIAL_8250_CONSOLE y SERIAL_8250_CONSOLE y
FRAMEBUFFER_CONSOLE y FRAMEBUFFER_CONSOLE y
EXT2_FS y EXT2_FS y
EXT3_FS y EXT3_FS y
REISERFS_FS y REISERFS_FS y
MAGIC_SYSRQ y MAGIC_SYSRQ y
# The kernel doesn't boot at all, with FTRACE # The kernel doesn't boot at all, with FTRACE
FTRACE n FTRACE n
''; '';
kernelTarget = "vmlinux"; target = "vmlinux";
};
gcc = { gcc = {
arch = "loongson2f"; arch = "loongson2f";
float = "hard"; float = "hard";
@@ -472,34 +474,36 @@ rec {
## ##
riscv-multiplatform = { riscv-multiplatform = {
name = "riscv-multiplatform"; linux-kernel = {
kernelArch = "riscv"; name = "riscv-multiplatform";
kernelTarget = "vmlinux"; target = "vmlinux";
kernelAutoModules = true; autoModules = true;
kernelBaseConfig = "defconfig"; baseConfig = "defconfig";
kernelExtraConfig = '' extraConfig = ''
FTRACE n FTRACE n
SERIAL_OF_PLATFORM y SERIAL_OF_PLATFORM y
''; '';
};
}; };
select = platform: select = platform:
# x86 # x86
/**/ if platform.isx86_32 then pc32 /**/ if platform.isx86 then pc
else if platform.isx86_64 then pc64
# ARM # ARM
else if platform.isAarch32 then let else if platform.isAarch32 then let
version = platform.parsed.cpu.version or null; version = platform.parsed.cpu.version or null;
in if version == null then pcBase in if version == null then pc
else if lib.versionOlder version "6" then sheevaplug else if lib.versionOlder version "6" then sheevaplug
else if lib.versionOlder version "7" then raspberrypi else if lib.versionOlder version "7" then raspberrypi
else armv7l-hf-multiplatform else armv7l-hf-multiplatform
else if platform.isAarch64 then aarch64-multiplatform else if platform.isAarch64 then aarch64-multiplatform
else if platform.isRiscV then riscv-multiplatform
else if platform.parsed.cpu == lib.systems.parse.cpuTypes.mipsel then fuloong2f_n32 else if platform.parsed.cpu == lib.systems.parse.cpuTypes.mipsel then fuloong2f_n32
else if platform.parsed.cpu == lib.systems.parse.cpuTypes.powerpc64le then powernv else if platform.parsed.cpu == lib.systems.parse.cpuTypes.powerpc64le then powernv
else pcBase; else pc;
} }


@@ -660,4 +660,71 @@ runTests {
    expected = [ [ "foo" ] [ "foo" "<name>" "bar" ] [ "foo" "bar" ] ];
  };
testCartesianProductOfEmptySet = {
expr = cartesianProductOfSets {};
expected = [ {} ];
};
testCartesianProductOfOneSet = {
expr = cartesianProductOfSets { a = [ 1 2 3 ]; };
expected = [ { a = 1; } { a = 2; } { a = 3; } ];
};
testCartesianProductOfTwoSets = {
expr = cartesianProductOfSets { a = [ 1 ]; b = [ 10 20 ]; };
expected = [
{ a = 1; b = 10; }
{ a = 1; b = 20; }
];
};
testCartesianProductOfTwoSetsWithOneEmpty = {
expr = cartesianProductOfSets { a = [ ]; b = [ 10 20 ]; };
expected = [ ];
};
testCartesianProductOfThreeSets = {
expr = cartesianProductOfSets {
a = [ 1 2 3 ];
b = [ 10 20 30 ];
c = [ 100 200 300 ];
};
expected = [
{ a = 1; b = 10; c = 100; }
{ a = 1; b = 10; c = 200; }
{ a = 1; b = 10; c = 300; }
{ a = 1; b = 20; c = 100; }
{ a = 1; b = 20; c = 200; }
{ a = 1; b = 20; c = 300; }
{ a = 1; b = 30; c = 100; }
{ a = 1; b = 30; c = 200; }
{ a = 1; b = 30; c = 300; }
{ a = 2; b = 10; c = 100; }
{ a = 2; b = 10; c = 200; }
{ a = 2; b = 10; c = 300; }
{ a = 2; b = 20; c = 100; }
{ a = 2; b = 20; c = 200; }
{ a = 2; b = 20; c = 300; }
{ a = 2; b = 30; c = 100; }
{ a = 2; b = 30; c = 200; }
{ a = 2; b = 30; c = 300; }
{ a = 3; b = 10; c = 100; }
{ a = 3; b = 10; c = 200; }
{ a = 3; b = 10; c = 300; }
{ a = 3; b = 20; c = 100; }
{ a = 3; b = 20; c = 200; }
{ a = 3; b = 20; c = 300; }
{ a = 3; b = 30; c = 100; }
{ a = 3; b = 30; c = 200; }
{ a = 3; b = 30; c = 300; }
];
};
} }

View file

@ -262,6 +262,13 @@ checkConfigOutput true config.value.mkbefore ./types-anything/mk-mods.nix
checkConfigOutput 1 config.value.nested.foo ./types-anything/mk-mods.nix checkConfigOutput 1 config.value.nested.foo ./types-anything/mk-mods.nix
checkConfigOutput baz config.value.nested.bar.baz ./types-anything/mk-mods.nix checkConfigOutput baz config.value.nested.bar.baz ./types-anything/mk-mods.nix
## types.functionTo
checkConfigOutput "input is input" config.result ./functionTo/trivial.nix
checkConfigOutput "a b" config.result ./functionTo/merging-list.nix
checkConfigError 'A definition for option .fun.\[function body\]. is not of type .string.. Definition values:\n- In .*wrong-type.nix' config.result ./functionTo/wrong-type.nix
checkConfigOutput "b a" config.result ./functionTo/list-order.nix
checkConfigOutput "a c" config.result ./functionTo/merging-attrs.nix
cat <<EOF cat <<EOF
====== module tests ====== ====== module tests ======
$pass Pass $pass Pass

View file

@ -0,0 +1,25 @@
{ lib, config, ... }:
let
inherit (lib) types;
in {
options = {
fun = lib.mkOption {
type = types.functionTo (types.listOf types.str);
};
result = lib.mkOption {
type = types.str;
default = toString (config.fun {
a = "a";
b = "b";
c = "c";
});
};
};
config.fun = lib.mkMerge [
(input: lib.mkAfter [ input.a ])
(input: [ input.b ])
];
}

View file

@ -0,0 +1,27 @@
{ lib, config, ... }:
let
inherit (lib) types;
in {
options = {
fun = lib.mkOption {
type = types.functionTo (types.attrsOf types.str);
};
result = lib.mkOption {
type = types.str;
default = toString (lib.attrValues (config.fun {
a = "a";
b = "b";
c = "c";
}));
};
};
config.fun = lib.mkMerge [
(input: { inherit (input) a; })
(input: { inherit (input) b; })
(input: {
b = lib.mkForce input.c;
})
];
}

View file

@ -0,0 +1,24 @@
{ lib, config, ... }:
let
inherit (lib) types;
in {
options = {
fun = lib.mkOption {
type = types.functionTo (types.listOf types.str);
};
result = lib.mkOption {
type = types.str;
default = toString (config.fun {
a = "a";
b = "b";
c = "c";
});
};
};
config.fun = lib.mkMerge [
(input: [ input.a ])
(input: [ input.b ])
];
}

View file

@ -0,0 +1,17 @@
{ lib, config, ... }:
let
inherit (lib) types;
in {
options = {
fun = lib.mkOption {
type = types.functionTo types.str;
};
result = lib.mkOption {
type = types.str;
default = config.fun "input";
};
};
config.fun = input: "input is ${input}";
}

View file

@ -0,0 +1,18 @@
{ lib, config, ... }:
let
inherit (lib) types;
in {
options = {
fun = lib.mkOption {
type = types.functionTo types.str;
};
result = lib.mkOption {
type = types.str;
default = config.fun 0;
};
};
config.fun = input: input + 1;
}

View file

@ -28,7 +28,7 @@ with lib.systems.doubles; lib.runTests {
testredox = mseteq redox [ "x86_64-redox" ]; testredox = mseteq redox [ "x86_64-redox" ];
testgnu = mseteq gnu (linux /* ++ kfreebsd ++ ... */); testgnu = mseteq gnu (linux /* ++ kfreebsd ++ ... */);
testillumos = mseteq illumos [ "x86_64-solaris" ]; testillumos = mseteq illumos [ "x86_64-solaris" ];
testlinux = mseteq linux [ "aarch64-linux" "armv5tel-linux" "armv6l-linux" "armv7a-linux" "armv7l-linux" "i686-linux" "mipsel-linux" "riscv32-linux" "riscv64-linux" "x86_64-linux" "powerpc64le-linux" ]; testlinux = mseteq linux [ "aarch64-linux" "armv5tel-linux" "armv6l-linux" "armv7a-linux" "armv7l-linux" "i686-linux" "mipsel-linux" "riscv32-linux" "riscv64-linux" "x86_64-linux" "powerpc64-linux" "powerpc64le-linux" ];
testnetbsd = mseteq netbsd [ "i686-netbsd" "x86_64-netbsd" ]; testnetbsd = mseteq netbsd [ "i686-netbsd" "x86_64-netbsd" ];
testopenbsd = mseteq openbsd [ "i686-openbsd" "x86_64-openbsd" ]; testopenbsd = mseteq openbsd [ "i686-openbsd" "x86_64-openbsd" ];
testwindows = mseteq windows [ "i686-cygwin" "x86_64-cygwin" "i686-windows" "x86_64-windows" ]; testwindows = mseteq windows [ "i686-cygwin" "x86_64-cygwin" "i686-windows" "x86_64-windows" ];

View file

@ -453,6 +453,17 @@ rec {
functor = (defaultFunctor name) // { wrapped = elemType; }; functor = (defaultFunctor name) // { wrapped = elemType; };
}; };
functionTo = elemType: mkOptionType {
name = "functionTo";
description = "function that evaluates to a(n) ${elemType.name}";
check = isFunction;
merge = loc: defs:
fnArgs: (mergeDefinitions (loc ++ [ "[function body]" ]) elemType (map (fn: { inherit (fn) file; value = fn.value fnArgs; }) defs)).mergedValue;
getSubOptions = elemType.getSubOptions;
getSubModules = elemType.getSubModules;
substSubModules = m: functionTo (elemType.substSubModules m);
};
# A submodule (like typed attribute set). See NixOS manual. # A submodule (like typed attribute set). See NixOS manual.
submodule = modules: submoduleWith { submodule = modules: submoduleWith {
shorthandOnlyDefinesConfig = true; shorthandOnlyDefinesConfig = true;

View file

@ -868,6 +868,12 @@
githubId = 706854; githubId = 706854;
name = "Etienne Laurin"; name = "Etienne Laurin";
}; };
attila-lendvai = {
name = "Attila Lendvai";
email = "attila@lendvai.name";
github = "attila-lendvai";
githubId = 840345;
};
auntie = { auntie = {
email = "auntieNeo@gmail.com"; email = "auntieNeo@gmail.com";
github = "auntieNeo"; github = "auntieNeo";
@ -970,6 +976,12 @@
email = "sivaraman.balaji@gmail.com"; email = "sivaraman.balaji@gmail.com";
name = "Balaji Sivaraman"; name = "Balaji Sivaraman";
}; };
baloo = {
email = "nixpkgs@superbaloo.net";
github = "baloo";
githubId = 59060;
name = "Arthur Gautier";
};
balsoft = { balsoft = {
email = "balsoft75@gmail.com"; email = "balsoft75@gmail.com";
github = "balsoft"; github = "balsoft";
@ -1030,6 +1042,12 @@
githubId = 1015044; githubId = 1015044;
name = "Brandon Carrell"; name = "Brandon Carrell";
}; };
bcc32 = {
email = "me@bcc32.com";
github = "bcc32";
githubId = 1239097;
name = "Aaron Zeng";
};
bcdarwin = { bcdarwin = {
email = "bcdarwin@gmail.com"; email = "bcdarwin@gmail.com";
github = "bcdarwin"; github = "bcdarwin";
@ -1078,6 +1096,12 @@
githubId = 75972; githubId = 75972;
name = "Ben Booth"; name = "Ben Booth";
}; };
berberman = {
email = "berberman@yandex.com";
github = "berberman";
githubId = 26041945;
name = "Potato Hatsue";
};
berce = { berce = {
email = "bert.moens@gmail.com"; email = "bert.moens@gmail.com";
github = "berce"; github = "berce";
@ -1577,6 +1601,12 @@
githubId = 33503784; githubId = 33503784;
name = "Yucheng Zhang"; name = "Yucheng Zhang";
}; };
cheriimoya = {
email = "github@hausch.xyz";
github = "cheriimoya";
githubId = 28303440;
name = "Max Hausch";
};
chessai = { chessai = {
email = "chessai1996@gmail.com"; email = "chessai1996@gmail.com";
github = "chessai"; github = "chessai";
@ -1693,16 +1723,6 @@
githubId = 46303707; githubId = 46303707;
name = "Christian Lütke-Stetzkamp"; name = "Christian Lütke-Stetzkamp";
}; };
kampka = {
email = "christian@kampka.net";
github = "kampka";
githubId = 422412;
name = "Christian Kampka";
keys = [{
longkeyid = "ed25519/0x1CBE9645DD68E915";
fingerprint = "F7FA 0BD0 8775 337C F6AB 4A14 1CBE 9645 DD68 E915";
}];
};
ckauhaus = { ckauhaus = {
email = "kc@flyingcircus.io"; email = "kc@flyingcircus.io";
github = "ckauhaus"; github = "ckauhaus";
@ -2031,6 +2051,12 @@
githubId = 23366017; githubId = 23366017;
name = "Dan Haraj"; name = "Dan Haraj";
}; };
danielbarter = {
email = "danielbarter@gmail.com";
github = "danielbarter";
githubId = 8081722;
name = "Daniel Barter";
};
danieldk = { danieldk = {
email = "me@danieldk.eu"; email = "me@danieldk.eu";
github = "danieldk"; github = "danieldk";
@ -2575,6 +2601,12 @@
githubId = 119483; githubId = 119483;
name = "Matthew Brown"; name = "Matthew Brown";
}; };
eduardosm = {
email = "esm@eduardosm.net";
github = "eduardosm";
githubId = 761151;
name = "Eduardo Sánchez Muñoz";
};
eduarrrd = { eduarrrd = {
email = "e.bachmakov@gmail.com"; email = "e.bachmakov@gmail.com";
github = "eduarrrd"; github = "eduarrrd";
@ -3127,6 +3159,12 @@
githubId = 92793; githubId = 92793;
name = "Friedrich von Never"; name = "Friedrich von Never";
}; };
fortuneteller2k = {
email = "lythe1107@gmail.com";
github = "fortuneteller2k";
githubId = 20619776;
name = "fortuneteller2k";
};
fpletz = { fpletz = {
email = "fpletz@fnordicwalking.de"; email = "fpletz@fnordicwalking.de";
github = "fpletz"; github = "fpletz";
@ -3453,6 +3491,12 @@
fingerprint = "7FC7 98AB 390E 1646 ED4D 8F1F 797F 6238 68CD 00C2"; fingerprint = "7FC7 98AB 390E 1646 ED4D 8F1F 797F 6238 68CD 00C2";
}]; }];
}; };
greizgh = {
email = "greizgh@ephax.org";
github = "greizgh";
githubId = 1313624;
name = "greizgh";
};
greydot = { greydot = {
email = "lanablack@amok.cc"; email = "lanablack@amok.cc";
github = "greydot"; github = "greydot";
@ -3637,6 +3681,12 @@
githubId = 3656888; githubId = 3656888;
name = "hhm"; name = "hhm";
}; };
higebu = {
name = "Yuya Kusakabe";
email = "yuya.kusakabe@gmail.com";
github = "higebu";
githubId = 733288;
};
hinton = { hinton = {
email = "t@larkery.com"; email = "t@larkery.com";
name = "Tom Hinton"; name = "Tom Hinton";
@ -3679,18 +3729,42 @@
fingerprint = "78C2 E81C 828A 420B 269A EBC1 49FA 39F8 A7F7 35F9"; fingerprint = "78C2 E81C 828A 420B 269A EBC1 49FA 39F8 A7F7 35F9";
}]; }];
}; };
humancalico = {
email = "humancalico@disroot.org";
github = "humancalico";
githubId = 51334444;
name = "Akshat Agarwal";
};
hodapp = { hodapp = {
email = "hodapp87@gmail.com"; email = "hodapp87@gmail.com";
github = "Hodapp87"; github = "Hodapp87";
githubId = 896431; githubId = 896431;
name = "Chris Hodapp"; name = "Chris Hodapp";
}; };
holymonson = {
email = "holymonson@gmail.com";
github = "holymonson";
githubId = 902012;
name = "Monson Shao";
};
hongchangwu = { hongchangwu = {
email = "wuhc85@gmail.com"; email = "wuhc85@gmail.com";
github = "hongchangwu"; github = "hongchangwu";
githubId = 362833; githubId = 362833;
name = "Hongchang Wu"; name = "Hongchang Wu";
}; };
hoverbear = {
email = "operator+nix@hoverbear.org";
github = "hoverbear";
githubId = 130903;
name = "Ana Hobden";
};
holgerpeters = {
name = "Holger Peters";
email = "holger.peters@posteo.de";
github = "HolgerPeters";
githubId = 4097049;
};
hrdinka = { hrdinka = {
email = "c.nix@hrdinka.at"; email = "c.nix@hrdinka.at";
github = "hrdinka"; github = "hrdinka";
@ -3879,6 +3953,12 @@
githubId = 4458; githubId = 4458;
name = "Ivan Kozik"; name = "Ivan Kozik";
}; };
ivan-babrou = {
email = "nixpkgs@ivan.computer";
name = "Ivan Babrou";
github = "bobrik";
githubId = 89186;
};
ivan-timokhin = { ivan-timokhin = {
email = "nixpkgs@ivan.timokhin.name"; email = "nixpkgs@ivan.timokhin.name";
name = "Ivan Timokhin"; name = "Ivan Timokhin";
@ -4023,6 +4103,12 @@
githubId = 45598; githubId = 45598;
name = "William Casarin"; name = "William Casarin";
}; };
jbcrail = {
name = "Joseph Crail";
email = "jbcrail@gmail.com";
github = "jbcrail";
githubId = 6038;
};
jbedo = { jbedo = {
email = "cu@cua0.org"; email = "cu@cua0.org";
github = "jbedo"; github = "jbedo";
@ -4815,6 +4901,12 @@
github = "kmein"; github = "kmein";
githubId = 10352507; githubId = 10352507;
}; };
kmicklas = {
email = "maintainer@kmicklas.com";
name = "Ken Micklas";
github = "kmicklas";
githubId = 929096;
};
knairda = { knairda = {
email = "adrian@kummerlaender.eu"; email = "adrian@kummerlaender.eu";
name = "Adrian Kummerlaender"; name = "Adrian Kummerlaender";
@ -4899,6 +4991,12 @@
githubId = 4032; githubId = 4032;
name = "Kristoffer Thømt Ravneberg"; name = "Kristoffer Thømt Ravneberg";
}; };
kritnich = {
email = "kritnich@kritni.ch";
github = "Kritnich";
githubId = 22116767;
name = "Kritnich";
};
kroell = { kroell = {
email = "nixosmainter@makroell.de"; email = "nixosmainter@makroell.de";
github = "rokk4"; github = "rokk4";
@ -4958,6 +5056,10 @@
github = "kyleondy"; github = "kyleondy";
githubId = 1640900; githubId = 1640900;
name = "Kyle Ondy"; name = "Kyle Ondy";
keys = [{
longkeyid = "rsa4096/0xDB0E3C33491F91C9";
fingerprint = "3C79 9D26 057B 64E6 D907 B0AC DB0E 3C33 491F 91C9";
}];
}; };
kylesferrazza = { kylesferrazza = {
name = "Kyle Sferrazza"; name = "Kyle Sferrazza";
@ -4971,6 +5073,16 @@
fingerprint = "5A9A 1C9B 2369 8049 3B48 CF5B 81A1 5409 4816 2372"; fingerprint = "5A9A 1C9B 2369 8049 3B48 CF5B 81A1 5409 4816 2372";
}]; }];
}; };
l-as = {
email = "las@protonmail.ch";
github = "L-as";
githubId = 22075344;
keys = [{
longkeyid = "rsa2048/0xAC458A7D1087D025";
fingerprint = "A093 EA17 F450 D4D1 60A0 1194 AC45 8A7D 1087 D025";
}];
name = "Las Safin";
};
laikq = { laikq = {
email = "gwen@quasebarth.de"; email = "gwen@quasebarth.de";
github = "laikq"; github = "laikq";
@ -5110,12 +5222,24 @@
githubId = 42153076; githubId = 42153076;
name = "Alexey Nikashkin"; name = "Alexey Nikashkin";
}; };
lesuisse = {
email = "thomas@gerbet.me";
github = "LeSuisse";
githubId = 737767;
name = "Thomas Gerbet";
};
lethalman = { lethalman = {
email = "lucabru@src.gnome.org"; email = "lucabru@src.gnome.org";
github = "lethalman"; github = "lethalman";
githubId = 480920; githubId = 480920;
name = "Luca Bruno"; name = "Luca Bruno";
}; };
leungbk = {
email = "leungbk@mailfence.com";
github = "leungbk";
githubId = 29217594;
name = "Brian Leung";
};
lewo = { lewo = {
email = "lewo@abesis.fr"; email = "lewo@abesis.fr";
github = "nlewo"; github = "nlewo";
@ -5150,6 +5274,12 @@
githubId = 307589; githubId = 307589;
name = "Nathaniel Baxter"; name = "Nathaniel Baxter";
}; };
liamdiprose = {
email = "liam@liamdiprose.com";
github = "liamdiprose";
githubId = 1769386;
name = "Liam Diprose";
};
liff = { liff = {
email = "liff@iki.fi"; email = "liff@iki.fi";
github = "liff"; github = "liff";
@ -5432,6 +5562,12 @@
githubId = 2057309; githubId = 2057309;
name = "Sergey Sofeychuk"; name = "Sergey Sofeychuk";
}; };
lxea = {
email = "nix@amk.ie";
github = "lxea";
githubId = 7910815;
name = "Alex McGrath";
};
lynty = { lynty = {
email = "ltdong93+nix@gmail.com"; email = "ltdong93+nix@gmail.com";
github = "lynty"; github = "lynty";
@ -5564,6 +5700,12 @@
email = "markus@wotringer.de"; email = "markus@wotringer.de";
name = "Markus Wotringer"; name = "Markus Wotringer";
}; };
marijanp = {
name = "Marijan Petričević";
email = "marijan.petricevic94@gmail.com";
github = "marijanp";
githubId = 13599169;
};
marius851000 = { marius851000 = {
email = "mariusdavid@laposte.net"; email = "mariusdavid@laposte.net";
name = "Marius David"; name = "Marius David";
@ -5598,6 +5740,12 @@
fingerprint = "B573 5118 0375 A872 FBBF 7770 B629 036B E399 EEE9"; fingerprint = "B573 5118 0375 A872 FBBF 7770 B629 036B E399 EEE9";
}]; }];
}; };
mausch = {
email = "mauricioscheffer@gmail.com";
github = "mausch";
githubId = 95194;
name = "Mauricio Scheffer";
};
matejc = { matejc = {
email = "cotman.matej@gmail.com"; email = "cotman.matej@gmail.com";
github = "matejc"; github = "matejc";
@ -6587,6 +6735,12 @@
githubId = 148037; githubId = 148037;
name = "Joachim Breitner"; name = "Joachim Breitner";
}; };
nomisiv = {
email = "simon@nomisiv.com";
github = "NomisIV";
githubId = 47303199;
name = "Simon Gutgesell";
};
noneucat = { noneucat = {
email = "andy@lolc.at"; email = "andy@lolc.at";
github = "noneucat"; github = "noneucat";
@ -6663,6 +6817,12 @@
githubId = 7677321; githubId = 7677321;
name = "Paul Trehiou"; name = "Paul Trehiou";
}; };
nyanotech = {
name = "nyanotech";
email = "nyanotechnology@gmail.com";
github = "nyanotech";
githubId = 33802077;
};
nyarly = { nyarly = {
email = "nyarly@gmail.com"; email = "nyarly@gmail.com";
github = "nyarly"; github = "nyarly";
@ -7101,6 +7261,16 @@
fingerprint = "A3A3 65AE 16ED A7A0 C29C 88F1 9712 452E 8BE3 372E"; fingerprint = "A3A3 65AE 16ED A7A0 C29C 88F1 9712 452E 8BE3 372E";
}]; }];
}; };
pinpox = {
email = "mail@pablo.tools";
github = "pinpox";
githubId = 1719781;
name = "Pablo Ovelleiro Corral";
keys = [{
longkeyid = "sa4096/0x823A6154426408D3";
fingerprint = "D03B 218C AE77 1F77 D7F9 20D9 823A 6154 4264 08D3";
}];
};
piotr = { piotr = {
email = "ppietrasa@gmail.com"; email = "ppietrasa@gmail.com";
name = "Piotr Pietraszkiewicz"; name = "Piotr Pietraszkiewicz";
@ -7147,6 +7317,12 @@
githubId = 13000278; githubId = 13000278;
name = "Maksim Bronsky"; name = "Maksim Bronsky";
}; };
PlushBeaver = {
name = "Dmitry Kozlyuk";
email = "dmitry.kozliuk+nixpkgs@gmail.com";
github = "PlushBeaver";
githubId = 8988269;
};
pmahoney = { pmahoney = {
email = "pat@polycrystal.org"; email = "pat@polycrystal.org";
github = "pmahoney"; github = "pmahoney";
@ -7597,6 +7773,12 @@
githubId = 42433779; githubId = 42433779;
name = "Rémy Grünblatt"; name = "Rémy Grünblatt";
}; };
rguevara84 = {
email = "fuzztkd@gmail.com";
github = "rguevara84";
githubId = 12279531;
name = "Ricardo Guevara";
};
rht = { rht = {
email = "rhtbot@protonmail.com"; email = "rhtbot@protonmail.com";
github = "rht"; github = "rht";
@ -8277,6 +8459,12 @@
githubId = 997855; githubId = 997855;
name = "Narazaki Shuji"; name = "Narazaki Shuji";
}; };
shofius = {
name = "Sam Hofius";
email = "sam@samhofi.us";
github = "kf5grd";
githubId = 18297490;
};
shou = { shou = {
email = "x+g@shou.io"; email = "x+g@shou.io";
github = "Shou"; github = "Shou";
@ -8515,6 +8703,12 @@
githubId = 7669898; githubId = 7669898;
name = "Katharina Fey"; name = "Katharina Fey";
}; };
spease = {
email = "peasteven@gmail.com";
github = "spease";
githubId = 2825204;
name = "Steven Pease";
};
spencerjanssen = { spencerjanssen = {
email = "spencerjanssen@gmail.com"; email = "spencerjanssen@gmail.com";
github = "spencerjanssen"; github = "spencerjanssen";
@ -8545,6 +8739,12 @@
githubId = 36899624; githubId = 36899624;
name = "squalus"; name = "squalus";
}; };
srapenne = {
email = "solene@perso.pw";
github = "rapenne-s";
githubId = 248016;
name = "Solène Rapenne";
};
srghma = { srghma = {
email = "srghma@gmail.com"; email = "srghma@gmail.com";
github = "srghma"; github = "srghma";
@ -8669,6 +8869,12 @@
githubId = 1315818; githubId = 1315818;
name = "Felix Bühler"; name = "Felix Bühler";
}; };
stupremee = {
email = "jutus.k@protonmail.com";
github = "Stupremee";
githubId = 39732259;
name = "Justus K";
};
suhr = { suhr = {
email = "suhr@i2pmail.org"; email = "suhr@i2pmail.org";
github = "suhr"; github = "suhr";
@ -8711,6 +8917,12 @@
githubId = 1040871; githubId = 1040871;
name = "Mathis Antony"; name = "Mathis Antony";
}; };
svend = {
email = "svend@svends.net";
github = "svend";
githubId = 306190;
name = "Svend Sorensen";
};
svrana = { svrana = {
email = "shaw@vranix.com"; email = "shaw@vranix.com";
github = "svrana"; github = "svrana";
@ -9055,6 +9267,12 @@
githubId = 844343; githubId = 844343;
name = "Thiago K. Okada"; name = "Thiago K. Okada";
}; };
thibautmarty = {
email = "github@thibautmarty.fr";
github = "ThibautMarty";
githubId = 3268082;
name = "Thibaut Marty";
};
thmzlt = { thmzlt = {
email = "git@thomazleite.com"; email = "git@thomazleite.com";
github = "thmzlt"; github = "thmzlt";
@ -9660,6 +9878,10 @@
email = "oliver.huntuk@gmail.com"; email = "oliver.huntuk@gmail.com";
name = "Oliver Hunt"; name = "Oliver Hunt";
}; };
vq = {
email = "vq@erq.se";
name = "Daniel Nilsson";
};
vrthra = { vrthra = {
email = "rahul@gopinath.org"; email = "rahul@gopinath.org";
github = "vrthra"; github = "vrthra";
@ -9766,6 +9988,12 @@
githubId = 43315; githubId = 43315;
name = "William Roe"; name = "William Roe";
}; };
wldhx = {
email = "wldhx+nixpkgs@wldhx.me";
github = "wldhx";
githubId = 15619766;
name = "wldhx";
};
wmertens = { wmertens = {
email = "Wout.Mertens@gmail.com"; email = "Wout.Mertens@gmail.com";
github = "wmertens"; github = "wmertens";

View file

@ -6,7 +6,7 @@ basexx,,,,,
binaryheap,,,,,vcunat binaryheap,,,,,vcunat
bit32,,,,lua5_1,lblasc bit32,,,,lua5_1,lblasc
busted,,,,, busted,,,,,
cassowary,,,,,marsam cassowary,,,,,marsam alerque
cjson,lua-cjson,,,, cjson,lua-cjson,,,,
compat53,,,,,vcunat compat53,,,,,vcunat
cosmo,,,,,marsam cosmo,,,,,marsam


View file

@ -1,4 +1,4 @@
{ stdenv, makeWrapper, perl, perlPackages }: { stdenv, lib, makeWrapper, perl, perlPackages }:
stdenv.mkDerivation { stdenv.mkDerivation {
name = "nixpkgs-lint-1"; name = "nixpkgs-lint-1";
@ -15,9 +15,9 @@ stdenv.mkDerivation {
wrapProgram $out/bin/nixpkgs-lint --set PERL5LIB $PERL5LIB wrapProgram $out/bin/nixpkgs-lint --set PERL5LIB $PERL5LIB
''; '';
meta = { meta = with lib; {
maintainers = [ stdenv.lib.maintainers.eelco ]; maintainers = [ maintainers.eelco ];
description = "A utility for Nixpkgs contributors to check Nixpkgs for common errors"; description = "A utility for Nixpkgs contributors to check Nixpkgs for common errors";
platforms = stdenv.lib.platforms.unix; platforms = platforms.unix;
}; };
} }

View file

@ -66,7 +66,7 @@ nixpkgs$ ${0} ${GENERATED_NIXFILE}
These packages are manually refined in lua-overrides.nix These packages are manually refined in lua-overrides.nix
*/ */
{ self, stdenv, fetchurl, fetchgit, pkgs, ... } @ args: { self, stdenv, lib, fetchurl, fetchgit, pkgs, ... } @ args:
self: super: self: super:
with self; with self;
{ {

View file

@ -87,7 +87,7 @@ nixpkgs.config.packageOverrides = pkgs:
You can edit the config with this snippet (by default <command>make You can edit the config with this snippet (by default <command>make
menuconfig</command> won't work out of the box on nixos): menuconfig</command> won't work out of the box on nixos):
<screen><![CDATA[ <screen><![CDATA[
nix-shell -E 'with import <nixpkgs> {}; kernelToOverride.overrideAttrs (o: {nativeBuildInputs=o.nativeBuildInputs ++ [ pkgconfig ncurses ];})' nix-shell -E 'with import <nixpkgs> {}; kernelToOverride.overrideAttrs (o: {nativeBuildInputs=o.nativeBuildInputs ++ [ pkg-config ncurses ];})'
]]></screen> ]]></screen>
or you can let nixpkgs generate the configuration. Nixpkgs generates it via or you can let nixpkgs generate the configuration. Nixpkgs generates it via
answering the interactive kernel utility <command>make config</command>. The answering the interactive kernel utility <command>make config</command>. The

View file

@ -11,8 +11,7 @@
</para> </para>
<para> <para>
It makes virtio modules available on the initrd, sets the system time from It makes virtio modules available on the initrd and sets the system time from
the hardware clock to work around a bug in qemu-kvm, and the hardware clock to work around a bug in qemu-kvm.
<link linkend="opt-security.rngd.enable">enables rngd</link>.
</para> </para>
</section> </section>

View file

@ -186,7 +186,7 @@
The driver has many options (see <xref linkend="ch-options"/>). For The driver has many options (see <xref linkend="ch-options"/>). For
instance, the following disables tap-to-click behavior: instance, the following disables tap-to-click behavior:
<programlisting> <programlisting>
<xref linkend="opt-services.xserver.libinput.tapping"/> = false; <xref linkend="opt-services.xserver.libinput.touchpad.tapping"/> = false;
</programlisting> </programlisting>
Note: the use of <literal>services.xserver.synaptics</literal> is deprecated Note: the use of <literal>services.xserver.synaptics</literal> is deprecated
since NixOS 17.09. since NixOS 17.09.

View file

@ -1,7 +1,7 @@
<chapter xmlns="http://docbook.org/ns/docbook" <chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-contributing"> xml:id="chap-contributing">
<title>Contributing to this documentation</title> <title>Contributing to this manual</title>
<para> <para>
The DocBook sources of NixOS' manual are in the <filename The DocBook sources of NixOS' manual are in the <filename
xlink:href="https://github.com/NixOS/nixpkgs/tree/master/nixos/doc/manual"> xlink:href="https://github.com/NixOS/nixpkgs/tree/master/nixos/doc/manual">

View file

@ -21,7 +21,11 @@
xlink:href="https://discourse.nixos.org">Discourse</literal> or xlink:href="https://discourse.nixos.org">Discourse</literal> or
on the <link on the <link
xlink:href="irc://irc.freenode.net/#nixos"> xlink:href="irc://irc.freenode.net/#nixos">
<literal>#nixos</literal> channel on Freenode</link>. Bugs should be <literal>#nixos</literal> channel on Freenode</link>, or
consider
<link
xlink:href="#chap-contributing">
contributing to this manual</link>. Bugs should be
reported in reported in
<link <link
xlink:href="https://github.com/NixOS/nixpkgs/issues">NixOS xlink:href="https://github.com/NixOS/nixpkgs/issues">NixOS

View file

@ -43,6 +43,15 @@
Linux kernel is updated to branch 5.4 by default (from 4.19). Linux kernel is updated to branch 5.4 by default (from 4.19).
</para> </para>
</listitem> </listitem>
<listitem>
<para>
Grub is updated to 2.04, adding support for booting from F2FS filesystems and
Btrfs volumes using zstd compression. Note that some users have been unable
to boot after upgrading to 2.04 - for more information, please see <link
xlink:href="https://github.com/NixOS/nixpkgs/issues/61718#issuecomment-617618503">this
discussion</link>.
</para>
</listitem>
<listitem> <listitem>
<para> <para>
Postgresql for NixOS service now defaults to v11. Postgresql for NixOS service now defaults to v11.

View file

@ -418,6 +418,26 @@ http://some.json-exporter.host:7979/probe?target=https://example.com/some/json/e
SDK licenses if your project requires it. See the androidenv documentation for more details. SDK licenses if your project requires it. See the androidenv documentation for more details.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
The attribute <varname>mpi</varname> is now consistently used to
provide a default, system-wide MPI implementation.
The default implementation is openmpi, which was already used by
all derivations affected by this change.
Note that all packages that previously took <varname>mpi ? null</varname> as an input
for optional MPI builds have been changed to use the boolean input parameter
<varname>useMpi</varname> to enable building with MPI.
Building all packages with <varname>mpich</varname> instead
of the default <varname>openmpi</varname> can now be achieved like this:
<programlisting>
self: super:
{
mpi = super.mpich;
}
</programlisting>
</para>
</listitem>
<listitem> <listitem>
<para> <para>
The Searx module has been updated with the ability to configure the The Searx module has been updated with the ability to configure the
@ -430,6 +450,22 @@ http://some.json-exporter.host:7979/probe?target=https://example.com/some/json/e
dynamically allocated uid. dynamically allocated uid.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
The libinput module has been updated with the ability to configure mouse and touchpad settings separately.
The touchpad-specific options under <literal>services.xserver.libinput</literal> have been moved to <literal>services.xserver.libinput.touchpad</literal>,
and there is a new <literal>services.xserver.libinput.mouse</literal> section for mouse-related configuration.
</para>
<para>
Since touchpad options no longer apply to all devices, you may want to replicate your touchpad configuration in the
mouse section, as sketched below.
</para>
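          <para>
            A minimal sketch of the renamed options (the mouse setting shown is an assumed per-device option and may differ on your system):
          </para>
          <programlisting>
services.xserver.libinput.enable = true;
services.xserver.libinput.touchpad.tapping = false;
# assumed: mice accept the same per-device settings as touchpads
services.xserver.libinput.mouse.naturalScrolling = false;
</programlisting>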
</listitem>
<listitem>
<para>
ALSA OSS emulation (<varname>sound.enableOSSEmulation</varname>) is now disabled by default.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>
@ -441,6 +477,14 @@ http://some.json-exporter.host:7979/probe?target=https://example.com/some/json/e
<title>Other Notable Changes</title> <title>Other Notable Changes</title>
<itemizedlist> <itemizedlist>
<listitem>
<para>
<literal>stdenv.lib</literal> has been deprecated and will break
eval in 21.11. Please use <literal>pkgs.lib</literal> instead.
See <link xlink:href="https://github.com/NixOS/nixpkgs/issues/108938">#108938</link>
for details.
</para>
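      <para>
        As a rough sketch, a package expression can take <literal>lib</literal> directly instead of reaching through <literal>stdenv</literal>:
      </para>
      <programlisting>
{ lib, stdenv, fetchurl }:

stdenv.mkDerivation {
  # ...
  meta = with lib; {
    # previously: stdenv.lib.licenses.mit
    license = licenses.mit;
  };
}
</programlisting>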
</listitem>
<listitem> <listitem>
<para> <para>
The Mailman NixOS module (<literal>services.mailman</literal>) has a new The Mailman NixOS module (<literal>services.mailman</literal>) has a new
@ -574,6 +618,15 @@ http://some.json-exporter.host:7979/probe?target=https://example.com/some/json/e
been dropped from upstream releases. been dropped from upstream releases.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
In the ACME module, the data used to build the hash for the account
directory has changed to accommodate new features that reduce account
rate limit issues. This will trigger new account creation on the first
rebuild following this update. No issues are expected to arise from this,
thanks to the new account creation handling.
</para>
</listitem>
<listitem> <listitem>
<para> <para>
<xref linkend="opt-users.users._name_.createHome" /> now always ensures home directory permissions to be <literal>0700</literal>. <xref linkend="opt-users.users._name_.createHome" /> now always ensures home directory permissions to be <literal>0700</literal>.
@ -592,6 +645,33 @@ http://some.json-exporter.host:7979/probe?target=https://example.com/some/json/e
<literal>/etc/netgroup</literal> defines network-wide groups and may affect to setups using NIS. <literal>/etc/netgroup</literal> defines network-wide groups and may affect to setups using NIS.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
Platforms, like <varname>stdenv.hostPlatform</varname>, no longer have a <varname>platform</varname> attribute.
It has been (mostly) flattened away:
</para>
<itemizedlist>
<listitem><para><varname>platform.gcc</varname> is now <varname>gcc</varname></para></listitem>
<listitem><para><literal>platform.kernel*</literal> is now <literal>linux-kernel.*</literal></para></listitem>
</itemizedlist>
<para>
Additionally, <varname>platform.kernelArch</varname> moved to the top level as <varname>linuxArch</varname> to match the other <literal>*Arch</literal> variables.
</para>
<para>
The <varname>platform</varname> grouping of these attributes never carried any real meaning; it was just a historical/implementation artifact that was overdue for removal.
</para>
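      <para>
        A before/after sketch of the renamed attribute paths:
      </para>
      <programlisting>
# before
pkgs.stdenv.hostPlatform.platform.kernelTarget
pkgs.stdenv.hostPlatform.platform.kernelArch

# after
pkgs.stdenv.hostPlatform.linux-kernel.target
pkgs.stdenv.hostPlatform.linuxArch
</programlisting>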
</listitem>
<listitem>
<para>
<varname>services.restic</varname> now uses a dedicated cache directory for every backup defined in <varname>services.restic.backups</varname>. The old global cache directory, <literal>/root/.cache/restic</literal>, is now unused and can be removed to free up disk space.
</para>
</listitem>
<listitem>
<para>
<literal>isync</literal>: The <literal>isync</literal> compatibility wrapper was removed. The Master/Slave
terminology has been deprecated and should be replaced with Far/Near in the configuration file.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>
</section> </section>

View file

@ -257,7 +257,8 @@ let format' = format; in let
''} ''}
echo "copying staging root to image..." echo "copying staging root to image..."
cptofs -p ${optionalString (partitionTableType != "none") "-P ${rootPartition}"} -t ${fsType} -i $diskImage $root/* / cptofs -p ${optionalString (partitionTableType != "none") "-P ${rootPartition}"} -t ${fsType} -i $diskImage $root/* / ||
(echo >&2 "ERROR: cptofs failed. diskSize might be too small for closure."; exit 1)
''; '';
in pkgs.vmTools.runInLinuxVM ( in pkgs.vmTools.runInLinuxVM (
pkgs.runCommand name pkgs.runCommand name

View file

@ -83,7 +83,7 @@ in
packages = mkOption { packages = mkOption {
type = types.listOf types.package; type = types.listOf types.package;
default = with pkgs.kbdKeymaps; [ dvp neo ]; default = with pkgs.kbdKeymaps; [ dvp neo ];
defaultText = ''with pkgs.kbdKeymaps; [ dvp neo ]''; defaultText = "with pkgs.kbdKeymaps; [ dvp neo ]";
description = '' description = ''
List of additional packages that provide console fonts, keymaps and List of additional packages that provide console fonts, keymaps and
other resources for virtual consoles use. other resources for virtual consoles use.

View file

@ -436,7 +436,7 @@ in
useEmbeddedBitmaps = mkOption { useEmbeddedBitmaps = mkOption {
type = types.bool; type = types.bool;
default = false; default = false;
description = ''Use embedded bitmaps in fonts like Calibri.''; description = "Use embedded bitmaps in fonts like Calibri.";
}; };
}; };

View file

@ -1,11 +1,9 @@
{ config, lib, pkgs, ... }: { config, lib, pkgs, ... }:
with lib;
{ {
options = { options = {
gnu = mkOption { gnu = lib.mkOption {
type = types.bool; type = lib.types.bool;
default = false; default = false;
description = '' description = ''
When enabled, GNU software is chosen by default whenever a there is When enabled, GNU software is chosen by default whenever a there is
@ -15,7 +13,7 @@ with lib;
}; };
}; };
config = mkIf config.gnu { config = lib.mkIf config.gnu {
environment.systemPackages = with pkgs; environment.systemPackages = with pkgs;
# TODO: Adjust `requiredPackages' from `system-path.nix'. # TODO: Adjust `requiredPackages' from `system-path.nix'.
@ -26,7 +24,7 @@ with lib;
nano zile nano zile
texinfo # for the stand-alone Info reader texinfo # for the stand-alone Info reader
] ]
++ stdenv.lib.optional (!stdenv.isAarch32) grub2; ++ lib.optional (!stdenv.isAarch32) grub2;
# GNU GRUB, where available. # GNU GRUB, where available.

View file

@ -84,7 +84,7 @@ with lib;
environment.etc."locale.conf".source = pkgs.writeText "locale.conf" environment.etc."locale.conf".source = pkgs.writeText "locale.conf"
'' ''
LANG=${config.i18n.defaultLocale} LANG=${config.i18n.defaultLocale}
${concatStringsSep "\n" (mapAttrsToList (n: v: ''${n}=${v}'') config.i18n.extraLocaleSettings)} ${concatStringsSep "\n" (mapAttrsToList (n: v: "${n}=${v}") config.i18n.extraLocaleSettings)}
''; '';
}; };

View file

@ -58,6 +58,7 @@ in
"2.nixos.pool.ntp.org" "2.nixos.pool.ntp.org"
"3.nixos.pool.ntp.org" "3.nixos.pool.ntp.org"
]; ];
type = types.listOf types.str;
description = '' description = ''
The set of NTP servers from which to synchronise. The set of NTP servers from which to synchronise.
''; '';
@ -194,8 +195,7 @@ in
''; '';
# /etc/netgroup: Network-wide groups. # /etc/netgroup: Network-wide groups.
netgroup.text = mkDefault '' netgroup.text = mkDefault "";
'';
# /etc/host.conf: resolver configuration file # /etc/host.conf: resolver configuration file
"host.conf".text = '' "host.conf".text = ''

View file

@ -183,7 +183,7 @@ in {
config = mkOption { config = mkOption {
type = types.attrsOf types.unspecified; type = types.attrsOf types.unspecified;
default = {}; default = {};
description = ''Config of the pulse daemon. See <literal>man pulse-daemon.conf</literal>.''; description = "Config of the pulse daemon. See <literal>man pulse-daemon.conf</literal>.";
example = literalExample ''{ realtime-scheduling = "yes"; }''; example = literalExample ''{ realtime-scheduling = "yes"; }'';
}; };
}; };

View file

@ -364,7 +364,7 @@ let
count = mkOption { count = mkOption {
type = types.int; type = types.int;
default = 1; default = 1;
description = ''Count of subordinate user ids''; description = "Count of subordinate user ids";
}; };
}; };
}; };
@ -381,7 +381,7 @@ let
count = mkOption { count = mkOption {
type = types.int; type = types.int;
default = 1; default = 1;
description = ''Count of subordinate group ids''; description = "Count of subordinate group ids";
}; };
}; };
}; };

View file

@ -62,7 +62,7 @@ with lib;
services.dbus.packages = packages; services.dbus.packages = packages;
systemd.packages = packages; systemd.packages = packages;
environment.variables = { environment.sessionVariables = {
GTK_USE_PORTAL = mkIf cfg.gtkUsePortal "1"; GTK_USE_PORTAL = mkIf cfg.gtkUsePortal "1";
XDG_DESKTOP_PORTAL_DIR = "${joinedPortals}/share/xdg-desktop-portal/portals"; XDG_DESKTOP_PORTAL_DIR = "${joinedPortals}/share/xdg-desktop-portal/portals";
}; };

View file

@ -68,11 +68,11 @@ let
patchShebangs scripts/* patchShebangs scripts/*
substituteInPlace scripts/Makefile.lib \ substituteInPlace scripts/Makefile.lib \
--replace 'DTC_FLAGS += $(DTC_FLAGS_$(basetarget))' 'DTC_FLAGS += $(DTC_FLAGS_$(basetarget)) -@' --replace 'DTC_FLAGS += $(DTC_FLAGS_$(basetarget))' 'DTC_FLAGS += $(DTC_FLAGS_$(basetarget)) -@'
make ${pkgs.stdenv.hostPlatform.platform.kernelBaseConfig} ARCH="${pkgs.stdenv.hostPlatform.platform.kernelArch}" make ${pkgs.stdenv.hostPlatform.linux-kernel.baseConfig} ARCH="${pkgs.stdenv.hostPlatform.linuxArch}"
make dtbs ARCH="${pkgs.stdenv.hostPlatform.platform.kernelArch}" make dtbs ARCH="${pkgs.stdenv.hostPlatform.linuxArch}"
''; '';
installPhase = '' installPhase = ''
make dtbs_install INSTALL_DTBS_PATH=$out/dtbs ARCH="${pkgs.stdenv.hostPlatform.platform.kernelArch}" make dtbs_install INSTALL_DTBS_PATH=$out/dtbs ARCH="${pkgs.stdenv.hostPlatform.linuxArch}"
''; '';
}; };
@ -115,7 +115,7 @@ in
options = { options = {
hardware.deviceTree = { hardware.deviceTree = {
enable = mkOption { enable = mkOption {
default = pkgs.stdenv.hostPlatform.platform.kernelDTB or false; default = pkgs.stdenv.hostPlatform.linux-kernel.DTB or false;
type = types.bool; type = types.bool;
description = '' description = ''
Build device tree files. These are used to describe the Build device tree files. These are used to describe the

View file

@ -0,0 +1,31 @@
{ config, lib, pkgs, ... }:
with lib;
let
kernelVersion = config.boot.kernelPackages.kernel.version;
linuxKernelMinVersion = "5.8";
kernelPatch = pkgs.kernelPatches.ath_regd_optional // {
extraConfig = ''
ATH_USER_REGD y
'';
};
in
{
options.networking.wireless.athUserRegulatoryDomain = mkOption {
default = false;
type = types.bool;
description = ''
If enabled, sets the ATH_USER_REGD kernel config switch to true to
disable the enforcement of EEPROM regulatory restrictions for ath
drivers. Requires at least Linux ${linuxKernelMinVersion}.
'';
};
config = mkIf config.networking.wireless.athUserRegulatoryDomain {
assertions = singleton {
assertion = lessThan 0 (builtins.compareVersions kernelVersion linuxKernelMinVersion);
message = "ATH_USER_REGD patch for kernels older than ${linuxKernelMinVersion} not ported yet!";
};
boot.kernelPatches = [ kernelPatch ];
};
}
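
# Example usage (sketch): opt in to the user regulatory domain override from a
# NixOS configuration; this pulls in the kernel patch defined above.
#
#   networking.wireless.athUserRegulatoryDomain = true;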

View file

@ -19,23 +19,9 @@ in
nitrokey-app package, depending on your device and needs. nitrokey-app package, depending on your device and needs.
''; '';
}; };
group = mkOption {
type = types.str;
default = "nitrokey";
example = "wheel";
description = ''
Grant access to Nitrokey devices to users in this group.
'';
};
}; };
config = mkIf cfg.enable { config = mkIf cfg.enable {
services.udev.packages = [ services.udev.packages = [ pkgs.nitrokey-udev-rules ];
(pkgs.nitrokey-udev-rules.override (attrs:
{ inherit (cfg) group; }
))
];
users.groups.${cfg.group} = {};
}; };
} }

View file

@ -0,0 +1,81 @@
{ config, lib, pkgs, ... }:
let
inherit (lib) mkIf mkOption types;
cfg = config.hardware.sensor.hddtemp;
wrapper = pkgs.writeShellScript "hddtemp-wrapper" ''
set -eEuo pipefail
file=/var/lib/hddtemp/hddtemp.db
drives=(${toString (map (e: ''$(realpath ${lib.escapeShellArg e}) '') cfg.drives)})
cp ${pkgs.hddtemp}/share/hddtemp/hddtemp.db $file
${lib.concatMapStringsSep "\n" (e: "echo ${lib.escapeShellArg e} >> $file") cfg.dbEntries}
exec ${pkgs.hddtemp}/bin/hddtemp ${lib.escapeShellArgs cfg.extraArgs} \
--daemon \
--unit=${cfg.unit} \
--file=$file \
''${drives[@]}
'';
in
{
meta.maintainers = with lib.maintainers; [ peterhoeg ];
###### interface
options = {
hardware.sensor.hddtemp = {
enable = mkOption {
description = ''
Enable this option to support HDD/SSD temperature sensors.
'';
type = types.bool;
default = false;
};
drives = mkOption {
description = "List of drives to monitor. If you pass /dev/disk/by-path/* entries the symlinks will be resolved as hddtemp doesn't like names with colons.";
type = types.listOf types.str;
};
unit = mkOption {
description = "Celcius or Fahrenheit";
type = types.enum [ "C" "F" ];
default = "C";
};
dbEntries = mkOption {
description = "Additional DB entries";
type = types.listOf types.str;
default = [ ];
};
extraArgs = mkOption {
description = "Additional arguments passed to the daemon.";
type = types.listOf types.str;
default = [ ];
};
};
};
###### implementation
config = mkIf cfg.enable {
systemd.services.hddtemp = {
description = "HDD/SSD temperature";
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "forking";
ExecStart = wrapper;
StateDirectory = "hddtemp";
PrivateTmp = true;
ProtectHome = "tmpfs";
ProtectSystem = "strict";
};
};
};
}
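
# Example usage (sketch; the by-path entry below is hypothetical, substitute your own drives):
#
#   hardware.sensor.hddtemp = {
#     enable = true;
#     unit = "C";
#     drives = [ "/dev/disk/by-path/pci-0000:00:17.0-ata-1" ];
#   };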

View file

@ -40,7 +40,7 @@ in
default = "wheel"; default = "wheel";
example = "video"; example = "video";
type = types.str; type = types.str;
description = ''Group for bumblebee socket''; description = "Group for bumblebee socket";
}; };
connectDisplay = mkOption { connectDisplay = mkOption {

View file

@ -5,36 +5,17 @@
with lib; with lib;
let let
nvidia_x11 = let
drivers = config.services.xserver.videoDrivers; drivers = config.services.xserver.videoDrivers;
isDeprecated = str: (hasPrefix "nvidia" str) && (str != "nvidia");
# FIXME: should introduce an option like hasDeprecated = drivers: any isDeprecated drivers;
# hardware.video.nvidia.package for overriding the default NVIDIA in if (hasDeprecated drivers) then
# driver. throw ''
nvidiaForKernel = kernelPackages: Selecting an nvidia driver has been modified for NixOS 19.03. The version is now set using `hardware.nvidia.package`.
if elem "nvidia" drivers then ''
kernelPackages.nvidia_x11 else if (elem "nvidia" drivers) then cfg.package else null;
else if elem "nvidiaBeta" drivers then
kernelPackages.nvidia_x11_beta
else if elem "nvidiaVulkanBeta" drivers then
kernelPackages.nvidia_x11_vulkan_beta
else if elem "nvidiaLegacy304" drivers then
kernelPackages.nvidia_x11_legacy304
else if elem "nvidiaLegacy340" drivers then
kernelPackages.nvidia_x11_legacy340
else if elem "nvidiaLegacy390" drivers then
kernelPackages.nvidia_x11_legacy390
else null;
nvidia_x11 = nvidiaForKernel config.boot.kernelPackages;
nvidia_libs32 =
if versionOlder nvidia_x11.version "391" then
((nvidiaForKernel pkgs.pkgsi686Linux.linuxPackages).override { libsOnly = true; kernel = null; }).out
else
(nvidiaForKernel config.boot.kernelPackages).lib32;
enabled = nvidia_x11 != null; enabled = nvidia_x11 != null;
cfg = config.hardware.nvidia; cfg = config.hardware.nvidia;
pCfg = cfg.prime; pCfg = cfg.prime;
@ -63,6 +44,15 @@ in
''; '';
}; };
hardware.nvidia.powerManagement.finegrained = mkOption {
type = types.bool;
default = false;
description = ''
Experimental power management of PRIME offload. For more information, see
the NVIDIA docs, chapter 22. PCI-Express runtime power management.
'';
};
hardware.nvidia.modesetting.enable = mkOption { hardware.nvidia.modesetting.enable = mkOption {
type = types.bool; type = types.bool;
default = false; default = false;
@ -96,6 +86,16 @@ in
''; '';
}; };
hardware.nvidia.prime.amdgpuBusId = mkOption {
type = types.str;
default = "";
example = "PCI:4:0:0";
description = ''
Bus ID of the AMD APU. You can find it using lspci; for example if lspci
shows the AMD APU at "04:00.0", set this option to "PCI:4:0:0".
'';
};
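  # Illustrative sketch of a PRIME offload setup on an AMD APU plus NVIDIA dGPU
  # (the bus IDs below are hypothetical; read the real ones from `lspci`):
  #
  #   hardware.nvidia.prime = {
  #     offload.enable = true;
  #     amdgpuBusId = "PCI:4:0:0";
  #     nvidiaBusId = "PCI:1:0:0";
  #   };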
hardware.nvidia.prime.sync.enable = mkOption { hardware.nvidia.prime.sync.enable = mkOption {
type = types.bool; type = types.bool;
default = false; default = false;
@ -151,9 +151,22 @@ in
GPUs stay awake even during headless mode. GPUs stay awake even during headless mode.
''; '';
}; };
hardware.nvidia.package = lib.mkOption {
type = lib.types.package;
default = config.boot.kernelPackages.nvidiaPackages.stable;
defaultText = "config.boot.kernelPackages.nvidiaPackages.stable";
description = ''
The NVIDIA X11 derivation to use.
'';
example = "config.boot.kernelPackages.nvidiaPackages.legacy340";
};
}; };
config = mkIf enabled { config = let
igpuDriver = if pCfg.intelBusId != "" then "modesetting" else "amdgpu";
igpuBusId = if pCfg.intelBusId != "" then pCfg.intelBusId else pCfg.amdgpuBusId;
in mkIf enabled {
assertions = [ assertions = [
{ {
assertion = with config.services.xserver.displayManager; gdm.nvidiaWayland -> cfg.modesetting.enable; assertion = with config.services.xserver.displayManager; gdm.nvidiaWayland -> cfg.modesetting.enable;
@ -161,7 +174,13 @@ in
} }
{ {
assertion = primeEnabled -> pCfg.nvidiaBusId != "" && pCfg.intelBusId != ""; assertion = primeEnabled -> pCfg.intelBusId == "" || pCfg.amdgpuBusId == "";
message = ''
You cannot configure both an Intel iGPU and an AMD APU. Pick the one corresponding to your processor.
'';
}
{
assertion = primeEnabled -> pCfg.nvidiaBusId != "" && (pCfg.intelBusId != "" || pCfg.amdgpuBusId != "");
message = '' message = ''
When NVIDIA PRIME is enabled, the GPU bus IDs must configured. When NVIDIA PRIME is enabled, the GPU bus IDs must configured.
''; '';
@ -174,6 +193,14 @@ in
assertion = !(syncCfg.enable && offloadCfg.enable); assertion = !(syncCfg.enable && offloadCfg.enable);
message = "Only one NVIDIA PRIME solution may be used at a time."; message = "Only one NVIDIA PRIME solution may be used at a time.";
} }
{
assertion = !(syncCfg.enable && cfg.powerManagement.finegrained);
message = "Sync precludes powering down the NVIDIA GPU.";
}
{
assertion = cfg.powerManagement.finegrained -> offloadCfg.enable;
message = "Fine-grained power management requires offload to be enabled.";
}
]; ];
# If Optimus/PRIME is enabled, we: # If Optimus/PRIME is enabled, we:
@ -183,18 +210,22 @@ in
# "nvidia" driver, in order to allow the X server to start without any outputs. # "nvidia" driver, in order to allow the X server to start without any outputs.
# - Add a separate Device section for the Intel GPU, using the "modesetting" # - Add a separate Device section for the Intel GPU, using the "modesetting"
# driver and with the configured BusID. # driver and with the configured BusID.
# - OR add a separate Device section for the AMD APU, using the "amdgpu"
# driver and with the configured BusID.
# - Reference that Device section from the ServerLayout section as an inactive # - Reference that Device section from the ServerLayout section as an inactive
# device. # device.
# - Configure the display manager to run specific `xrandr` commands which will # - Configure the display manager to run specific `xrandr` commands which will
# configure/enable displays connected to the Intel GPU. # configure/enable displays connected to the Intel iGPU / AMD APU.
services.xserver.useGlamor = mkDefault offloadCfg.enable; services.xserver.useGlamor = mkDefault offloadCfg.enable;
services.xserver.drivers = optional primeEnabled { services.xserver.drivers = let
name = "modesetting"; in optional primeEnabled {
name = igpuDriver;
display = offloadCfg.enable; display = offloadCfg.enable;
modules = optional (igpuDriver == "amdgpu") [ pkgs.xorg.xf86videoamdgpu ];
deviceSection = '' deviceSection = ''
BusID "${pCfg.intelBusId}" BusID "${igpuBusId}"
${optionalString syncCfg.enable ''Option "AccelMethod" "none"''} ${optionalString syncCfg.enable ''Option "AccelMethod" "none"''}
''; '';
} ++ singleton { } ++ singleton {
@ -205,6 +236,7 @@ in
'' ''
BusID "${pCfg.nvidiaBusId}" BusID "${pCfg.nvidiaBusId}"
${optionalString syncCfg.allowExternalGpu "Option \"AllowExternalGpus\""} ${optionalString syncCfg.allowExternalGpu "Option \"AllowExternalGpus\""}
${optionalString cfg.powerManagement.finegrained "Option \"NVreg_DynamicPowerManagement=0x02\""}
''; '';
screenSection = screenSection =
'' ''
@ -214,14 +246,14 @@ in
}; };
services.xserver.serverLayoutSection = optionalString syncCfg.enable '' services.xserver.serverLayoutSection = optionalString syncCfg.enable ''
Inactive "Device-modesetting[0]" Inactive "Device-${igpuDriver}[0]"
'' + optionalString offloadCfg.enable '' '' + optionalString offloadCfg.enable ''
Option "AllowNVIDIAGPUScreens" Option "AllowNVIDIAGPUScreens"
''; '';
services.xserver.displayManager.setupCommands = optionalString syncCfg.enable '' services.xserver.displayManager.setupCommands = optionalString syncCfg.enable ''
# Added by nvidia configuration module for Optimus/PRIME. # Added by nvidia configuration module for Optimus/PRIME.
${pkgs.xorg.xrandr}/bin/xrandr --setprovideroutputsource modesetting NVIDIA-0 ${pkgs.xorg.xrandr}/bin/xrandr --setprovideroutputsource ${igpuDriver} NVIDIA-0
${pkgs.xorg.xrandr}/bin/xrandr --auto ${pkgs.xorg.xrandr}/bin/xrandr --auto
''; '';
@ -230,9 +262,9 @@ in
}; };
hardware.opengl.package = mkIf (!offloadCfg.enable) nvidia_x11.out; hardware.opengl.package = mkIf (!offloadCfg.enable) nvidia_x11.out;
hardware.opengl.package32 = mkIf (!offloadCfg.enable) nvidia_libs32; hardware.opengl.package32 = mkIf (!offloadCfg.enable) nvidia_x11.lib32;
hardware.opengl.extraPackages = optional offloadCfg.enable nvidia_x11.out; hardware.opengl.extraPackages = optional offloadCfg.enable nvidia_x11.out;
hardware.opengl.extraPackages32 = optional offloadCfg.enable nvidia_libs32; hardware.opengl.extraPackages32 = optional offloadCfg.enable nvidia_x11.lib32;
environment.systemPackages = [ nvidia_x11.bin nvidia_x11.settings ] environment.systemPackages = [ nvidia_x11.bin nvidia_x11.settings ]
++ optionals nvidiaPersistencedEnabled [ nvidia_x11.persistenced ]; ++ optionals nvidiaPersistencedEnabled [ nvidia_x11.persistenced ];
@ -292,16 +324,37 @@ in
boot.kernelParams = optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1" boot.kernelParams = optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
++ optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"; ++ optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1";
# Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
services.udev.extraRules = services.udev.extraRules =
'' ''
# Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 255'" KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 255'"
KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 254'" KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 254'"
KERNEL=="card*", SUBSYSTEM=="drm", DRIVERS=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia%n c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) %n'" KERNEL=="card*", SUBSYSTEM=="drm", DRIVERS=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia%n c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) %n'"
KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 0'" KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 0'"
KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 0'" KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 0'"
'' + optionalString cfg.powerManagement.finegrained ''
# Remove NVIDIA USB xHCI Host Controller devices, if present
ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"
# Remove NVIDIA USB Type-C UCSI devices, if present
ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c8000", ATTR{remove}="1"
# Remove NVIDIA Audio devices, if present
ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x040300", ATTR{remove}="1"
# Enable runtime PM for NVIDIA VGA/3D controller devices on driver bind
ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="auto"
ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="auto"
# Disable runtime PM for NVIDIA VGA/3D controller devices on driver unbind
ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="on"
ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="on"
''; '';
boot.extraModprobeConfig = mkIf cfg.powerManagement.finegrained ''
options nvidia "NVreg_DynamicPowerManagement=0x02"
'';
boot.blacklistedKernelModules = [ "nouveau" "nvidiafb" ]; boot.blacklistedKernelModules = [ "nouveau" "nvidiafb" ];
services.acpid.enable = true; services.acpid.enable = true;

View file

@ -0,0 +1,18 @@
{ config, pkgs, lib, ... }:
with lib;
let
pkg = [ pkgs.switcheroo-control ];
cfg = config.services.switcherooControl;
in {
options.services.switcherooControl = {
enable = mkEnableOption "switcheroo-control, a D-Bus service to check the availability of dual-GPU";
};
config = mkIf cfg.enable {
services.dbus.packages = pkg;
environment.systemPackages = pkg;
systemd.packages = pkg;
systemd.targets.multi-user.wants = [ "switcheroo-control.service" ];
};
}

View file

@ -42,6 +42,7 @@ in
<itemizedlist> <itemizedlist>
<listitem><para>ibus: The intelligent input bus, extra input engines can be added using <literal>i18n.inputMethod.ibus.engines</literal>.</para></listitem> <listitem><para>ibus: The intelligent input bus, extra input engines can be added using <literal>i18n.inputMethod.ibus.engines</literal>.</para></listitem>
<listitem><para>fcitx: A customizable lightweight input method, extra input engines can be added using <literal>i18n.inputMethod.fcitx.engines</literal>.</para></listitem> <listitem><para>fcitx: A customizable lightweight input method, extra input engines can be added using <literal>i18n.inputMethod.fcitx.engines</literal>.</para></listitem>
<listitem><para>fcitx5: The next generation of fcitx, addons (including engines, dictionaries, skins) can be added using <literal>i18n.inputMethod.fcitx5.addons</literal>.</para></listitem>
<listitem><para>nabi: A Korean input method based on XIM. Nabi doesn't support Qt 5.</para></listitem> <listitem><para>nabi: A Korean input method based on XIM. Nabi doesn't support Qt 5.</para></listitem>
<listitem><para>uim: The universal input method, is a library with a XIM bridge. uim mainly support Chinese, Japanese and Korean.</para></listitem> <listitem><para>uim: The universal input method, is a library with a XIM bridge. uim mainly support Chinese, Japanese and Korean.</para></listitem>
<listitem><para>hime: An extremely easy-to-use input method framework.</para></listitem> <listitem><para>hime: An extremely easy-to-use input method framework.</para></listitem>

View file

@ -48,7 +48,7 @@ in
panel = mkOption { panel = mkOption {
type = with types; nullOr path; type = with types; nullOr path;
default = null; default = null;
example = literalExample "''${pkgs.plasma5.plasma-desktop}/lib/libexec/kimpanel-ibus-panel"; example = literalExample "''${pkgs.plasma5Packages.plasma-desktop}/lib/libexec/kimpanel-ibus-panel";
description = "Replace the IBus panel with another panel."; description = "Replace the IBus panel with another panel.";
}; };
}; };

View file

@ -88,7 +88,7 @@ with lib;
system.build.netbootIpxeScript = pkgs.writeTextDir "netboot.ipxe" '' system.build.netbootIpxeScript = pkgs.writeTextDir "netboot.ipxe" ''
#!ipxe #!ipxe
kernel ${pkgs.stdenv.hostPlatform.platform.kernelTarget} init=${config.system.build.toplevel}/init initrd=initrd ${toString config.boot.kernelParams} kernel ${pkgs.stdenv.hostPlatform.linux-kernel.target} init=${config.system.build.toplevel}/init initrd=initrd ${toString config.boot.kernelParams}
initrd initrd initrd initrd
boot boot
''; '';

View file

@ -1,11 +1,11 @@
{lib, stdenv, boost, cmake, pkgconfig, nix, ... }: {lib, stdenv, boost, cmake, pkg-config, nix, ... }:
stdenv.mkDerivation rec { stdenv.mkDerivation rec {
name = "nixos-option"; name = "nixos-option";
src = ./.; src = ./.;
nativeBuildInputs = [ cmake pkgconfig ]; nativeBuildInputs = [ cmake pkg-config ];
buildInputs = [ boost nix ]; buildInputs = [ boost nix ];
meta = { meta = with lib; {
license = stdenv.lib.licenses.lgpl2Plus; license = licenses.lgpl2Plus;
maintainers = with lib.maintainers; [ chkno ]; maintainers = with maintainers; [ chkno ];
}; };
} }

View file

@ -26,6 +26,7 @@ in
}; };
reservedMemory = mkOption { reservedMemory = mkOption {
default = "128M"; default = "128M";
type = types.str;
description = '' description = ''
The amount of memory reserved for the crashdump kernel. The amount of memory reserved for the crashdump kernel.
If you choose a too high value, dmesg will mention If you choose a too high value, dmesg will mention

View file

@ -71,7 +71,7 @@ in
#utmp = 29; # unused #utmp = 29; # unused
# ddclient = 30; # converted to DynamicUser = true # ddclient = 30; # converted to DynamicUser = true
davfs2 = 31; davfs2 = 31;
#disnix = 33; # unused #disnix = 33; # module removed
osgi = 34; osgi = 34;
tor = 35; tor = 35;
cups = 36; cups = 36;
@ -387,7 +387,7 @@ in
utmp = 29; utmp = 29;
# ddclient = 30; # converted to DynamicUser = true # ddclient = 30; # converted to DynamicUser = true
davfs2 = 31; davfs2 = 31;
disnix = 33; #disnix = 33; # module removed
osgi = 34; osgi = 34;
tor = 35; tor = 35;
#cups = 36; # unused #cups = 36; # unused

View file

@ -215,7 +215,7 @@ in {
'' ''
else '' else ''
exec ${cfg.locate}/bin/updatedb \ exec ${cfg.locate}/bin/updatedb \
${optionalString (cfg.localuser != null && ! isMLocate) ''--localuser=${cfg.localuser}''} \ ${optionalString (cfg.localuser != null && ! isMLocate) "--localuser=${cfg.localuser}"} \
--output=${toString cfg.output} ${concatStringsSep " " cfg.extraFlags} --output=${toString cfg.output} ${concatStringsSep " " cfg.extraFlags}
''; '';
environment = optionalAttrs (!isMLocate) { environment = optionalAttrs (!isMLocate) {

View file

@ -73,7 +73,7 @@ in
} }
''; '';
type = pkgsType; type = pkgsType;
example = literalExample ''import <nixpkgs> {}''; example = literalExample "import <nixpkgs> {}";
description = '' description = ''
If set, the pkgs argument to all NixOS modules is the value of If set, the pkgs argument to all NixOS modules is the value of
this option, extended with <code>nixpkgs.overlays</code>, if this option, extended with <code>nixpkgs.overlays</code>, if

View file

@ -46,12 +46,14 @@
./hardware/cpu/intel-microcode.nix ./hardware/cpu/intel-microcode.nix
./hardware/digitalbitbox.nix ./hardware/digitalbitbox.nix
./hardware/device-tree.nix ./hardware/device-tree.nix
./hardware/sensor/hddtemp.nix
./hardware/sensor/iio.nix ./hardware/sensor/iio.nix
./hardware/keyboard/zsa.nix ./hardware/keyboard/zsa.nix
./hardware/ksm.nix ./hardware/ksm.nix
./hardware/ledger.nix ./hardware/ledger.nix
./hardware/logitech.nix ./hardware/logitech.nix
./hardware/mcelog.nix ./hardware/mcelog.nix
./hardware/network/ath-user-regd.nix
./hardware/network/b43.nix ./hardware/network/b43.nix
./hardware/network/intel-2200bg.nix ./hardware/network/intel-2200bg.nix
./hardware/nitrokey.nix ./hardware/nitrokey.nix
@ -169,6 +171,7 @@
./programs/sway.nix ./programs/sway.nix
./programs/system-config-printer.nix ./programs/system-config-printer.nix
./programs/thefuck.nix ./programs/thefuck.nix
./programs/tilp2.nix
./programs/tmux.nix ./programs/tmux.nix
./programs/traceroute.nix ./programs/traceroute.nix
./programs/tsm-client.nix ./programs/tsm-client.nix
@ -348,6 +351,7 @@
./services/editors/emacs.nix ./services/editors/emacs.nix
./services/editors/infinoted.nix ./services/editors/infinoted.nix
./services/games/factorio.nix ./services/games/factorio.nix
./services/games/freeciv.nix
./services/games/minecraft-server.nix ./services/games/minecraft-server.nix
./services/games/minetest-server.nix ./services/games/minetest-server.nix
./services/games/openarena.nix ./services/games/openarena.nix
@ -448,8 +452,6 @@
./services/misc/devmon.nix ./services/misc/devmon.nix
./services/misc/dictd.nix ./services/misc/dictd.nix
./services/misc/dwm-status.nix ./services/misc/dwm-status.nix
./services/misc/dysnomia.nix
./services/misc/disnix.nix
./services/misc/docker-registry.nix ./services/misc/docker-registry.nix
./services/misc/domoticz.nix ./services/misc/domoticz.nix
./services/misc/errbot.nix ./services/misc/errbot.nix
@ -608,6 +610,8 @@
./services/networking/atftpd.nix ./services/networking/atftpd.nix
./services/networking/avahi-daemon.nix ./services/networking/avahi-daemon.nix
./services/networking/babeld.nix ./services/networking/babeld.nix
./services/networking/bee.nix
./services/networking/bee-clef.nix
./services/networking/biboumi.nix ./services/networking/biboumi.nix
./services/networking/bind.nix ./services/networking/bind.nix
./services/networking/bitcoind.nix ./services/networking/bitcoind.nix
@ -633,6 +637,7 @@
./services/networking/dnsdist.nix ./services/networking/dnsdist.nix
./services/networking/dnsmasq.nix ./services/networking/dnsmasq.nix
./services/networking/ncdns.nix ./services/networking/ncdns.nix
./services/networking/nomad.nix
./services/networking/ejabberd.nix ./services/networking/ejabberd.nix
./services/networking/epmd.nix ./services/networking/epmd.nix
./services/networking/ergo.nix ./services/networking/ergo.nix
@ -724,6 +729,7 @@
./services/networking/owamp.nix ./services/networking/owamp.nix
./services/networking/pdnsd.nix ./services/networking/pdnsd.nix
./services/networking/pixiecore.nix ./services/networking/pixiecore.nix
./services/networking/pleroma.nix
./services/networking/polipo.nix ./services/networking/polipo.nix
./services/networking/powerdns.nix ./services/networking/powerdns.nix
./services/networking/pdns-recursor.nix ./services/networking/pdns-recursor.nix
@ -870,10 +876,12 @@
./services/web-apps/documize.nix ./services/web-apps/documize.nix
./services/web-apps/dokuwiki.nix ./services/web-apps/dokuwiki.nix
./services/web-apps/engelsystem.nix ./services/web-apps/engelsystem.nix
./services/web-apps/galene.nix
./services/web-apps/gerrit.nix ./services/web-apps/gerrit.nix
./services/web-apps/gotify-server.nix ./services/web-apps/gotify-server.nix
./services/web-apps/grocy.nix ./services/web-apps/grocy.nix
./services/web-apps/hedgedoc.nix ./services/web-apps/hedgedoc.nix
./services/web-apps/hledger-web.nix
./services/web-apps/icingaweb2/icingaweb2.nix ./services/web-apps/icingaweb2/icingaweb2.nix
./services/web-apps/icingaweb2/module-monitoring.nix ./services/web-apps/icingaweb2/module-monitoring.nix
./services/web-apps/ihatemoney ./services/web-apps/ihatemoney

View file

@ -1,7 +1,7 @@
# Common configuration for virtual machines running under QEMU (using # Common configuration for virtual machines running under QEMU (using
# virtio). # virtio).
{ lib, ... }: { ... }:
{ {
boot.initrd.availableKernelModules = [ "virtio_net" "virtio_pci" "virtio_mmio" "virtio_blk" "virtio_scsi" "9p" "9pnet_virtio" ]; boot.initrd.availableKernelModules = [ "virtio_net" "virtio_pci" "virtio_mmio" "virtio_blk" "virtio_scsi" "9p" "9pnet_virtio" ];
@ -14,6 +14,4 @@
# to the *boot time* of the host). # to the *boot time* of the host).
hwclock -s hwclock -s
''; '';
security.rngd.enable = lib.mkDefault false;
} }

View file

@ -27,14 +27,14 @@ in
# the options below are the same as in "captive-browser.toml" # the options below are the same as in "captive-browser.toml"
browser = mkOption { browser = mkOption {
type = types.str; type = types.str;
default = concatStringsSep " " [ ''${pkgs.chromium}/bin/chromium'' default = concatStringsSep " " [ "${pkgs.chromium}/bin/chromium"
''--user-data-dir=''${XDG_DATA_HOME:-$HOME/.local/share}/chromium-captive'' "--user-data-dir=\${XDG_DATA_HOME:-$HOME/.local/share}/chromium-captive"
''--proxy-server="socks5://$PROXY"'' ''--proxy-server="socks5://$PROXY"''
''--host-resolver-rules="MAP * ~NOTFOUND , EXCLUDE localhost"'' ''--host-resolver-rules="MAP * ~NOTFOUND , EXCLUDE localhost"''
''--no-first-run'' "--no-first-run"
''--new-window'' "--new-window"
''--incognito'' "--incognito"
''http://cache.nixos.org/'' "http://cache.nixos.org/"
]; ];
description = '' description = ''
The shell (/bin/sh) command executed once the proxy starts. The shell (/bin/sh) command executed once the proxy starts.
@ -62,7 +62,7 @@ in
socks5-addr = mkOption { socks5-addr = mkOption {
type = types.str; type = types.str;
default = "localhost:1666"; default = "localhost:1666";
description = ''the listen address for the SOCKS5 proxy server''; description = "the listen address for the SOCKS5 proxy server";
}; };
bindInterface = mkOption { bindInterface = mkOption {

View file

@ -16,18 +16,21 @@ in {
''; '';
}; };
group = mkOption { group = mkOption {
type = types.str;
default = "cdrom"; default = "cdrom";
description = '' description = ''
Group that users must be in to use <command>cdemu</command>. Group that users must be in to use <command>cdemu</command>.
''; '';
}; };
gui = mkOption { gui = mkOption {
type = types.bool;
default = true; default = true;
description = '' description = ''
Whether to install the <command>cdemu</command> GUI (gCDEmu). Whether to install the <command>cdemu</command> GUI (gCDEmu).
''; '';
}; };
image-analyzer = mkOption { image-analyzer = mkOption {
type = types.bool;
default = true; default = true;
description = '' description = ''
Whether to install the image analyzer. Whether to install the image analyzer.

View file

@ -80,6 +80,8 @@ in
# Retry the command if we just installed it. # Retry the command if we just installed it.
if [ $? = 126 ]; then if [ $? = 126 ]; then
"$@" "$@"
else
return 127
fi fi
else else
# Indicate that there was an error so ZSH falls back to its default handler # Indicate that there was an error so ZSH falls back to its default handler

View file

@ -13,6 +13,27 @@ let
(filterAttrs (k: v: v != null) cfg.shellAliases) (filterAttrs (k: v: v != null) cfg.shellAliases)
); );
envShellInit = pkgs.writeText "shellInit" cfge.shellInit;
envLoginShellInit = pkgs.writeText "loginShellInit" cfge.loginShellInit;
envInteractiveShellInit = pkgs.writeText "interactiveShellInit" cfge.interactiveShellInit;
sourceEnv = file:
if cfg.useBabelfish then
"source /etc/fish/${file}.fish"
else
''
set fish_function_path ${pkgs.fishPlugins.foreign-env}/share/fish/vendor_functions.d $fish_function_path
fenv source /etc/fish/foreign-env/${file} > /dev/null
set -e fish_function_path[1]
'';
babelfishTranslate = path: name:
pkgs.runCommand "${name}.fish" {
nativeBuildInputs = [ pkgs.babelfish ];
} "${pkgs.babelfish}/bin/babelfish < ${path} > $out;";
in in
{ {
@ -29,6 +50,15 @@ in
type = types.bool; type = types.bool;
}; };
useBabelfish = mkOption {
type = types.bool;
default = false;
description = ''
If enabled, the configured environment will be translated to native fish using <link xlink:href="https://github.com/bouk/babelfish">babelfish</link>.
Otherwise, <link xlink:href="https://github.com/oh-my-fish/plugin-foreign-env">foreign-env</link> will be used.
'';
};
vendor.config.enable = mkOption { vendor.config.enable = mkOption {
type = types.bool; type = types.bool;
default = true; default = true;
@ -105,72 +135,152 @@ in
# Required for man completions # Required for man completions
documentation.man.generateCaches = lib.mkDefault true; documentation.man.generateCaches = lib.mkDefault true;
environment.etc."fish/foreign-env/shellInit".text = cfge.shellInit; environment = mkMerge [
environment.etc."fish/foreign-env/loginShellInit".text = cfge.loginShellInit; (mkIf cfg.useBabelfish
environment.etc."fish/foreign-env/interactiveShellInit".text = cfge.interactiveShellInit; {
etc."fish/setEnvironment.fish".source = babelfishTranslate config.system.build.setEnvironment "setEnvironment";
etc."fish/shellInit.fish".source = babelfishTranslate envShellInit "shellInit";
etc."fish/loginShellInit.fish".source = babelfishTranslate envLoginShellInit "loginShellInit";
etc."fish/interactiveShellInit.fish".source = babelfishTranslate envInteractiveShellInit "interactiveShellInit";
})
environment.etc."fish/nixos-env-preinit.fish".text = '' (mkIf (!cfg.useBabelfish)
# This happens before $__fish_datadir/config.fish sets fish_function_path, so it is currently {
# unset. We set it and then completely erase it, leaving its configuration to $__fish_datadir/config.fish etc."fish/foreign-env/shellInit".source = envShellInit;
set fish_function_path ${pkgs.fishPlugins.foreign-env}/share/fish/vendor_functions.d $__fish_datadir/functions etc."fish/foreign-env/loginShellInit".source = envLoginShellInit;
etc."fish/foreign-env/interactiveShellInit".source = envInteractiveShellInit;
})
# source the NixOS environment config {
if [ -z "$__NIXOS_SET_ENVIRONMENT_DONE" ] etc."fish/nixos-env-preinit.fish".text =
fenv source ${config.system.build.setEnvironment} if cfg.useBabelfish
end then ''
# source the NixOS environment config
if [ -z "$__NIXOS_SET_ENVIRONMENT_DONE" ]
source /etc/fish/setEnvironment.fish
end
''
else ''
# This happens before $__fish_datadir/config.fish sets fish_function_path, so it is currently
# unset. We set it and then completely erase it, leaving its configuration to $__fish_datadir/config.fish
set fish_function_path ${pkgs.fishPlugins.foreign-env}/share/fish/vendor_functions.d $__fish_datadir/functions
# clear fish_function_path so that it will be correctly set when we return to $__fish_datadir/config.fish # source the NixOS environment config
set -e fish_function_path if [ -z "$__NIXOS_SET_ENVIRONMENT_DONE" ]
''; fenv source ${config.system.build.setEnvironment}
end
environment.etc."fish/config.fish".text = '' # clear fish_function_path so that it will be correctly set when we return to $__fish_datadir/config.fish
# /etc/fish/config.fish: DO NOT EDIT -- this file has been generated automatically. set -e fish_function_path
'';
}
# if we haven't sourced the general config, do it {
if not set -q __fish_nixos_general_config_sourced etc."fish/config.fish".text = ''
set --prepend fish_function_path ${pkgs.fishPlugins.foreign-env}/share/fish/vendor_functions.d # /etc/fish/config.fish: DO NOT EDIT -- this file has been generated automatically.
fenv source /etc/fish/foreign-env/shellInit > /dev/null
set -e fish_function_path[1]
${cfg.shellInit} # if we haven't sourced the general config, do it
if not set -q __fish_nixos_general_config_sourced
${sourceEnv "shellInit"}
# and leave a note so we don't source this config section again from ${cfg.shellInit}
# this very shell (children will source the general config anew)
set -g __fish_nixos_general_config_sourced 1
end
# if we haven't sourced the login config, do it # and leave a note so we don't source this config section again from
status --is-login; and not set -q __fish_nixos_login_config_sourced # this very shell (children will source the general config anew)
and begin set -g __fish_nixos_general_config_sourced 1
set --prepend fish_function_path ${pkgs.fishPlugins.foreign-env}/share/fish/vendor_functions.d end
fenv source /etc/fish/foreign-env/loginShellInit > /dev/null
set -e fish_function_path[1]
${cfg.loginShellInit} # if we haven't sourced the login config, do it
status --is-login; and not set -q __fish_nixos_login_config_sourced
and begin
${sourceEnv "loginShellInit"}
# and leave a note so we don't source this config section again from ${cfg.loginShellInit}
# this very shell (children will source the general config anew)
set -g __fish_nixos_login_config_sourced 1
end
# if we haven't sourced the interactive config, do it # and leave a note so we don't source this config section again from
status --is-interactive; and not set -q __fish_nixos_interactive_config_sourced # this very shell (children will source the general config anew)
and begin set -g __fish_nixos_login_config_sourced 1
${fishAliases} end
set --prepend fish_function_path ${pkgs.fishPlugins.foreign-env}/share/fish/vendor_functions.d # if we haven't sourced the interactive config, do it
fenv source /etc/fish/foreign-env/interactiveShellInit > /dev/null status --is-interactive; and not set -q __fish_nixos_interactive_config_sourced
set -e fish_function_path[1] and begin
${fishAliases}
${cfg.promptInit} ${sourceEnv "interactiveShellInit"}
${cfg.interactiveShellInit}
# and leave a note so we don't source this config section again from ${cfg.promptInit}
# this very shell (children will source the general config anew, ${cfg.interactiveShellInit}
# allowing configuration changes in, e.g, aliases, to propagate)
set -g __fish_nixos_interactive_config_sourced 1 # and leave a note so we don't source this config section again from
end # this very shell (children will source the general config anew,
''; # allowing configuration changes in, e.g, aliases, to propagate)
set -g __fish_nixos_interactive_config_sourced 1
end
'';
}
{
etc."fish/generated_completions".source =
let
patchedGenerator = pkgs.stdenv.mkDerivation {
name = "fish_patched-completion-generator";
srcs = [
"${pkgs.fish}/share/fish/tools/create_manpage_completions.py"
"${pkgs.fish}/share/fish/tools/deroff.py"
];
unpackCmd = "cp $curSrc $(basename $curSrc)";
sourceRoot = ".";
patches = [ ./fish_completion-generator.patch ]; # to prevent collisions of identical completion files
dontBuild = true;
installPhase = ''
mkdir -p $out
cp * $out/
'';
preferLocalBuild = true;
allowSubstitutes = false;
};
generateCompletions = package: pkgs.runCommand
"${package.name}_fish-completions"
(
{
inherit package;
preferLocalBuild = true;
allowSubstitutes = false;
}
// optionalAttrs (package ? meta.priority) { meta.priority = package.meta.priority; }
)
''
mkdir -p $out
if [ -d $package/share/man ]; then
find $package/share/man -type f | xargs ${pkgs.python3.interpreter} ${patchedGenerator}/create_manpage_completions.py --directory $out >/dev/null
fi
'';
in
pkgs.buildEnv {
name = "system_fish-completions";
ignoreCollisions = true;
paths = map generateCompletions config.environment.systemPackages;
};
}
# include programs that bring their own completions
{
pathsToLink = []
++ optional cfg.vendor.config.enable "/share/fish/vendor_conf.d"
++ optional cfg.vendor.completions.enable "/share/fish/vendor_completions.d"
++ optional cfg.vendor.functions.enable "/share/fish/vendor_functions.d";
}
{ systemPackages = [ pkgs.fish ]; }
{
shells = [
"/run/current-system/sw/bin/fish"
"${pkgs.fish}/bin/fish"
];
}
];
programs.fish.interactiveShellInit = '' programs.fish.interactiveShellInit = ''
# add completions generated by NixOS to $fish_complete_path # add completions generated by NixOS to $fish_complete_path
@ -187,61 +297,6 @@ in
end end
''; '';
environment.etc."fish/generated_completions".source =
let
patchedGenerator = pkgs.stdenv.mkDerivation {
name = "fish_patched-completion-generator";
srcs = [
"${pkgs.fish}/share/fish/tools/create_manpage_completions.py"
"${pkgs.fish}/share/fish/tools/deroff.py"
];
unpackCmd = "cp $curSrc $(basename $curSrc)";
sourceRoot = ".";
patches = [ ./fish_completion-generator.patch ]; # to prevent collisions of identical completion files
dontBuild = true;
installPhase = ''
mkdir -p $out
cp * $out/
'';
preferLocalBuild = true;
allowSubstitutes = false;
};
generateCompletions = package: pkgs.runCommand
"${package.name}_fish-completions"
(
{
inherit package;
preferLocalBuild = true;
allowSubstitutes = false;
}
// optionalAttrs (package ? meta.priority) { meta.priority = package.meta.priority; }
)
''
mkdir -p $out
if [ -d $package/share/man ]; then
find $package/share/man -type f | xargs ${pkgs.python3.interpreter} ${patchedGenerator}/create_manpage_completions.py --directory $out >/dev/null
fi
'';
in
pkgs.buildEnv {
name = "system_fish-completions";
ignoreCollisions = true;
paths = map generateCompletions config.environment.systemPackages;
};
# include programs that bring their own completions
environment.pathsToLink = []
++ optional cfg.vendor.config.enable "/share/fish/vendor_conf.d"
++ optional cfg.vendor.completions.enable "/share/fish/vendor_completions.d"
++ optional cfg.vendor.functions.enable "/share/fish/vendor_functions.d";
environment.systemPackages = [ pkgs.fish ];
environment.shells = [
"/run/current-system/sw/bin/fish"
"${pkgs.fish}/bin/fish"
];
}; };
} }
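
Since the fish module now exposes useBabelfish, opting into the babelfish-translated environment setup is a small configuration change. A minimal sketch, assuming only the options visible in the module above:

{ ... }:
{
  # Sketch only: switch the generated /etc/fish files to native fish scripts
  # translated by babelfish instead of sourcing them through foreign-env.
  programs.fish = {
    enable = true;
    useBabelfish = true;
  };
}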

View file

@ -36,7 +36,7 @@ in
askPassword = mkOption { askPassword = mkOption {
type = types.str; type = types.str;
default = "${pkgs.x11_ssh_askpass}/libexec/x11-ssh-askpass"; default = "${pkgs.x11_ssh_askpass}/libexec/x11-ssh-askpass";
description = ''Program used by SSH to ask for passwords.''; description = "Program used by SSH to ask for passwords.";
}; };
forwardX11 = mkOption { forwardX11 = mkOption {

View file

@ -0,0 +1,28 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.programs.tilp2;
in {
options.programs.tilp2 = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Enable tilp2 and udev rules for supported calculators.
'';
};
};
config = mkIf cfg.enable {
services.udev.packages = [
pkgs.libticables2
];
environment.systemPackages = [
pkgs.tilp2
];
};
}
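
A minimal usage sketch for the new tilp2 module (hypothetical configuration fragment; only programs.tilp2.enable is taken from the module above):

{ ... }:
{
  # Hypothetical configuration.nix fragment: installs tilp2 and the
  # libticables2 udev rules declared by the module above.
  programs.tilp2.enable = true;
}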

View file

@ -11,7 +11,7 @@ in
lockerCommand = mkOption { lockerCommand = mkOption {
default = "${pkgs.i3lock}/bin/i3lock"; default = "${pkgs.i3lock}/bin/i3lock";
example = literalExample ''''${pkgs.i3lock-fancy}/bin/i3lock-fancy''; example = literalExample "\${pkgs.i3lock-fancy}/bin/i3lock-fancy";
type = types.separatedString " "; type = types.separatedString " ";
description = "Locker to be used with xsslock"; description = "Locker to be used with xsslock";
}; };

View file

@ -7,6 +7,11 @@ let
numCerts = length (builtins.attrNames cfg.certs); numCerts = length (builtins.attrNames cfg.certs);
_24hSecs = 60 * 60 * 24; _24hSecs = 60 * 60 * 24;
# Used to make unique paths for each cert/account config set
mkHash = with builtins; val: substring 0 20 (hashString "sha256" val);
mkAccountHash = acmeServer: data: mkHash "${toString acmeServer} ${data.keyType} ${data.email}";
accountDirRoot = "/var/lib/acme/.lego/accounts/";
# There are many services required to make cert renewals work. # There are many services required to make cert renewals work.
# They all follow a common structure: # They all follow a common structure:
# - They inherit this commonServiceConfig # - They inherit this commonServiceConfig
@ -19,7 +24,7 @@ let
Type = "oneshot"; Type = "oneshot";
User = "acme"; User = "acme";
Group = mkDefault "acme"; Group = mkDefault "acme";
UMask = 0027; UMask = 0023;
StateDirectoryMode = 750; StateDirectoryMode = 750;
ProtectSystem = "full"; ProtectSystem = "full";
PrivateTmp = true; PrivateTmp = true;
@ -54,23 +59,35 @@ let
''; '';
}; };
# Previously, all certs were owned by whatever user was configured in # Ensures that directories which are shared across all certs
# config.security.acme.certs.<cert>.user. Now everything is owned by and # exist and have the correct user and group, since group
# run by the acme user. # is configurable on a per-cert basis.
userMigrationService = { userMigrationService = let
description = "Fix owner and group of all ACME certificates"; script = with builtins; ''
chown -R acme .lego/accounts
script = with builtins; concatStringsSep "\n" (mapAttrsToList (cert: data: '' '' + (concatStringsSep "\n" (mapAttrsToList (cert: data: ''
for fixpath in /var/lib/acme/${escapeShellArg cert} /var/lib/acme/.lego/${escapeShellArg cert}; do for fixpath in ${escapeShellArg cert} .lego/${escapeShellArg cert}; do
if [ -d "$fixpath" ]; then if [ -d "$fixpath" ]; then
chmod -R u=rwX,g=rX,o= "$fixpath" chmod -R u=rwX,g=rX,o= "$fixpath"
chown -R acme:${data.group} "$fixpath" chown -R acme:${data.group} "$fixpath"
fi fi
done done
'') certConfigs); '') certConfigs));
in {
description = "Fix owner and group of all ACME certificates";
# We don't want this to run every time a renewal happens serviceConfig = commonServiceConfig // {
serviceConfig.RemainAfterExit = true; # We don't want this to run every time a renewal happens
RemainAfterExit = true;
# These StateDirectory entries negate the need for tmpfiles
StateDirectory = [ "acme" "acme/.lego" "acme/.lego/accounts" ];
StateDirectoryMode = 755;
WorkingDirectory = "/var/lib/acme";
# Run the start script as root
ExecStart = "+" + (pkgs.writeShellScript "acme-fixperms" script);
};
}; };
certToConfig = cert: data: let certToConfig = cert: data: let
@ -101,11 +118,10 @@ let
${toString acmeServer} ${toString data.dnsProvider} ${toString acmeServer} ${toString data.dnsProvider}
${toString data.ocspMustStaple} ${data.keyType} ${toString data.ocspMustStaple} ${data.keyType}
''; '';
mkHash = with builtins; val: substring 0 20 (hashString "sha256" val);
certDir = mkHash hashData; certDir = mkHash hashData;
domainHash = mkHash "${concatStringsSep " " extraDomains} ${data.domain}"; domainHash = mkHash "${concatStringsSep " " extraDomains} ${data.domain}";
othersHash = mkHash "${toString acmeServer} ${data.keyType} ${data.email}"; accountHash = (mkAccountHash acmeServer data);
accountDir = "/var/lib/acme/.lego/accounts/" + othersHash; accountDir = accountDirRoot + accountHash;
protocolOpts = if useDns then ( protocolOpts = if useDns then (
[ "--dns" data.dnsProvider ] [ "--dns" data.dnsProvider ]
@ -142,9 +158,8 @@ let
); );
in { in {
inherit accountDir selfsignedDeps; inherit accountHash cert selfsignedDeps;
webroot = data.webroot;
group = data.group; group = data.group;
renewTimer = { renewTimer = {
@ -184,7 +199,10 @@ let
StateDirectory = "acme/${cert}"; StateDirectory = "acme/${cert}";
BindPaths = "/var/lib/acme/.minica:/tmp/ca /var/lib/acme/${cert}:/tmp/${keyName}"; BindPaths = [
"/var/lib/acme/.minica:/tmp/ca"
"/var/lib/acme/${cert}:/tmp/${keyName}"
];
}; };
# Working directory will be /tmp # Working directory will be /tmp
@ -222,16 +240,22 @@ let
serviceConfig = commonServiceConfig // { serviceConfig = commonServiceConfig // {
Group = data.group; Group = data.group;
# AccountDir dir will be created by tmpfiles to ensure correct permissions # Keep in mind that these directories will be deleted if the user runs
# And to avoid deletion during systemctl clean # systemctl clean --what=state
# acme/.lego/${cert} is listed so that it is deleted during systemctl clean # acme/.lego/${cert} is listed for this reason.
StateDirectory = "acme/${cert} acme/.lego/${cert} acme/.lego/${cert}/${certDir}"; StateDirectory = [
"acme/${cert}"
"acme/.lego/${cert}"
"acme/.lego/${cert}/${certDir}"
"acme/.lego/accounts/${accountHash}"
];
# Needs to be space separated, but can't use a multiline string because that'll include newlines # Needs to be space separated, but can't use a multiline string because that'll include newlines
BindPaths = BindPaths = [
"${accountDir}:/tmp/accounts " + "${accountDir}:/tmp/accounts"
"/var/lib/acme/${cert}:/tmp/out " + "/var/lib/acme/${cert}:/tmp/out"
"/var/lib/acme/.lego/${cert}/${certDir}:/tmp/certificates "; "/var/lib/acme/.lego/${cert}/${certDir}:/tmp/certificates"
];
# Only try loading the credentialsFile if the dns challenge is enabled # Only try loading the credentialsFile if the dns challenge is enabled
EnvironmentFile = mkIf useDns data.credentialsFile; EnvironmentFile = mkIf useDns data.credentialsFile;
@ -248,13 +272,18 @@ let
# Working directory will be /tmp # Working directory will be /tmp
script = '' script = ''
set -euo pipefail set -euxo pipefail
${optionalString (data.webroot != null) ''
# Ensure the webroot exists
mkdir -p '${data.webroot}/.well-known/acme-challenge'
chown 'acme:${data.group}' ${data.webroot}/{.well-known,.well-known/acme-challenge}
''}
echo '${domainHash}' > domainhash.txt echo '${domainHash}' > domainhash.txt
# Check if we can renew # Check if we can renew
# Certificates and account credentials must exist if [ -e 'certificates/${keyName}.key' -a -e 'certificates/${keyName}.crt' -a -n "$(ls -1 accounts)" ]; then
if [ -e 'certificates/${keyName}.key' -a -e 'certificates/${keyName}.crt' -a "$(ls -1 accounts)" ]; then
# When domains are updated, there's no need to do a full # When domains are updated, there's no need to do a full
# Lego run, but it's likely renew won't work if days is too low. # Lego run, but it's likely renew won't work if days is too low.
@ -317,7 +346,7 @@ let
webroot = mkOption { webroot = mkOption {
type = types.nullOr types.str; type = types.nullOr types.str;
default = null; default = null;
example = "/var/lib/acme/acme-challenges"; example = "/var/lib/acme/acme-challenge";
description = '' description = ''
Where the webroot of the HTTP vhost is located. Where the webroot of the HTTP vhost is located.
<filename>.well-known/acme-challenge/</filename> directory <filename>.well-known/acme-challenge/</filename> directory
@ -550,12 +579,12 @@ in {
example = literalExample '' example = literalExample ''
{ {
"example.com" = { "example.com" = {
webroot = "/var/www/challenges/"; webroot = "/var/lib/acme/acme-challenge/";
email = "foo@example.com"; email = "foo@example.com";
extraDomainNames = [ "www.example.com" "foo.example.com" ]; extraDomainNames = [ "www.example.com" "foo.example.com" ];
}; };
"bar.example.com" = { "bar.example.com" = {
webroot = "/var/www/challenges/"; webroot = "/var/lib/acme/acme-challenge/";
email = "bar@example.com"; email = "bar@example.com";
}; };
} }
@ -664,21 +693,33 @@ in {
systemd.timers = mapAttrs' (cert: conf: nameValuePair "acme-${cert}" conf.renewTimer) certConfigs; systemd.timers = mapAttrs' (cert: conf: nameValuePair "acme-${cert}" conf.renewTimer) certConfigs;
# .lego and .lego/accounts specified to fix any incorrect permissions systemd.targets = let
systemd.tmpfiles.rules = [ # Create some targets which can be depended on to be "active" after cert renewals
"d /var/lib/acme/.lego - acme acme" finishedTargets = mapAttrs' (cert: conf: nameValuePair "acme-finished-${cert}" {
"d /var/lib/acme/.lego/accounts - acme acme" wantedBy = [ "default.target" ];
] ++ (unique (concatMap (conf: [ requires = [ "acme-${cert}.service" ] ++ conf.selfsignedDeps;
"d ${conf.accountDir} - acme acme" after = [ "acme-${cert}.service" ] ++ conf.selfsignedDeps;
] ++ (optional (conf.webroot != null) "d ${conf.webroot}/.well-known/acme-challenge - acme ${conf.group}") }) certConfigs;
) (attrValues certConfigs)));
# Create some targets which can be depended on to be "active" after cert renewals # Create targets to limit the number of simultaneous account creations
systemd.targets = mapAttrs' (cert: conf: nameValuePair "acme-finished-${cert}" { # How it works:
wantedBy = [ "default.target" ]; # - Pick a "leader" cert service, which will be in charge of creating the account,
requires = [ "acme-${cert}.service" ] ++ conf.selfsignedDeps; # and run first (requires + after)
after = [ "acme-${cert}.service" ] ++ conf.selfsignedDeps; # - Make all other cert services sharing the same account wait for the leader to
}) certConfigs; # finish before starting (requiredBy + before).
# Using a target here is fine - account creation is a one time event. Even if
# systemctl clean --what=state is used to delete the account, so long as the user
# then runs one of the cert services, there won't be any issues.
accountTargets = mapAttrs' (hash: confs: let
leader = "acme-${(builtins.head confs).cert}.service";
dependantServices = map (conf: "acme-${conf.cert}.service") (builtins.tail confs);
in nameValuePair "acme-account-${hash}" {
requiredBy = dependantServices;
before = dependantServices;
requires = [ leader ];
after = [ leader ];
}) (groupBy (conf: conf.accountHash) (attrValues certConfigs));
in finishedTargets // accountTargets;
}) })
]; ];

View file

@ -115,15 +115,18 @@ services.nginx = {
<programlisting> <programlisting>
<xref linkend="opt-security.acme.acceptTerms" /> = true; <xref linkend="opt-security.acme.acceptTerms" /> = true;
<xref linkend="opt-security.acme.email" /> = "admin+acme@example.com"; <xref linkend="opt-security.acme.email" /> = "admin+acme@example.com";
# /var/lib/acme/.challenges must be writable by the ACME user
# and readable by the Nginx user. The easiest way to achieve
# this is to add the Nginx user to the ACME group.
<link linkend="opt-users.users._name_.extraGroups">users.users.nginx.extraGroups</link> = [ "acme" ];
services.nginx = { services.nginx = {
<link linkend="opt-services.nginx.enable">enable</link> = true; <link linkend="opt-services.nginx.enable">enable</link> = true;
<link linkend="opt-services.nginx.virtualHosts">virtualHosts</link> = { <link linkend="opt-services.nginx.virtualHosts">virtualHosts</link> = {
"acmechallenge.example.com" = { "acmechallenge.example.com" = {
# Catchall vhost, will redirect users to HTTPS for all vhosts # Catchall vhost, will redirect users to HTTPS for all vhosts
<link linkend="opt-services.nginx.virtualHosts._name_.serverAliases">serverAliases</link> = [ "*.example.com" ]; <link linkend="opt-services.nginx.virtualHosts._name_.serverAliases">serverAliases</link> = [ "*.example.com" ];
# /var/lib/acme/.challenges must be writable by the ACME user
# and readable by the Nginx user.
# By default, this is the case.
locations."/.well-known/acme-challenge" = { locations."/.well-known/acme-challenge" = {
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.root">root</link> = "/var/lib/acme/.challenges"; <link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.root">root</link> = "/var/lib/acme/.challenges";
}; };
@ -134,6 +137,7 @@ services.nginx = {
}; };
} }
# Alternative config for Apache # Alternative config for Apache
<link linkend="opt-users.users._name_.extraGroups">users.users.wwwrun.extraGroups</link> = [ "acme" ];
services.httpd = { services.httpd = {
<link linkend="opt-services.httpd.enable">enable = true;</link> <link linkend="opt-services.httpd.enable">enable = true;</link>
<link linkend="opt-services.httpd.virtualHosts">virtualHosts</link> = { <link linkend="opt-services.httpd.virtualHosts">virtualHosts</link> = {
@ -162,6 +166,9 @@ services.httpd = {
<xref linkend="opt-security.acme.certs"/>."foo.example.com" = { <xref linkend="opt-security.acme.certs"/>."foo.example.com" = {
<link linkend="opt-security.acme.certs._name_.webroot">webroot</link> = "/var/lib/acme/.challenges"; <link linkend="opt-security.acme.certs._name_.webroot">webroot</link> = "/var/lib/acme/.challenges";
<link linkend="opt-security.acme.certs._name_.email">email</link> = "foo@example.com"; <link linkend="opt-security.acme.certs._name_.email">email</link> = "foo@example.com";
# Ensure that the web server you use can read the generated certs
# Take a look at the <link linkend="opt-services.nginx.group">group</link> option for the web server you choose.
<link linkend="opt-security.acme.certs._name_.group">group</link> = "nginx";
# Since we have a wildcard vhost to handle port 80, # Since we have a wildcard vhost to handle port 80,
# we can generate certs for anything! # we can generate certs for anything!
# Just make sure your DNS resolves them. # Just make sure your DNS resolves them.
@ -257,10 +264,11 @@ chmod 400 /var/lib/secrets/certs.secret
<para> <para>
Should you need to regenerate a particular certificate in a hurry, such Should you need to regenerate a particular certificate in a hurry, such
as when a vulnerability is found in Let's Encrypt, there is now a convenient as when a vulnerability is found in Let's Encrypt, there is now a convenient
mechanism for doing so. Running <literal>systemctl clean acme-example.com.service</literal> mechanism for doing so. Running
will remove all certificate files for the given domain, allowing you to then <literal>systemctl clean --what=state acme-example.com.service</literal>
<literal>systemctl start acme-example.com.service</literal> to generate fresh will remove all certificate files and the account data for the given domain,
ones. allowing you to then <literal>systemctl start acme-example.com.service</literal>
to generate fresh ones.
</para> </para>
</section> </section>
<section xml:id="module-security-acme-fix-jws"> <section xml:id="module-security-acme-fix-jws">

View file

@ -430,8 +430,8 @@ let
${optionalString cfg.pamMount ${optionalString cfg.pamMount
"auth optional ${pkgs.pam_mount}/lib/security/pam_mount.so"} "auth optional ${pkgs.pam_mount}/lib/security/pam_mount.so"}
${optionalString cfg.enableKwallet ${optionalString cfg.enableKwallet
("auth optional ${pkgs.plasma5.kwallet-pam}/lib/security/pam_kwallet5.so" + ("auth optional ${pkgs.plasma5Packages.kwallet-pam}/lib/security/pam_kwallet5.so" +
" kwalletd=${pkgs.kdeFrameworks.kwallet.bin}/bin/kwalletd5")} " kwalletd=${pkgs.plasma5Packages.kwallet.bin}/bin/kwalletd5")}
${optionalString cfg.enableGnomeKeyring ${optionalString cfg.enableGnomeKeyring
"auth optional ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so"} "auth optional ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so"}
${optionalString cfg.gnupg.enable ${optionalString cfg.gnupg.enable
@ -509,8 +509,8 @@ let
${optionalString (cfg.enableAppArmor && config.security.apparmor.enable) ${optionalString (cfg.enableAppArmor && config.security.apparmor.enable)
"session optional ${pkgs.apparmor-pam}/lib/security/pam_apparmor.so order=user,group,default debug"} "session optional ${pkgs.apparmor-pam}/lib/security/pam_apparmor.so order=user,group,default debug"}
${optionalString (cfg.enableKwallet) ${optionalString (cfg.enableKwallet)
("session optional ${pkgs.plasma5.kwallet-pam}/lib/security/pam_kwallet5.so" + ("session optional ${pkgs.plasma5Packages.kwallet-pam}/lib/security/pam_kwallet5.so" +
" kwalletd=${pkgs.kdeFrameworks.kwallet.bin}/bin/kwalletd5")} " kwalletd=${pkgs.plasma5Packages.kwallet.bin}/bin/kwalletd5")}
${optionalString (cfg.enableGnomeKeyring) ${optionalString (cfg.enableGnomeKeyring)
"session optional ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so auto_start"} "session optional ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so auto_start"}
${optionalString cfg.gnupg.enable ${optionalString cfg.gnupg.enable

View file

@ -10,16 +10,8 @@ let
(n: v: (if v ? program then v else v // {program=n;})) (n: v: (if v ? program then v else v // {program=n;}))
wrappers); wrappers);
securityWrapper = pkgs.stdenv.mkDerivation { securityWrapper = pkgs.callPackage ./wrapper.nix {
name = "security-wrapper"; inherit parentWrapperDir;
phases = [ "installPhase" "fixupPhase" ];
buildInputs = [ pkgs.libcap pkgs.libcap_ng pkgs.linuxHeaders ];
hardeningEnable = [ "pie" ];
installPhase = ''
mkdir -p $out/bin
$CC -Wall -O2 -DWRAPPER_DIR=\"${parentWrapperDir}\" \
-lcap-ng -lcap ${./wrapper.c} -o $out/bin/security-wrapper
'';
}; };
###### Activation script for the setcap wrappers ###### Activation script for the setcap wrappers

View file

@ -4,15 +4,17 @@
#include <unistd.h> #include <unistd.h>
#include <sys/types.h> #include <sys/types.h>
#include <sys/stat.h> #include <sys/stat.h>
#include <sys/xattr.h>
#include <fcntl.h> #include <fcntl.h>
#include <dirent.h> #include <dirent.h>
#include <assert.h> #include <assert.h>
#include <errno.h> #include <errno.h>
#include <linux/capability.h> #include <linux/capability.h>
#include <sys/capability.h>
#include <sys/prctl.h> #include <sys/prctl.h>
#include <limits.h> #include <limits.h>
#include <cap-ng.h> #include <stdint.h>
#include <syscall.h>
#include <byteswap.h>
// Make sure assertions are not compiled out, we use them to codify // Make sure assertions are not compiled out, we use them to codify
// invariants about this program and we want it to fail fast and // invariants about this program and we want it to fail fast and
@ -23,182 +25,172 @@ extern char **environ;
// The WRAPPER_DIR macro is supplied at compile time so that it cannot // The WRAPPER_DIR macro is supplied at compile time so that it cannot
// be changed at runtime // be changed at runtime
static char * wrapperDir = WRAPPER_DIR; static char *wrapper_dir = WRAPPER_DIR;
// Wrapper debug variable name // Wrapper debug variable name
static char * wrapperDebug = "WRAPPER_DEBUG"; static char *wrapper_debug = "WRAPPER_DEBUG";
// Update the capabilities of the running process to include the given #define CAP_SETPCAP 8
// capability in the Ambient set.
static void set_ambient_cap(cap_value_t cap)
{
capng_get_caps_process();
if (capng_update(CAPNG_ADD, CAPNG_INHERITABLE, (unsigned long) cap)) #if __BYTE_ORDER == __BIG_ENDIAN
{ #define LE32_TO_H(x) bswap_32(x)
perror("cannot raise the capability into the Inheritable set\n"); #else
exit(1); #define LE32_TO_H(x) (x)
#endif
int get_last_cap(unsigned *last_cap) {
FILE* file = fopen("/proc/sys/kernel/cap_last_cap", "r");
if (file == NULL) {
int saved_errno = errno;
fprintf(stderr, "failed to open /proc/sys/kernel/cap_last_cap: %s\n", strerror(errno));
return -saved_errno;
} }
int res = fscanf(file, "%u", last_cap);
capng_apply(CAPNG_SELECT_CAPS); if (res == EOF) {
int saved_errno = errno;
if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, (unsigned long) cap, 0, 0)) fprintf(stderr, "could not read number from /proc/sys/kernel/cap_last_cap: %s\n", strerror(errno));
{ return -saved_errno;
perror("cannot raise the capability into the Ambient set\n");
exit(1);
} }
fclose(file);
return 0;
} }
// Given the path to this program, fetch its configured capability set // Given the path to this program, fetch its configured capability set
// (as set by `setcap ... /path/to/file`) and raise those capabilities // (as set by `setcap ... /path/to/file`) and raise those capabilities
// into the Ambient set. // into the Ambient set.
static int make_caps_ambient(const char *selfPath) static int make_caps_ambient(const char *self_path) {
{ struct vfs_ns_cap_data data = {};
cap_t caps = cap_get_file(selfPath); int r = getxattr(self_path, "security.capability", &data, sizeof(data));
if(!caps)
{
if(getenv(wrapperDebug))
fprintf(stderr, "no caps set or could not retrieve the caps for this file, not doing anything...");
if (r < 0) {
if (errno == ENODATA) {
// no capabilities set
return 0;
}
fprintf(stderr, "cannot get capabilities for %s: %s", self_path, strerror(errno));
return 1; return 1;
} }
// We use `cap_to_text` and iteration over the tokenized result size_t size;
// string because, as of libcap's current release, there is no uint32_t version = LE32_TO_H(data.magic_etc) & VFS_CAP_REVISION_MASK;
// facility for retrieving an array of `cap_value_t`'s that can be switch (version) {
// given to `prctl` in order to lift that capability into the case VFS_CAP_REVISION_1:
// Ambient set. size = VFS_CAP_U32_1;
// break;
// Some discussion was had around shot-gunning all of the case VFS_CAP_REVISION_2:
// capabilities we know about into the Ambient set but that has a case VFS_CAP_REVISION_3:
// security smell and I deemed the risk of the current size = VFS_CAP_U32_3;
// implementation crashing the program to be lower than the risk break;
// of a privilege escalation security hole being introduced by default:
// raising all capabilities, even ones we didn't intend for the fprintf(stderr, "BUG! Unsupported capability version 0x%x on %s. Report to NixOS bugtracker\n", version, self_path);
// program, into the Ambient set. return 1;
// }
// `cap_t` which is returned by `cap_get_*` is an opaque type and
// even if we could retrieve the bitmasks (which, as far as I can const struct __user_cap_header_struct header = {
// tell we cannot) in order to get the `cap_value_t` .version = _LINUX_CAPABILITY_VERSION_3,
// representation for each capability we would have to take the .pid = getpid(),
// total number of capabilities supported and iterate over the };
// sequence of integers up-to that maximum total, testing each one struct __user_cap_data_struct user_data[2] = {};
// against the bitmask ((bitmask >> n) & 1) to see if it's set and
// aggregating each "capability integer n" that is set in the for (size_t i = 0; i < size; i++) {
// bitmask. // merge inheritable & permitted into one
// user_data[i].permitted = user_data[i].inheritable =
// That, combined with the fact that we can't easily get the LE32_TO_H(data.data[i].inheritable) | LE32_TO_H(data.data[i].permitted);
// bitmask anyway seemed much more brittle than fetching the }
// `cap_t`, transforming it into a textual representation,
// tokenizing the string, and using `cap_from_name` on the token if (syscall(SYS_capset, &header, &user_data) < 0) {
// to get the `cap_value_t` that we need for `prctl`. There is fprintf(stderr, "failed to inherit capabilities: %s", strerror(errno));
// indeed risk involved if the output string format of return 1;
// `cap_to_text` ever changes but at this time the combination of }
// factors involving the below list have led me to the conclusion unsigned last_cap;
// that the best implementation at this time is reading then r = get_last_cap(&last_cap);
// parsing with *lots of documentation* about why we're doing it if (r < 0) {
// this way. return 1;
// }
// 1. No explicit API for fetching an array of `cap_value_t`'s or uint64_t set = user_data[0].permitted | (uint64_t)user_data[1].permitted << 32;
// for transforming a `cap_t` into such a representation for (unsigned cap = 0; cap < last_cap; cap++) {
// 2. The risk of a crash is lower than lifting all capabilities if (!(set & (1ULL << cap))) {
// into the Ambient set continue;
// 3. libcap is depended on heavily in the Linux ecosystem so }
// there is a high chance that the output representation of
// `cap_to_text` will not change which reduces our risk that // Check for the cap_setpcap capability, we set this on the
// this parsing step will cause a crash // wrapper so it can elevate the capabilities to the Ambient
// // set but we do not want to propagate it down into the
// The preferred method, should it ever be available in the // wrapped program.
// future, would be to use libcap API's to transform the result //
// from a `cap_get_*` into an array of `cap_value_t`'s that can // TODO: what happens if that's the behavior you want
// then be given to prctl. // though???? I'm preferring a strict vs. loose policy here.
// if (cap == CAP_SETPCAP) {
// - Parnell if(getenv(wrapper_debug)) {
ssize_t capLen; fprintf(stderr, "cap_setpcap in set, skipping it\n");
char* capstr = cap_to_text(caps, &capLen); }
cap_free(caps); continue;
}
// TODO: For now, we assume that cap_to_text always starts its if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, (unsigned long) cap, 0, 0)) {
// result string with " =" and that the first capability is listed fprintf(stderr, "cannot raise the capability %d into the ambient set: %s\n", cap, strerror(errno));
// immediately after that. We should verify this. return 1;
assert(capLen >= 2); }
capstr += 2; if (getenv(wrapper_debug)) {
fprintf(stderr, "raised %d into the ambient capability set\n", cap);
char* saveptr = NULL; }
for(char* tok = strtok_r(capstr, ",", &saveptr); tok; tok = strtok_r(NULL, ",", &saveptr))
{
cap_value_t capnum;
if (cap_from_name(tok, &capnum))
{
if(getenv(wrapperDebug))
fprintf(stderr, "cap_from_name failed, skipping: %s", tok);
}
else if (capnum == CAP_SETPCAP)
{
// Check for the cap_setpcap capability, we set this on the
// wrapper so it can elevate the capabilities to the Ambient
// set but we do not want to propagate it down into the
// wrapped program.
//
// TODO: what happens if that's the behavior you want
// though???? I'm preferring a strict vs. loose policy here.
if(getenv(wrapperDebug))
fprintf(stderr, "cap_setpcap in set, skipping it\n");
}
else
{
set_ambient_cap(capnum);
if(getenv(wrapperDebug))
fprintf(stderr, "raised %s into the Ambient capability set\n", tok);
}
} }
cap_free(capstr);
return 0; return 0;
} }
int main(int argc, char * * argv) int readlink_malloc(const char *p, char **ret) {
{ size_t l = FILENAME_MAX+1;
// I *think* it's safe to assume that a path from a symbolic link int r;
// should safely fit within the PATH_MAX system limit. Though I'm
// not positive it's safe...
char selfPath[PATH_MAX];
int selfPathSize = readlink("/proc/self/exe", selfPath, sizeof(selfPath));
assert(selfPathSize > 0); for (;;) {
char *c = calloc(l, sizeof(char));
if (!c) {
return -ENOMEM;
}
// Assert we have room for the zero byte, this ensures the path ssize_t n = readlink(p, c, l-1);
// isn't being truncated because it's too big for the buffer. if (n < 0) {
// r = -errno;
// A better way to handle this might be to use something like the free(c);
// whereami library (https://github.com/gpakosz/whereami) or a return r;
// loop that resizes the buffer and re-reads the link if the }
// contents are being truncated.
assert(selfPathSize < sizeof(selfPath));
// Set the zero byte since readlink doesn't do that for us. if ((size_t) n < l-1) {
selfPath[selfPathSize] = '\0'; c[n] = 0;
*ret = c;
return 0;
}
free(c);
l *= 2;
}
}
int main(int argc, char **argv) {
char *self_path = NULL;
int self_path_size = readlink_malloc("/proc/self/exe", &self_path);
if (self_path_size < 0) {
fprintf(stderr, "cannot readlink /proc/self/exe: %s", strerror(-self_path_size));
}
// Make sure that we are being executed from the right location, // Make sure that we are being executed from the right location,
// i.e., `safeWrapperDir'. This is to prevent someone from creating // i.e., `safe_wrapper_dir'. This is to prevent someone from creating
// hard link `X' from some other location, along with a false // hard link `X' from some other location, along with a false
// `X.real' file, to allow arbitrary programs from being executed // `X.real' file, to allow arbitrary programs from being executed
// with elevated capabilities. // with elevated capabilities.
int len = strlen(wrapperDir); int len = strlen(wrapper_dir);
if (len > 0 && '/' == wrapperDir[len - 1]) if (len > 0 && '/' == wrapper_dir[len - 1])
--len; --len;
assert(!strncmp(selfPath, wrapperDir, len)); assert(!strncmp(self_path, wrapper_dir, len));
assert('/' == wrapperDir[0]); assert('/' == wrapper_dir[0]);
assert('/' == selfPath[len]); assert('/' == self_path[len]);
// Make *really* *really* sure that we were executed as // Make *really* *really* sure that we were executed as
// `selfPath', and not, say, as some other setuid program. That // `self_path', and not, say, as some other setuid program. That
// is, our effective uid/gid should match the uid/gid of // is, our effective uid/gid should match the uid/gid of
// `selfPath'. // `self_path'.
struct stat st; struct stat st;
assert(lstat(selfPath, &st) != -1); assert(lstat(self_path, &st) != -1);
assert(!(st.st_mode & S_ISUID) || (st.st_uid == geteuid())); assert(!(st.st_mode & S_ISUID) || (st.st_uid == geteuid()));
assert(!(st.st_mode & S_ISGID) || (st.st_gid == getegid())); assert(!(st.st_mode & S_ISGID) || (st.st_gid == getegid()));
@ -207,33 +199,35 @@ int main(int argc, char * * argv)
assert(!(st.st_mode & (S_IWGRP | S_IWOTH))); assert(!(st.st_mode & (S_IWGRP | S_IWOTH)));
// Read the path of the real (wrapped) program from <self>.real. // Read the path of the real (wrapped) program from <self>.real.
char realFN[PATH_MAX + 10]; char real_fn[PATH_MAX + 10];
int realFNSize = snprintf (realFN, sizeof(realFN), "%s.real", selfPath); int real_fn_size = snprintf(real_fn, sizeof(real_fn), "%s.real", self_path);
assert (realFNSize < sizeof(realFN)); assert(real_fn_size < sizeof(real_fn));
int fdSelf = open(realFN, O_RDONLY); int fd_self = open(real_fn, O_RDONLY);
assert (fdSelf != -1); assert(fd_self != -1);
char sourceProg[PATH_MAX]; char source_prog[PATH_MAX];
len = read(fdSelf, sourceProg, PATH_MAX); len = read(fd_self, source_prog, PATH_MAX);
assert (len != -1); assert(len != -1);
assert (len < sizeof(sourceProg)); assert(len < sizeof(source_prog));
assert (len > 0); assert(len > 0);
sourceProg[len] = 0; source_prog[len] = 0;
close(fdSelf); close(fd_self);
// Read the capabilities set on the wrapper and raise them in to // Read the capabilities set on the wrapper and raise them in to
// the Ambient set so the program we're wrapping receives the // the ambient set so the program we're wrapping receives the
// capabilities too! // capabilities too!
make_caps_ambient(selfPath); if (make_caps_ambient(self_path) != 0) {
free(self_path);
return 1;
}
free(self_path);
execve(sourceProg, argv, environ); execve(source_prog, argv, environ);
fprintf(stderr, "%s: cannot run `%s': %s\n", fprintf(stderr, "%s: cannot run `%s': %s\n",
argv[0], sourceProg, strerror(errno)); argv[0], source_prog, strerror(errno));
exit(1); return 1;
} }

View file

@ -0,0 +1,21 @@
{ stdenv, linuxHeaders, parentWrapperDir, debug ? false }:
# For testing:
# $ nix-build -E 'with import <nixpkgs> {}; pkgs.callPackage ./wrapper.nix { parentWrapperDir = "/run/wrappers"; debug = true; }'
stdenv.mkDerivation {
name = "security-wrapper";
buildInputs = [ linuxHeaders ];
dontUnpack = true;
hardeningEnable = [ "pie" ];
CFLAGS = [
''-DWRAPPER_DIR="${parentWrapperDir}"''
] ++ (if debug then [
"-Werror" "-Og" "-g"
] else [
"-Wall" "-O2"
]);
dontStrip = debug;
installPhase = ''
mkdir -p $out/bin
$CC $CFLAGS ${./wrapper.c} -o $out/bin/security-wrapper
'';
}

View file

@ -33,6 +33,7 @@ in {
}; };
configurationDir = mkOption { configurationDir = mkOption {
default = "${activemq}/conf"; default = "${activemq}/conf";
type = types.str;
description = '' description = ''
The base directory for ActiveMQ's configuration. The base directory for ActiveMQ's configuration.
By default, this directory is searched for a file named activemq.xml, By default, this directory is searched for a file named activemq.xml,

View file

@ -32,7 +32,7 @@ in
enableOSSEmulation = mkOption { enableOSSEmulation = mkOption {
type = types.bool; type = types.bool;
default = true; default = false;
description = '' description = ''
Whether to enable ALSA OSS emulation (with certain cards sound mixing may not work!). Whether to enable ALSA OSS emulation (with certain cards sound mixing may not work!).
''; '';

View file

@ -74,7 +74,7 @@ in {
musicDirectory = mkOption { musicDirectory = mkOption {
type = with types; either path (strMatching "(http|https|nfs|smb)://.+"); type = with types; either path (strMatching "(http|https|nfs|smb)://.+");
default = "${cfg.dataDir}/music"; default = "${cfg.dataDir}/music";
defaultText = ''''${dataDir}/music''; defaultText = "\${dataDir}/music";
description = '' description = ''
The directory or NFS/SMB network share where MPD reads music from. If left The directory or NFS/SMB network share where MPD reads music from. If left
as the default value this directory will automatically be created before as the default value this directory will automatically be created before
@ -86,7 +86,7 @@ in {
playlistDirectory = mkOption { playlistDirectory = mkOption {
type = types.path; type = types.path;
default = "${cfg.dataDir}/playlists"; default = "${cfg.dataDir}/playlists";
defaultText = ''''${dataDir}/playlists''; defaultText = "\${dataDir}/playlists";
description = '' description = ''
The directory where MPD stores playlists. If left as the default value The directory where MPD stores playlists. If left as the default value
this directory will automatically be created before the MPD server starts, this directory will automatically be created before the MPD server starts,
@ -155,7 +155,7 @@ in {
dbFile = mkOption { dbFile = mkOption {
type = types.nullOr types.str; type = types.nullOr types.str;
default = "${cfg.dataDir}/tag_cache"; default = "${cfg.dataDir}/tag_cache";
defaultText = ''''${dataDir}/tag_cache''; defaultText = "\${dataDir}/tag_cache";
description = '' description = ''
The path to MPD's database. If set to <literal>null</literal> the The path to MPD's database. If set to <literal>null</literal> the
parameter is omitted from the configuration. parameter is omitted from the configuration.

View file

@ -1,5 +1,6 @@
{ config, lib, pkgs, ... }: { config, lib, pkgs, ... }:
# TODO: test configuration when building nixexpr (use -t parameter) # TODO: test configuration when building nixexpr (use -t parameter)
# TODO: support sqlite3 (is it deprecated?) and mysql # TODO: support sqlite3 (is it deprecated?) and mysql
@ -111,6 +112,7 @@ let
{ {
options = { options = {
password = mkOption { password = mkOption {
type = types.str;
# TODO: required? # TODO: required?
description = '' description = ''
Specifies the password that must be supplied for the default Bacula Specifies the password that must be supplied for the default Bacula
@ -130,6 +132,7 @@ let
}; };
monitor = mkOption { monitor = mkOption {
type = types.enum [ "no" "yes" ];
default = "no"; default = "no";
example = "yes"; example = "yes";
description = '' description = ''
@ -150,6 +153,7 @@ let
{ {
options = { options = {
changerDevice = mkOption { changerDevice = mkOption {
type = types.str;
description = '' description = ''
The specified name-string must be the generic SCSI device name of the The specified name-string must be the generic SCSI device name of the
autochanger that corresponds to the normal read/write Archive Device autochanger that corresponds to the normal read/write Archive Device
@ -168,6 +172,7 @@ let
}; };
changerCommand = mkOption { changerCommand = mkOption {
type = types.str;
description = '' description = ''
The name-string specifies an external program to be called that will The name-string specifies an external program to be called that will
automatically change volumes as required by Bacula. Normally, this automatically change volumes as required by Bacula. Normally, this
@ -190,12 +195,13 @@ let
}; };
devices = mkOption { devices = mkOption {
description = '' description = "";
''; type = types.listOf types.str;
}; };
extraAutochangerConfig = mkOption { extraAutochangerConfig = mkOption {
default = ""; default = "";
type = types.lines;
description = '' description = ''
Extra configuration to be passed in Autochanger directive. Extra configuration to be passed in Autochanger directive.
''; '';
@@ -212,6 +218,7 @@
    options = {
      archiveDevice = mkOption {
        # TODO: required?
+       type = types.str;
        description = ''
          The specified name-string gives the system file name of the storage
          device managed by this storage daemon. This will usually be the
@@ -228,6 +235,7 @@
      mediaType = mkOption {
        # TODO: required?
+       type = types.str;
        description = ''
          The specified name-string names the type of media supported by this
          device, for example, <literal>DLT7000</literal>. Media type names are
@@ -265,6 +273,7 @@
      extraDeviceConfig = mkOption {
        default = "";
+       type = types.lines;
        description = ''
          Extra configuration to be passed in Device directive.
        '';
@@ -293,6 +302,7 @@ in {
      name = mkOption {
        default = "${config.networking.hostName}-fd";
+       type = types.str;
        description = ''
          The client name that must be used by the Director when connecting.
          Generally, it is a good idea to use a name related to the machine so
@@ -321,6 +331,7 @@ in {
      extraClientConfig = mkOption {
        default = "";
+       type = types.lines;
        description = ''
          Extra configuration to be passed in Client directive.
        '';
@@ -332,6 +343,7 @@ in {
      extraMessagesConfig = mkOption {
        default = "";
+       type = types.lines;
        description = ''
          Extra configuration to be passed in Messages directive.
        '';
@@ -352,6 +364,7 @@ in {
      name = mkOption {
        default = "${config.networking.hostName}-sd";
+       type = types.str;
        description = ''
          Specifies the Name of the Storage daemon.
        '';
@@ -392,6 +405,7 @@ in {
      extraStorageConfig = mkOption {
        default = "";
+       type = types.lines;
        description = ''
          Extra configuration to be passed in Storage directive.
        '';
@@ -403,6 +417,7 @@ in {
      extraMessagesConfig = mkOption {
        default = "";
+       type = types.lines;
        description = ''
          Extra configuration to be passed in Messages directive.
        '';
@@ -424,6 +439,7 @@ in {
      name = mkOption {
        default = "${config.networking.hostName}-dir";
+       type = types.str;
        description = ''
          The director name used by the system administrator. This directive is
          required.
@@ -445,6 +461,7 @@ in {
      password = mkOption {
        # TODO: required?
+       type = types.str;
        description = ''
          Specifies the password that must be supplied for a Director.
        '';
@@ -452,6 +469,7 @@ in {
      extraMessagesConfig = mkOption {
        default = "";
+       type = types.lines;
        description = ''
          Extra configuration to be passed in Messages directive.
        '';
@@ -462,6 +480,7 @@ in {
      extraDirectorConfig = mkOption {
        default = "";
+       type = types.lines;
        description = ''
          Extra configuration to be passed in Director directive.
        '';
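
The pattern throughout this file, and in the backup modules that follow, is the same: each mkOption gains an explicit type, so invalid values are rejected at evaluation time instead of being rendered verbatim into the generated daemon configuration. A minimal, self-contained sketch of that behaviour, using an illustrative option rather than the real Bacula module (assumes a nixpkgs checkout on NIX_PATH):

  # type-check-demo.nix: evaluate with nix-instantiate --eval type-check-demo.nix
  let
    lib = import <nixpkgs/lib>;
    evalMonitor = value:
      (lib.evalModules {
        modules = [
          {
            # illustrative option declaration, mirroring the enum added above
            options.demo.monitor = lib.mkOption {
              type = lib.types.enum [ "no" "yes" ];
              default = "no";
            };
          }
          { demo.monitor = value; }
        ];
      }).config.demo.monitor;
  in
    evalMonitor "yes"
    # evalMonitor "maybe" would abort evaluation with a type error rather than
    # letting the bad value reach the generated Bacula configuration files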

View file

@@ -48,6 +48,7 @@ in
      };
      user = mkOption {
+       type = types.str;
        default = defaultUser;
        description = ''
          User to be used to perform backup.
@@ -56,12 +57,14 @@ in
      databases = mkOption {
        default = [];
+       type = types.listOf types.str;
        description = ''
          List of database names to dump.
        '';
      };
      location = mkOption {
+       type = types.path;
        default = "/var/backup/mysql";
        description = ''
          Location to put the gzipped MySQL database dumps.
@@ -70,6 +73,7 @@ in
      singleTransaction = mkOption {
        default = false;
+       type = types.bool;
        description = ''
          Whether to create database dump in a single transaction
        '';

View file

@@ -48,6 +48,7 @@ in {
      startAt = mkOption {
        default = "*-*-* 01:15:00";
+       type = types.str;
        description = ''
          This option defines (see <literal>systemd.time</literal> for format) when the
          databases should be dumped.
@@ -70,6 +71,7 @@ in {
      databases = mkOption {
        default = [];
+       type = types.listOf types.str;
        description = ''
          List of database names to dump.
        '';
@@ -77,6 +79,7 @@ in {
      location = mkOption {
        default = "/var/backup/postgresql";
+       type = types.path;
        description = ''
          Location to put the gzipped PostgreSQL database dumps.
        '';

View file

@@ -243,9 +243,11 @@ in
        restartIfChanged = false;
        serviceConfig = {
          Type = "oneshot";
-         ExecStart = [ "${resticCmd} backup ${concatStringsSep " " backup.extraBackupArgs} ${backupPaths}" ] ++ pruneCmd;
+         ExecStart = [ "${resticCmd} backup --cache-dir=%C/restic-backups-${name} ${concatStringsSep " " backup.extraBackupArgs} ${backupPaths}" ] ++ pruneCmd;
          User = backup.user;
          RuntimeDirectory = "restic-backups-${name}";
+         CacheDirectory = "restic-backups-${name}";
+         CacheDirectoryMode = "0700";
        } // optionalAttrs (backup.s3CredentialsFile != null) {
          EnvironmentFile = backup.s3CredentialsFile;
        };
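
With CacheDirectory= set, systemd pre-creates /var/cache/restic-backups-<name> owned by the backup user (mode 0700 here), and the %C specifier expands to /var/cache for system units, so the new --cache-dir flag points restic at that directory instead of the invoking user's ~/.cache/restic. A hedged sketch of how this looks from a host configuration; the backup name, repository path and password file below are made up for illustration, and the option names follow the restic module as it stood at this commit:

  {
    services.restic.backups.localdisk = {
      paths = [ "/home" ];
      repository = "/mnt/backup-disk/restic-repo";
      passwordFile = "/etc/nixos/secrets/restic-password";
    };
    # The generated restic-backups-localdisk.service now carries
    #   CacheDirectory=restic-backups-localdisk
    #   CacheDirectoryMode=0700
    # so restic keeps its cache in /var/cache/restic-backups-localdisk.
  }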

View file

@@ -354,7 +354,7 @@ in
          script = let
            tarsnap = ''tarsnap --configfile "/etc/tarsnap/${name}.conf"'';
-           lastArchive = ''$(${tarsnap} --list-archives | sort | tail -1)'';
+           lastArchive = "$(${tarsnap} --list-archives | sort | tail -1)";
            run = ''${tarsnap} -x -f "${lastArchive}" ${optionalString cfg.verbose "-v"}'';
          in if (cfg.cachedir != null) then ''
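
The lastArchive change is purely a quoting cleanup: with no ''-escapes needed, the double-quoted form is the simpler spelling, and both produce the identical shell fragment. A small standalone check, with a made-up tarsnap config path standing in for the module's real one:

  # quoting-demo.nix: evaluate with nix-instantiate --eval quoting-demo.nix
  let
    tarsnap = "tarsnap --configfile \"/etc/tarsnap/nightly.conf\"";
  in
    # indented-string and double-quoted spellings of the same command substitution
    ''$(${tarsnap} --list-archives | sort | tail -1)''
      == "$(${tarsnap} --list-archives | sort | tail -1)"
    # => true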

Some files were not shown because too many files have changed in this diff.