Project import generated by Copybara.

GitOrigin-RevId: 536fe36e23ab0fc8b7f35c24603422eee9fc17a2
Default email 2021-02-05 18:12:51 +01:00
parent f55f861e17
commit ae91cbe6cc
10137 changed files with 93303 additions and 73740 deletions

View file

@ -47,27 +47,17 @@ indent_style = space
insert_final_newline = unset
trim_trailing_whitespace = unset
[*.{key,ovpn}]
[*.{asc,key,ovpn}]
insert_final_newline = unset
end_of_line = unset
trim_trailing_whitespace = unset
[*.lock]
indent_size = unset
[deps.nix]
insert_final_newline = unset
[pkgs/tools/networking/dd-agent/*-deps.nix]
insert_final_newline = unset
[eggs.nix]
trim_trailing_whitespace = unset
[gemset.nix]
insert_final_newline = unset
[node-{composition,packages,packages-generated}.nix]
insert_final_newline = unset
[nixos/modules/services/networking/ircd-hybrid/*.{conf,in}]
trim_trailing_whitespace = unset
@ -92,15 +82,6 @@ insert_final_newline = unset
indent_style = unset
trim_trailing_whitespace = unset
[pkgs/development/mobile/androidenv/generated/{addons,packages}.nix]
trim_trailing_whitespace = unset
[pkgs/development/node-packages/composition.nix]
insert_final_newline = unset
[pkgs/development/{perl-modules,ocaml-modules,tools/ocaml}/**]
indent_style = unset
[pkgs/servers/dict/wordnet_structures.py]
trim_trailing_whitespace = unset

View file

@ -76,6 +76,7 @@
/pkgs/development/interpreters/python @FRidh
/pkgs/development/python-modules @FRidh @jonringer
/doc/languages-frameworks/python.section.md @FRidh
/pkgs/development/tools/poetry2nix @adisbladis
# Haskell
/pkgs/development/compilers/ghc @cdepillabout

View file

@ -37,7 +37,7 @@ under the terms of [COPYING](../COPYING), which is an MIT-like license.
* Not start with the package name.
* Not have a period at the end.
* `meta.license` must be set and fit the upstream license.
* If there is no upstream license, `meta.license` should default to `stdenv.lib.licenses.unfree`.
* If there is no upstream license, `meta.license` should default to `lib.licenses.unfree`.
* `meta.maintainers` must be set.
See the nixpkgs manual for more details on [standard meta-attributes](https://nixos.org/nixpkgs/manual/#sec-standard-meta-attributes) and on how to [submit changes to nixpkgs](https://nixos.org/nixpkgs/manual/#chap-submitting-changes).
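For illustration, a `meta` block that satisfies these conventions could look like the following sketch (the description text and maintainer handle are placeholders):
```nix
meta = with lib; {
  # Does not start with the package name and has no trailing period
  description = "Command-line tool for frobnicating widgets";
  # Matches the upstream license, or licenses.unfree if upstream provides none
  license = licenses.mit;
  maintainers = with maintainers; [ someMaintainer ];
};
```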

View file

@ -16,5 +16,5 @@ jobs:
-X POST \
-H "Accept: application/vnd.github.v3+json" \
-H "Authorization: token $GITHUB_TOKEN" \
-d '{"state": "failure", "target_url": " ", "description": "This failed status will be cleared when ofborg finishes eval.", "context": "Wait for ofborg"}' \
-d '{"state": "pending", "target_url": " ", "description": "This pending status will be cleared when ofborg starts eval.", "context": "Wait for ofborg"}' \
"https://api.github.com/repos/NixOS/nixpkgs/statuses/${{ github.event.pull_request.head.sha }}"

View file

@ -178,6 +178,12 @@ args.stdenv.mkDerivation (args // {
</programlisting>
</para>
</listitem>
<listitem>
<para>
Arguments should be listed in the order they are used, with the
exception of <varname>lib</varname>, which always goes first.
</para>
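<para>
As a sketch (the argument names are illustrative), a function header following this convention could read:
</para>
<programlisting>
{ lib
, stdenv          # remaining arguments in the order they are used below
, fetchFromGitHub
, cmake
}:
</programlisting>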
</listitem>
<listitem>
<para>
Prefer using the top-level <varname>lib</varname> over its alias

View file

@ -12,7 +12,7 @@ xlink:href="https://github.com/NixOS/nixpkgs/tree/master/doc">doc</filename> sub
<screen>
<prompt>$ </prompt>cd /path/to/nixpkgs/doc
<prompt>$ </prompt>nix-shell
<prompt>[nix-shell]$ </prompt>make
<prompt>[nix-shell]$ </prompt>make $makeFlags
</screen>
<para>
If you experience problems, run <command>make debug</command> to help understand the docbook errors.

View file

@ -1711,4 +1711,43 @@ recursiveUpdate
</example>
</section>
<section xml:id="function-library-lib.attrsets.cartesianProductOfSets">
<title><function>lib.attrsets.cartesianProductOfSets</function></title>
<subtitle><literal>cartesianProductOfSets :: AttrSet -> [ AttrSet ]</literal>
</subtitle>
<xi:include href="./locations.xml" xpointer="lib.attrsets.cartesianProductOfSets" />
<para>
Return the cartesian product of attribute set value combinations.
</para>
<variablelist>
<varlistentry>
<term>
<varname>set</varname>
</term>
<listitem>
<para>
An attribute set with attributes that carry lists of values.
</para>
</listitem>
</varlistentry>
</variablelist>
<example xml:id="function-library-lib.attrsets.cartesianProductOfSets-example">
<title>Creating the cartesian product of a list of attribute values</title>
<programlisting><![CDATA[
cartesianProductOfSets { a = [ 1 2 ]; b = [ 10 20 ]; }
=> [
{ a = 1; b = 10; }
{ a = 1; b = 20; }
{ a = 2; b = 10; }
{ a = 2; b = 20; }
]
]]></programlisting>
</example>
</section>
</section>

View file

@ -46,7 +46,7 @@ depend: standard-library
More information can be found in the [official Agda documentation on library management](https://agda.readthedocs.io/en/v2.6.1/tools/package-system.html).
## Compiling Agda
Agda modules can be compiled with the `--compile` flag. A version of `ghc` with `ieee` is made available to the Agda program via the `--with-compiler` flag.
Agda modules can be compiled with the `--compile` flag. A version of `ghc` with `ieee754` is made available to the Agda program via the `--with-compiler` flag.
This can be overridden by a different version of `ghc` as follows:
```

View file

@ -42,8 +42,8 @@ It also takes other standard `mkDerivation` attributes, they are added as such,
Here is a simple package example. It is a pure Coq library, thus it depends on Coq. It builds on the Mathematical Components library, thus it also takes some `mathcomp` derivations as `extraBuildInputs`.
```nix
{ coq, mkCoqDerivation, mathcomp, mathcomp-finmap, mathcomp-bigenough,
lib, version ? null }:
{ lib, mkCoqDerivation, version ? null
, coq, mathcomp, mathcomp-finmap, mathcomp-bigenough }:
with lib; mkCoqDerivation {
/* namePrefix leads to e.g. `name = coq8.11-mathcomp1.11-multinomials-1.5.2` */
namePrefix = [ "coq" "mathcomp" ];

View file

@ -60,7 +60,7 @@ See the `zlib` example:
stdenv = pkgs.emscriptenStdenv;
}).overrideDerivation
(old: rec {
buildInputs = old.buildInputs ++ [ pkgconfig ];
buildInputs = old.buildInputs ++ [ pkg-config ];
# we need to reset this setting!
NIX_CFLAGS_COMPILE="";
configurePhase = ''
@ -117,8 +117,8 @@ This `xmlmirror` example features a emscriptenPackage which is defined completel
xmlmirror = pkgs.buildEmscriptenPackage rec {
name = "xmlmirror";
buildInputs = [ pkgconfig autoconf automake libtool gnumake libxml2 nodejs openjdk json_c ];
nativeBuildInputs = [ pkgconfig zlib ];
buildInputs = [ pkg-config autoconf automake libtool gnumake libxml2 nodejs openjdk json_c ];
nativeBuildInputs = [ pkg-config zlib ];
src = pkgs.fetchgit {
url = "https://gitlab.com/odfplugfest/xmlmirror.git";

View file

@ -69,11 +69,11 @@ prelude
As an example of how a Nix expression for an Idris package can be created, here is the one for `idrisPackages.yaml`:
```nix
{ build-idris-package
{ lib
, build-idris-package
, fetchFromGitHub
, contrib
, lightyear
, lib
}:
build-idris-package {
name = "yaml";
@ -94,11 +94,11 @@ build-idris-package {
sha256 = "1g4pi0swmg214kndj85hj50ccmckni7piprsxfdzdfhg87s0avw7";
};
meta = {
meta = with lib; {
description = "Idris YAML lib";
homepage = "https://github.com/Heather/Idris.Yaml";
license = lib.licenses.mit;
maintainers = [ lib.maintainers.brainrape ];
license = licenses.mit;
maintainers = [ maintainers.brainrape ];
};
}
```

View file

@ -116,7 +116,7 @@ The first step will be to build the Maven project as a fixed-output derivation i
> Traditionally the Maven repository is at `~/.m2/repository`. We will override this to be the `$out` directory.
```nix
{ stdenv, lib, maven }:
{ lib, stdenv, maven }:
stdenv.mkDerivation {
name = "maven-repository";
buildInputs = [ maven ];
@ -168,7 +168,7 @@ If your package uses _SNAPSHOT_ dependencies or _version ranges_; there is a str
Regardless of which strategy is chosen above, the step to build the derivation is the same.
```nix
{ stdenv, lib, maven, callPackage }:
{ stdenv, maven, callPackage }:
# pick a repository derivation, here we will use buildMaven
let repository = callPackage ./build-maven-repository.nix { };
in stdenv.mkDerivation rec {
@ -222,7 +222,7 @@ We will read the Maven repository and flatten it to a single list. This list wil
We make sure to provide this classpath to the `makeWrapper`.
```nix
{ stdenv, lib, maven, callPackage, makeWrapper, jre }:
{ stdenv, maven, callPackage, makeWrapper, jre }:
let
repository = callPackage ./build-maven-repository.nix { };
in stdenv.mkDerivation rec {
@ -298,7 +298,7 @@ Main-Class: Main
We will modify the derivation above to add a symlink to our repository so that it's accessible to our JAR during the `installPhase`.
```nix
{ stdenv, lib, maven, callPackage, makeWrapper, jre }:
{ stdenv, maven, callPackage, makeWrapper, jre }:
# pick a repository derivation, here we will use buildMaven
let repository = callPackage ./build-maven-repository.nix { };
in stdenv.mkDerivation rec {

View file

@ -32,11 +32,11 @@ buildDunePackage rec {
propagatedBuildInputs = [ bigstringaf result ];
doCheck = true;
meta = {
meta = with lib; {
homepage = "https://github.com/inhabitedtype/angstrom";
description = "OCaml parser combinators built for speed and memory efficiency";
license = lib.licenses.bsd3;
maintainers = with lib.maintainers; [ sternenseemann ];
license = licenses.bsd3;
maintainers = with maintainers; [ sternenseemann ];
};
}
```

View file

@ -110,7 +110,7 @@ ClassC3Componentised = buildPerlPackage rec {
On Darwin, if a script has too many `-Idir` flags in its first line (its “shebang line”), it will not run. This can be worked around by calling the `shortenPerlShebang` function from the `postInstall` phase:
```nix
{ stdenv, lib, buildPerlPackage, fetchurl, shortenPerlShebang }:
{ lib, stdenv, buildPerlPackage, fetchurl, shortenPerlShebang }:
ImageExifTool = buildPerlPackage {
pname = "Image-ExifTool";

View file

@ -610,6 +610,10 @@ Using the example above, the analagous pytestCheckHook usage would be:
"download"
"update"
];
disabledTestFiles = [
"tests/test_failing.py"
];
```
This is especially useful when tests need to be conditionally disabled,

View file

@ -8,7 +8,7 @@ There are primarily two problems which the Qt infrastructure is designed to addr
```{=docbook}
<programlisting>
{ mkDerivation, lib, qtbase }: <co xml:id='qt-default-nix-co-1' />
{ mkDerivation, qtbase }: <co xml:id='qt-default-nix-co-1' />
mkDerivation { <co xml:id='qt-default-nix-co-2' />
pname = "myapp";
@ -92,32 +92,43 @@ mkDerivation {
}
```
## Adding a library to Nixpkgs
Add a Qt library to all-packages.nix by adding it to the collection inside `mkLibsForQt5`. This ensures that the library is built with every available version of Qt as needed.
### Example Adding a Qt library to all-packages.nix {#qt-library-all-packages-nix}
Qt libraries are added to `qt5-packages.nix` and are made available for every Qt
version supported.
### Example adding a Qt library {#qt-library-all-packages-nix}
The following represents the contents of `qt5-packages.nix`.
```
{
# ...
mkLibsForQt5 = self: with self; {
# ...
mylib = callPackage ../path/to/mylib {};
};
# ...
}
```
## Adding an application to Nixpkgs
Add a Qt application to *all-packages.nix* using `libsForQt5.callPackage` instead of the usual `callPackage`. The former ensures that all dependencies are built with the same version of Qt.
Applications that use Qt are also added to `qt5-packages.nix`. An alias is added
in the top-level `all-packages.nix` pointing to the package with the desired Qt5 version.
### Example Adding a QT application to all-packages.nix {#qt-application-all-packages-nix}
```nix
### Example adding a Qt application {#qt-application-all-packages-nix}
The following represents the contents of `qt5-packages.nix`.
```
{
# ...
myapp = libsForQt5.callPackage ../path/to/myapp/ {};
myapp = callPackage ../path/to/myapp {};
# ...
}
```
The following represents the contents of `all-packages.nix`.
```
{
# ...
myapp = libsForQt5.myapp;
# ...
}

View file

@ -32,14 +32,12 @@ However, if you'd like to add a file to your project source to make the
environment available for other contributors, you can create a `default.nix`
file like so:
```nix
let
pkgs = import <nixpkgs> {};
stdenv = pkgs.stdenv;
in with pkgs; {
with import <nixpkgs> {};
{
myProject = stdenv.mkDerivation {
name = "myProject";
version = "1";
src = if pkgs.lib.inNixShell then null else nix;
src = if lib.inNixShell then null else nix;
buildInputs = with rPackages; [
R

View file

@ -232,7 +232,7 @@ If you want to package a specific version, you can use the standard Gemfile synt
Now you can also make a `default.nix` that looks like this:
```nix
{ lib, bundlerApp }:
{ bundlerApp }:
bundlerApp {
pname = "mdl";

View file

@ -19,6 +19,8 @@ or use Mozilla's [Rust nightlies overlay](#using-the-rust-nightlies-overlay).
Rust applications are packaged by using the `buildRustPackage` helper from `rustPlatform`:
```
{ lib, rustPlatform }:
rustPlatform.buildRustPackage rec {
pname = "ripgrep";
version = "12.1.1";
@ -226,8 +228,6 @@ source code in a reproducible way. If it is missing or out-of-date one can use
the `cargoPatches` attribute to update or add it.
```
{ lib, rustPlatform, fetchFromGitHub }:
rustPlatform.buildRustPackage rec {
(...)
cargoPatches = [
@ -263,7 +263,7 @@ Now, the file produced by the call to `carnix`, called `hello.nix`, looks like:
```
# Generated by carnix 0.6.5: carnix -o hello.nix --src ./. Cargo.lock --standalone
{ lib, stdenv, buildRustCrate, fetchgit }:
{ stdenv, buildRustCrate, fetchgit }:
let kernel = stdenv.buildPlatform.parsed.kernel.name;
# ... (content skipped)
in
@ -292,7 +292,7 @@ following nix file:
```
# Generated by carnix 0.6.5: carnix -o hello.nix --src ./. Cargo.lock --standalone
{ lib, stdenv, buildRustCrate, fetchgit }:
{ stdenv, buildRustCrate, fetchgit }:
let kernel = stdenv.buildPlatform.parsed.kernel.name;
# ... (content skipped)
in
@ -480,7 +480,7 @@ stdenv.mkDerivation {
rustc cargo
# Example Build-time Additional Dependencies
pkgconfig
pkg-config
];
buildInputs = [
# Example Run-time Additional Dependencies
@ -522,7 +522,7 @@ stdenv.mkDerivation {
latest.rustChannels.nightly.rust
# Add some extra dependencies from `pkgs`
pkgconfig openssl
pkg-config openssl
];
# Set Environment Variables
@ -567,12 +567,13 @@ in the `~/.config/nixpkgs/overlays` directory.
Add the following to your `configuration.nix`, `home-configuration.nix`, `shell.nix`, or similar:
```
nixpkgs = {
{ pkgs ? import <nixpkgs> {
overlays = [
(import (builtins.fetchTarball https://github.com/mozilla/nixpkgs-mozilla/archive/master.tar.gz))
# Further overlays go here
];
};
};
```
Note that this will fetch the latest overlay version when rebuilding your system.

View file

@ -1,5 +1,3 @@
{ pkgs ? import ../. {} }:
(import ./default.nix {}).overrideAttrs (x: {
buildInputs = x.buildInputs ++ [ pkgs.xmloscopy pkgs.ruby ];
})
{ pkgs ? import ../. { } }:
(import ./default.nix { }).overrideAttrs
(x: { buildInputs = (x.buildInputs or [ ]) ++ [ pkgs.xmloscopy pkgs.ruby ]; })

View file

@ -291,5 +291,40 @@ stdenv.mkDerivation {
}
</programlisting>
</section>
<section xml:id="sec-overlays-alternatives-mpi">
<title>Switching the MPI implementation</title>
<para>
All programs that are built with
<link xlink:href="https://en.wikipedia.org/wiki/Message_Passing_Interface">MPI</link>
support use the generic attribute <varname>mpi</varname>
as an input. At the moment Nixpkgs natively provides two different
MPI implementations:
<itemizedlist>
<listitem>
<para>
<link xlink:href="https://www.open-mpi.org/">Open MPI</link>
(default), attribute name <varname>openmpi</varname>
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://www.mpich.org/">MPICH</link>,
attribute name <varname>mpich</varname>
</para>
</listitem>
</itemizedlist>
</para>
<para>
To provide MPI enabled applications that use <literal>MPICH</literal>, instead
of the default <literal>Open MPI</literal>, simply use the following overlay:
</para>
<programlisting>
self: super:
{
mpi = self.mpich;
}
</programlisting>
</section>
</section>
</chapter>

View file

@ -183,6 +183,24 @@ rec {
else
[];
/* Return the cartesian product of attribute set value combinations.
Example:
cartesianProductOfSets { a = [ 1 2 ]; b = [ 10 20 ]; }
=> [
{ a = 1; b = 10; }
{ a = 1; b = 20; }
{ a = 2; b = 10; }
{ a = 2; b = 20; }
]
*/
cartesianProductOfSets = attrsOfLists:
lib.foldl' (listOfAttrs: attrName:
concatMap (attrs:
map (listValue: attrs // { ${attrName} = listValue; }) attrsOfLists.${attrName}
) listOfAttrs
) [{}] (attrNames attrsOfLists);
/* Utility function that creates a {name, value} pair as expected by
builtins.listToAttrs.
@ -493,5 +511,4 @@ rec {
zipWithNames = zipAttrsWithNames;
zip = builtins.trace
"lib.zip is deprecated, use lib.zipAttrsWith instead" zipAttrsWith;
}

View file

@ -148,6 +148,28 @@ rec {
/* A combination of `traceVal` and `traceSeqN`. */
traceValSeqN = traceValSeqNFn id;
/* Trace the input and output of a function `f` named `name`,
both down to `depth`.
This is useful for adding around a function call,
to see the before/after of values as they are transformed.
Example:
traceFnSeqN 2 "id" (x: x) { a.b.c = 3; }
trace: { fn = "id"; from = { a.b = {}; }; to = { a.b = {}; }; }
=> { a.b.c = 3; }
*/
traceFnSeqN = depth: name: f: v:
let res = f v;
in lib.traceSeqN
(depth + 1)
{
fn = name;
from = v;
to = res;
}
res;
# -- TESTING --

View file

@ -78,7 +78,7 @@ let
zipAttrsWithNames zipAttrsWith zipAttrs recursiveUpdateUntil
recursiveUpdate matchAttrs overrideExisting getOutput getBin
getLib getDev getMan chooseDevOutputs zipWithNames zip
recurseIntoAttrs dontRecurseIntoAttrs;
recurseIntoAttrs dontRecurseIntoAttrs cartesianProductOfSets;
inherit (self.lists) singleton forEach foldr fold foldl foldl' imap0 imap1
concatMap flatten remove findSingle findFirst any all count
optional optionals toList range partition zipListsWith zipLists
@ -130,7 +130,7 @@ let
assertMsg assertOneOf;
inherit (self.debug) addErrorContextToAttrs traceIf traceVal traceValFn
traceXMLVal traceXMLValMarked traceSeq traceSeqN traceValSeq
traceValSeqFn traceValSeqN traceValSeqNFn traceShowVal
traceValSeqFn traceValSeqN traceValSeqNFn traceFnSeqN traceShowVal
traceShowValMarked showVal traceCall traceCall2 traceCall3
traceValIfNot runTests testAllTrue traceCallXml attrNamesToStr;
inherit (self.misc) maybeEnv defaultMergeArg defaultMerge foldArgs

View file

@ -87,7 +87,7 @@ lib.mapAttrs (n: v: v // { shortName = n; }) {
beerware = spdx {
spdxId = "Beerware";
fullName = ''Beerware License'';
fullName = "Beerware License";
};
blueOak100 = spdx {
@ -100,6 +100,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) {
fullName = "BSD Zero Clause License";
};
bsd1 = spdx {
spdxId = "BSD-1-Clause";
fullName = "BSD 1-Clause License";
};
bsd2 = spdx {
spdxId = "BSD-2-Clause";
fullName = ''BSD 2-clause "Simplified" License'';
@ -107,7 +112,7 @@ lib.mapAttrs (n: v: v // { shortName = n; }) {
bsd2Patent = spdx {
spdxId = "BSD-2-Clause-Patent";
fullName = ''BSD-2-Clause Plus Patent License'';
fullName = "BSD-2-Clause Plus Patent License";
};
bsd3 = spdx {

View file

@ -629,7 +629,9 @@ rec {
crossLists (x:y: "${toString x}${toString y}") [[1 2] [3 4]]
=> [ "13" "14" "23" "24" ]
*/
crossLists = f: foldl (fs: args: concatMap (f: map f args) fs) [f];
crossLists = builtins.trace
"lib.crossLists is deprecated, use lib.cartesianProductOfSets instead"
(f: foldl (fs: args: concatMap (f: map f args) fs) [f]);
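/* Migration sketch (illustrative values): a call such as
     crossLists (x: y: x + y) [ [ 1 2 ] [ 3 4 ] ]
   can be rewritten with the replacement as
     map ({ x, y }: x + y) (cartesianProductOfSets { x = [ 1 2 ]; y = [ 3 4 ]; })
   both of which evaluate to [ 4 5 5 6 ].
*/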
/* Remove duplicate elements from the list. O(n^2) complexity.

View file

@ -895,7 +895,7 @@ rec {
fromOpt = getAttrFromPath from options;
toOf = attrByPath to
(abort "Renaming error: option `${showOption to}' does not exist.");
toType = let opt = attrByPath to {} options; in opt.type or null;
toType = let opt = attrByPath to {} options; in opt.type or (types.submodule {});
in
{
options = setAttrByPath from (mkOption {

View file

@ -1,7 +1,7 @@
{ lib }:
rec {
# platform.gcc.arch to its features (as in /proc/cpuinfo)
# gcc.arch to its features (as in /proc/cpuinfo)
features = {
default = [ ];
# x86_64 Intel

View file

@ -24,8 +24,6 @@ rec {
# Either of these can be losslessly-extracted from `parsed` iff parsing succeeds.
system = parse.doubleFromSystem final.parsed;
config = parse.tripleFromSystem final.parsed;
# Just a guess, based on `system`
platform = platforms.select final;
# Determine whether we are compatible with the provided CPU
isCompatible = platform: parse.isCompatible final.parsed.cpu platform.parsed.cpu;
# Derived meta-data
@ -79,12 +77,23 @@ rec {
};
isStatic = final.isWasm || final.isRedox;
kernelArch =
# Just a guess, based on `system`
inherit
({
linux-kernel = args.linux-kernel or {};
gcc = args.gcc or {};
rustc = args.rust or {};
} // platforms.select final)
linux-kernel gcc rustc;
linuxArch =
if final.isAarch32 then "arm"
else if final.isAarch64 then "arm64"
else if final.isx86_32 then "x86"
else if final.isx86_64 then "x86"
else if final.isx86_32 then "i386"
else if final.isx86_64 then "x86_64"
else if final.isMips then "mips"
else if final.isPower then "powerpc"
else if final.isRiscV then "riscv"
else final.parsed.cpu.name;
qemuArch =
@ -129,7 +138,7 @@ rec {
else throw "Don't know how to run ${final.config} executables.";
} // mapAttrs (n: v: v final.parsed) inspect.predicates
// mapAttrs (n: v: v final.platform.gcc.arch or "default") architectures.predicates
// mapAttrs (n: v: v final.gcc.arch or "default") architectures.predicates
// args;
in assert final.useAndroidPrebuilt -> final.isAndroid;
assert lib.foldl

View file

@ -24,6 +24,7 @@ let
"x86_64-redox"
"powerpc64-linux"
"powerpc64le-linux"
"riscv32-linux" "riscv64-linux"
@ -72,7 +73,7 @@ in {
darwin = filterDoubles predicates.isDarwin;
freebsd = filterDoubles predicates.isFreeBSD;
# Should be better, but MinGW is unclear.
gnu = filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnu; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabi; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabihf; });
gnu = filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnu; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabi; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.gnueabihf; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.elfv1; }) ++ filterDoubles (matchAttrs { kernel = parse.kernels.linux; abi = parse.abis.elfv2; });
illumos = filterDoubles predicates.isSunOS;
linux = filterDoubles predicates.isLinux;
netbsd = filterDoubles predicates.isNetBSD;
@ -85,5 +86,5 @@ in {
embedded = filterDoubles predicates.isNone;
mesaPlatforms = ["i686-linux" "x86_64-linux" "x86_64-darwin" "armv5tel-linux" "armv6l-linux" "armv7l-linux" "armv7a-linux" "aarch64-linux" "powerpc64le-linux"];
mesaPlatforms = ["i686-linux" "x86_64-linux" "x86_64-darwin" "armv5tel-linux" "armv6l-linux" "armv7l-linux" "armv7a-linux" "aarch64-linux" "powerpc64-linux" "powerpc64le-linux"];
}

View file

@ -7,7 +7,6 @@ let
riscv = bits: {
config = "riscv${bits}-unknown-linux-gnu";
platform = platforms.riscv-multiplatform;
};
in
@ -17,84 +16,81 @@ rec {
#
powernv = {
config = "powerpc64le-unknown-linux-gnu";
platform = platforms.powernv;
};
musl-power = {
config = "powerpc64le-unknown-linux-musl";
platform = platforms.powernv;
};
ppc64-elfv1 = {
config = "powerpc64-unknown-linux-elfv1";
};
ppc64-elfv2 = {
config = "powerpc64-unknown-linux-elfv2";
};
ppc64 = ppc64-elfv2; # default to modern elfv2
ppc64-musl = {
config = "powerpc64-unknown-linux-musl";
gcc = { abi = "elfv2"; }; # for gcc configuration
};
sheevaplug = {
config = "armv5tel-unknown-linux-gnueabi";
platform = platforms.sheevaplug;
};
} // platforms.sheevaplug;
raspberryPi = {
config = "armv6l-unknown-linux-gnueabihf";
platform = platforms.raspberrypi;
};
} // platforms.raspberrypi;
remarkable1 = {
config = "armv7l-unknown-linux-gnueabihf";
platform = platforms.zero-gravitas;
};
} // platforms.zero-gravitas;
remarkable2 = {
config = "armv7l-unknown-linux-gnueabihf";
platform = platforms.zero-sugar;
};
} // platforms.zero-sugar;
armv7l-hf-multiplatform = {
config = "armv7l-unknown-linux-gnueabihf";
platform = platforms.armv7l-hf-multiplatform;
};
aarch64-multiplatform = {
config = "aarch64-unknown-linux-gnu";
platform = platforms.aarch64-multiplatform;
};
armv7a-android-prebuilt = {
config = "armv7a-unknown-linux-androideabi";
sdkVer = "29";
ndkVer = "21";
platform = platforms.armv7a-android;
useAndroidPrebuilt = true;
};
} // platforms.armv7a-android;
aarch64-android-prebuilt = {
config = "aarch64-unknown-linux-android";
sdkVer = "29";
ndkVer = "21";
platform = platforms.aarch64-multiplatform;
useAndroidPrebuilt = true;
};
scaleway-c1 = armv7l-hf-multiplatform // rec {
platform = platforms.scaleway-c1;
inherit (platform.gcc) fpu;
};
scaleway-c1 = armv7l-hf-multiplatform // platforms.scaleway-c1;
pogoplug4 = {
config = "armv5tel-unknown-linux-gnueabi";
platform = platforms.pogoplug4;
};
} // platforms.pogoplug4;
ben-nanonote = {
config = "mipsel-unknown-linux-uclibc";
platform = platforms.ben_nanonote;
};
} // platforms.ben_nanonote;
fuloongminipc = {
config = "mipsel-unknown-linux-gnu";
platform = platforms.fuloong2f_n32;
};
} // platforms.fuloong2f_n32;
muslpi = raspberryPi // {
config = "armv6l-unknown-linux-musleabihf";
};
aarch64-multiplatform-musl = aarch64-multiplatform // {
aarch64-multiplatform-musl = {
config = "aarch64-unknown-linux-musl";
};
@ -110,13 +106,11 @@ rec {
riscv64-embedded = {
config = "riscv64-none-elf";
libc = "newlib";
platform = platforms.riscv-multiplatform;
};
riscv32-embedded = {
config = "riscv32-none-elf";
libc = "newlib";
platform = platforms.riscv-multiplatform;
};
mmix = {
@ -136,13 +130,11 @@ rec {
vc4 = {
config = "vc4-elf";
libc = "newlib";
platform = {};
};
or1k = {
config = "or1k-elf";
libc = "newlib";
platform = {};
};
arm-embedded = {
@ -152,6 +144,12 @@ rec {
armhf-embedded = {
config = "arm-none-eabihf";
libc = "newlib";
# GCC8+ does not build without this
# (https://www.mail-archive.com/gcc-bugs@gcc.gnu.org/msg552339.html):
gcc = {
arch = "armv5t";
fpu = "vfp";
};
};
aarch64-embedded = {
@ -200,41 +198,37 @@ rec {
iphone64 = {
config = "aarch64-apple-ios";
# config = "aarch64-apple-darwin14";
sdkVer = "13.2";
xcodeVer = "11.3.1";
sdkVer = "14.3";
xcodeVer = "12.3";
xcodePlatform = "iPhoneOS";
useiOSPrebuilt = true;
platform = {};
};
iphone32 = {
config = "armv7a-apple-ios";
# config = "arm-apple-darwin10";
sdkVer = "13.2";
xcodeVer = "11.3.1";
sdkVer = "14.3";
xcodeVer = "12.3";
xcodePlatform = "iPhoneOS";
useiOSPrebuilt = true;
platform = {};
};
iphone64-simulator = {
config = "x86_64-apple-ios";
# config = "x86_64-apple-darwin14";
sdkVer = "13.2";
xcodeVer = "11.3.1";
sdkVer = "14.3";
xcodeVer = "12.3";
xcodePlatform = "iPhoneSimulator";
useiOSPrebuilt = true;
platform = {};
};
iphone32-simulator = {
config = "i686-apple-ios";
# config = "i386-apple-darwin11";
sdkVer = "13.2";
xcodeVer = "11.3.1";
sdkVer = "14.3";
xcodeVer = "12.3";
xcodePlatform = "iPhoneSimulator";
useiOSPrebuilt = true;
platform = {};
};
#
@ -245,7 +239,6 @@ rec {
mingw32 = {
config = "i686-w64-mingw32";
libc = "msvcrt"; # This distinguishes the mingw (non posix) toolchain
platform = {};
};
# 64 bit mingw-w64
@ -253,7 +246,6 @@ rec {
# That's the triplet they use in the mingw-w64 docs.
config = "x86_64-w64-mingw32";
libc = "msvcrt"; # This distinguishes the mingw (non posix) toolchain
platform = {};
};
# BSDs
@ -275,6 +267,5 @@ rec {
# Ghcjs
ghcjs = {
config = "js-unknown-ghcjs";
platform = {};
};
}

View file

@ -337,10 +337,18 @@ rec {
The "gnu" ABI is ambiguous on 32-bit ARM. Use "gnueabi" or "gnueabihf" instead.
'';
}
{ assertion = platform: platform.system != "powerpc64-linux";
message = ''
The "gnu" ABI is ambiguous on big-endian 64-bit PPC. Use "elfv1" or "elfv2" instead.
'';
}
];
};
gnuabi64 = { abi = "64"; };
elfv1 = { abi = "elfv1"; };
elfv2 = { abi = "elfv2"; };
musleabi = { float = "soft"; };
musleabihf = { float = "hard"; };
musl = {};
@ -444,6 +452,7 @@ rec {
if lib.versionAtLeast (parsed.cpu.version or "0") "6"
then abis.gnueabihf
else abis.gnueabi
else if cpu == "powerpc64" then abis.elfv2
else abis.gnu
else abis.unknown;
};

View file

@ -1,58 +1,49 @@
{ lib }:
rec {
pcBase = {
pc = {
linux-kernel = {
name = "pc";
kernelBaseConfig = "defconfig";
baseConfig = "defconfig";
# Build whatever possible as a module, if not stated in the extra config.
kernelAutoModules = true;
kernelTarget = "bzImage";
autoModules = true;
target = "bzImage";
};
};
pc64 = pcBase // { kernelArch = "x86_64"; };
pc32 = pcBase // { kernelArch = "i386"; };
pc32_simplekernel = pc32 // {
kernelAutoModules = false;
};
pc64_simplekernel = pc64 // {
kernelAutoModules = false;
pc_simplekernel = lib.recursiveUpdate pc {
linux-kernel.autoModules = false;
};
powernv = {
linux-kernel = {
name = "PowerNV";
kernelArch = "powerpc";
kernelBaseConfig = "powernv_defconfig";
kernelTarget = "zImage";
kernelInstallTarget = "install";
kernelFile = "vmlinux";
kernelAutoModules = true;
baseConfig = "powernv_defconfig";
target = "zImage";
installTarget = "install";
file = "vmlinux";
autoModules = true;
# avoid driver/FS trouble arising from unusual page size
kernelExtraConfig = ''
extraConfig = ''
PPC_64K_PAGES n
PPC_4K_PAGES y
IPV6 y
'';
};
};
##
## ARM
##
pogoplug4 = {
linux-kernel = {
name = "pogoplug4";
gcc = {
arch = "armv5te";
};
kernelMajor = "2.6";
kernelBaseConfig = "multi_v5_defconfig";
kernelArch = "arm";
kernelAutoModules = false;
kernelExtraConfig =
''
baseConfig = "multi_v5_defconfig";
autoModules = false;
extraConfig = ''
# Ubi for the mtd
MTD_UBI y
UBIFS_FS y
@ -62,19 +53,23 @@ rec {
UBIFS_FS_ZLIB y
UBIFS_FS_DEBUG n
'';
kernelMakeFlags = [ "LOADADDR=0x8000" ];
kernelTarget = "uImage";
makeFlags = [ "LOADADDR=0x8000" ];
target = "uImage";
# TODO reenable once manual-config's config actually builds a .dtb and this is checked to be working
#kernelDTB = true;
#DTB = true;
};
gcc = {
arch = "armv5te";
};
};
sheevaplug = {
linux-kernel = {
name = "sheevaplug";
kernelMajor = "2.6";
kernelBaseConfig = "multi_v5_defconfig";
kernelArch = "arm";
kernelAutoModules = false;
kernelExtraConfig = ''
baseConfig = "multi_v5_defconfig";
autoModules = false;
extraConfig = ''
BLK_DEV_RAM y
BLK_DEV_INITRD y
BLK_DEV_CRYPTOLOOP m
@ -172,28 +167,30 @@ rec {
KGDB_SERIAL_CONSOLE y
KGDB_KDB y
'';
kernelMakeFlags = [ "LOADADDR=0x0200000" ];
kernelTarget = "uImage";
kernelDTB = true; # Beyond 3.10
makeFlags = [ "LOADADDR=0x0200000" ];
target = "uImage";
DTB = true; # Beyond 3.10
};
gcc = {
arch = "armv5te";
};
};
raspberrypi = {
linux-kernel = {
name = "raspberrypi";
kernelMajor = "2.6";
kernelBaseConfig = "bcm2835_defconfig";
kernelDTB = true;
kernelArch = "arm";
kernelAutoModules = true;
kernelPreferBuiltin = true;
kernelExtraConfig = ''
baseConfig = "bcm2835_defconfig";
DTB = true;
autoModules = true;
preferBuiltin = true;
extraConfig = ''
# Disable OABI to have seccomp_filter (required for systemd)
# https://github.com/raspberrypi/firmware/issues/651
OABI_COMPAT n
'';
kernelTarget = "zImage";
target = "zImage";
};
gcc = {
arch = "armv6";
fpu = "vfp";
@ -204,13 +201,15 @@ rec {
raspberrypi2 = armv7l-hf-multiplatform;
zero-gravitas = {
linux-kernel = {
name = "zero-gravitas";
kernelBaseConfig = "zero-gravitas_defconfig";
kernelArch = "arm";
# kernelTarget verified by checking /boot on reMarkable 1 device
kernelTarget = "zImage";
kernelAutoModules = false;
kernelDTB = true;
baseConfig = "zero-gravitas_defconfig";
# Target verified by checking /boot on reMarkable 1 device
target = "zImage";
autoModules = false;
DTB = true;
};
gcc = {
fpu = "neon";
cpu = "cortex-a9";
@ -218,13 +217,15 @@ rec {
};
zero-sugar = {
linux-kernel = {
name = "zero-sugar";
kernelBaseConfig = "zero-sugar_defconfig";
kernelArch = "arm";
kernelDTB = true;
kernelAutoModules = false;
kernelPreferBuiltin = true;
kernelTarget = "zImage";
baseConfig = "zero-sugar_defconfig";
DTB = true;
autoModules = false;
preferBuiltin = true;
target = "zImage";
};
gcc = {
cpu = "cortex-a7";
fpu = "neon-vfpv4";
@ -232,7 +233,7 @@ rec {
};
};
scaleway-c1 = armv7l-hf-multiplatform // {
scaleway-c1 = lib.recursiveUpdate armv7l-hf-multiplatform {
gcc = {
cpu = "cortex-a9";
fpu = "vfpv3";
@ -240,13 +241,11 @@ rec {
};
utilite = {
linux-kernel = {
name = "utilite";
kernelMajor = "2.6";
kernelBaseConfig = "multi_v7_defconfig";
kernelArch = "arm";
kernelAutoModules = false;
kernelExtraConfig =
''
baseConfig = "multi_v7_defconfig";
autoModules = false;
extraConfig = ''
# Ubi for the mtd
MTD_UBI y
UBIFS_FS y
@ -256,35 +255,37 @@ rec {
UBIFS_FS_ZLIB y
UBIFS_FS_DEBUG n
'';
kernelMakeFlags = [ "LOADADDR=0x10800000" ];
kernelTarget = "uImage";
kernelDTB = true;
makeFlags = [ "LOADADDR=0x10800000" ];
target = "uImage";
DTB = true;
};
gcc = {
cpu = "cortex-a9";
fpu = "neon";
};
};
guruplug = sheevaplug // {
guruplug = lib.recursiveUpdate sheevaplug {
# Define `CONFIG_MACH_GURUPLUG' (see
# <http://kerneltrap.org/mailarchive/git-commits-head/2010/5/19/33618>)
# and other GuruPlug-specific things. Requires the `guruplug-defconfig'
# patch.
kernelBaseConfig = "guruplug_defconfig";
linux-kernel.baseConfig = "guruplug_defconfig";
};
beaglebone = armv7l-hf-multiplatform // {
beaglebone = lib.recursiveUpdate armv7l-hf-multiplatform {
linux-kernel = {
name = "beaglebone";
kernelBaseConfig = "bb.org_defconfig";
kernelAutoModules = false;
kernelExtraConfig = ""; # TBD kernel config
kernelTarget = "zImage";
baseConfig = "bb.org_defconfig";
autoModules = false;
extraConfig = ""; # TBD kernel config
target = "zImage";
};
};
# https://developer.android.com/ndk/guides/abis#v7a
armv7a-android = {
name = "armeabi-v7a";
linux-kernel.name = "armeabi-v7a";
gcc = {
arch = "armv7-a";
float-abi = "softfp";
@ -293,15 +294,15 @@ rec {
};
armv7l-hf-multiplatform = {
linux-kernel = {
name = "armv7l-hf-multiplatform";
kernelMajor = "2.6"; # Using "2.6" enables 2.6 kernel syscalls in glibc.
kernelBaseConfig = "multi_v7_defconfig";
kernelArch = "arm";
kernelDTB = true;
kernelAutoModules = true;
kernelPreferBuiltin = true;
kernelTarget = "zImage";
kernelExtraConfig = ''
Major = "2.6"; # Using "2.6" enables 2.6 kernel syscalls in glibc.
baseConfig = "multi_v7_defconfig";
DTB = true;
autoModules = true;
PreferBuiltin = true;
target = "zImage";
extraConfig = ''
# Serial port for Raspberry Pi 3. Upstream forgot to add it to the ARMv7 defconfig.
SERIAL_8250_BCM2835AUX y
SERIAL_8250_EXTENDED y
@ -317,6 +318,7 @@ rec {
# https://github.com/raspberrypi/firmware/issues/651
OABI_COMPAT n
'';
};
gcc = {
# Some table about fpu flags:
# http://community.arm.com/servlet/JiveServlet/showImage/38-1981-3827/blogentry-103749-004812900+1365712953_thumb.png
@ -341,15 +343,14 @@ rec {
};
aarch64-multiplatform = {
linux-kernel = {
name = "aarch64-multiplatform";
kernelMajor = "2.6"; # Using "2.6" enables 2.6 kernel syscalls in glibc.
kernelBaseConfig = "defconfig";
kernelArch = "arm64";
kernelDTB = true;
kernelAutoModules = true;
kernelPreferBuiltin = true;
kernelExtraConfig = ''
# Raspberry Pi 3 stuff. Not needed for kernels >= 4.10.
baseConfig = "defconfig";
DTB = true;
autoModules = true;
preferBuiltin = true;
extraConfig = ''
# Raspberry Pi 3 stuff. Not needed for kernels >= 4.10.
ARCH_BCM2835 y
BCM2835_MBOX y
BCM2835_WDT y
@ -369,7 +370,8 @@ rec {
# which our initrd builder can't currently do easily.
USB_XHCI_TEGRA m
'';
kernelTarget = "Image";
target = "Image";
};
gcc = {
arch = "armv8-a";
};
@ -380,9 +382,9 @@ rec {
##
ben_nanonote = {
linux-kernel = {
name = "ben_nanonote";
kernelMajor = "2.6";
kernelArch = "mips";
};
gcc = {
arch = "mips32";
float = "soft";
@ -390,12 +392,11 @@ rec {
};
fuloong2f_n32 = {
linux-kernel = {
name = "fuloong2f_n32";
kernelMajor = "2.6";
kernelBaseConfig = "lemote2f_defconfig";
kernelArch = "mips";
kernelAutoModules = false;
kernelExtraConfig = ''
baseConfig = "lemote2f_defconfig";
autoModules = false;
extraConfig = ''
MIGRATION n
COMPACTION n
@ -459,7 +460,8 @@ rec {
# The kernel doesn't boot at all, with FTRACE
FTRACE n
'';
kernelTarget = "vmlinux";
target = "vmlinux";
};
gcc = {
arch = "loongson2f";
float = "hard";
@ -472,34 +474,36 @@ rec {
##
riscv-multiplatform = {
linux-kernel = {
name = "riscv-multiplatform";
kernelArch = "riscv";
kernelTarget = "vmlinux";
kernelAutoModules = true;
kernelBaseConfig = "defconfig";
kernelExtraConfig = ''
target = "vmlinux";
autoModules = true;
baseConfig = "defconfig";
extraConfig = ''
FTRACE n
SERIAL_OF_PLATFORM y
'';
};
};
select = platform:
# x86
/**/ if platform.isx86_32 then pc32
else if platform.isx86_64 then pc64
/**/ if platform.isx86 then pc
# ARM
else if platform.isAarch32 then let
version = platform.parsed.cpu.version or null;
in if version == null then pcBase
in if version == null then pc
else if lib.versionOlder version "6" then sheevaplug
else if lib.versionOlder version "7" then raspberrypi
else armv7l-hf-multiplatform
else if platform.isAarch64 then aarch64-multiplatform
else if platform.isRiscV then riscv-multiplatform
else if platform.parsed.cpu == lib.systems.parse.cpuTypes.mipsel then fuloong2f_n32
else if platform.parsed.cpu == lib.systems.parse.cpuTypes.powerpc64le then powernv
else pcBase;
else pc;
}

View file

@ -660,4 +660,71 @@ runTests {
expected = [ [ "foo" ] [ "foo" "<name>" "bar" ] [ "foo" "bar" ] ];
};
testCartesianProductOfEmptySet = {
expr = cartesianProductOfSets {};
expected = [ {} ];
};
testCartesianProductOfOneSet = {
expr = cartesianProductOfSets { a = [ 1 2 3 ]; };
expected = [ { a = 1; } { a = 2; } { a = 3; } ];
};
testCartesianProductOfTwoSets = {
expr = cartesianProductOfSets { a = [ 1 ]; b = [ 10 20 ]; };
expected = [
{ a = 1; b = 10; }
{ a = 1; b = 20; }
];
};
testCartesianProductOfTwoSetsWithOneEmpty = {
expr = cartesianProductOfSets { a = [ ]; b = [ 10 20 ]; };
expected = [ ];
};
testCartesianProductOfThreeSets = {
expr = cartesianProductOfSets {
a = [ 1 2 3 ];
b = [ 10 20 30 ];
c = [ 100 200 300 ];
};
expected = [
{ a = 1; b = 10; c = 100; }
{ a = 1; b = 10; c = 200; }
{ a = 1; b = 10; c = 300; }
{ a = 1; b = 20; c = 100; }
{ a = 1; b = 20; c = 200; }
{ a = 1; b = 20; c = 300; }
{ a = 1; b = 30; c = 100; }
{ a = 1; b = 30; c = 200; }
{ a = 1; b = 30; c = 300; }
{ a = 2; b = 10; c = 100; }
{ a = 2; b = 10; c = 200; }
{ a = 2; b = 10; c = 300; }
{ a = 2; b = 20; c = 100; }
{ a = 2; b = 20; c = 200; }
{ a = 2; b = 20; c = 300; }
{ a = 2; b = 30; c = 100; }
{ a = 2; b = 30; c = 200; }
{ a = 2; b = 30; c = 300; }
{ a = 3; b = 10; c = 100; }
{ a = 3; b = 10; c = 200; }
{ a = 3; b = 10; c = 300; }
{ a = 3; b = 20; c = 100; }
{ a = 3; b = 20; c = 200; }
{ a = 3; b = 20; c = 300; }
{ a = 3; b = 30; c = 100; }
{ a = 3; b = 30; c = 200; }
{ a = 3; b = 30; c = 300; }
];
};
}

View file

@ -262,6 +262,13 @@ checkConfigOutput true config.value.mkbefore ./types-anything/mk-mods.nix
checkConfigOutput 1 config.value.nested.foo ./types-anything/mk-mods.nix
checkConfigOutput baz config.value.nested.bar.baz ./types-anything/mk-mods.nix
## types.functionTo
checkConfigOutput "input is input" config.result ./functionTo/trivial.nix
checkConfigOutput "a b" config.result ./functionTo/merging-list.nix
checkConfigError 'A definition for option .fun.\[function body\]. is not of type .string.. Definition values:\n- In .*wrong-type.nix' config.result ./functionTo/wrong-type.nix
checkConfigOutput "b a" config.result ./functionTo/list-order.nix
checkConfigOutput "a c" config.result ./functionTo/merging-attrs.nix
cat <<EOF
====== module tests ======
$pass Pass

View file

@ -0,0 +1,25 @@
{ lib, config, ... }:
let
inherit (lib) types;
in {
options = {
fun = lib.mkOption {
type = types.functionTo (types.listOf types.str);
};
result = lib.mkOption {
type = types.str;
default = toString (config.fun {
a = "a";
b = "b";
c = "c";
});
};
};
config.fun = lib.mkMerge [
(input: lib.mkAfter [ input.a ])
(input: [ input.b ])
];
}

View file

@ -0,0 +1,27 @@
{ lib, config, ... }:
let
inherit (lib) types;
in {
options = {
fun = lib.mkOption {
type = types.functionTo (types.attrsOf types.str);
};
result = lib.mkOption {
type = types.str;
default = toString (lib.attrValues (config.fun {
a = "a";
b = "b";
c = "c";
}));
};
};
config.fun = lib.mkMerge [
(input: { inherit (input) a; })
(input: { inherit (input) b; })
(input: {
b = lib.mkForce input.c;
})
];
}

View file

@ -0,0 +1,24 @@
{ lib, config, ... }:
let
inherit (lib) types;
in {
options = {
fun = lib.mkOption {
type = types.functionTo (types.listOf types.str);
};
result = lib.mkOption {
type = types.str;
default = toString (config.fun {
a = "a";
b = "b";
c = "c";
});
};
};
config.fun = lib.mkMerge [
(input: [ input.a ])
(input: [ input.b ])
];
}

View file

@ -0,0 +1,17 @@
{ lib, config, ... }:
let
inherit (lib) types;
in {
options = {
fun = lib.mkOption {
type = types.functionTo types.str;
};
result = lib.mkOption {
type = types.str;
default = config.fun "input";
};
};
config.fun = input: "input is ${input}";
}

View file

@ -0,0 +1,18 @@
{ lib, config, ... }:
let
inherit (lib) types;
in {
options = {
fun = lib.mkOption {
type = types.functionTo types.str;
};
result = lib.mkOption {
type = types.str;
default = config.fun 0;
};
};
config.fun = input: input + 1;
}

View file

@ -28,7 +28,7 @@ with lib.systems.doubles; lib.runTests {
testredox = mseteq redox [ "x86_64-redox" ];
testgnu = mseteq gnu (linux /* ++ kfreebsd ++ ... */);
testillumos = mseteq illumos [ "x86_64-solaris" ];
testlinux = mseteq linux [ "aarch64-linux" "armv5tel-linux" "armv6l-linux" "armv7a-linux" "armv7l-linux" "i686-linux" "mipsel-linux" "riscv32-linux" "riscv64-linux" "x86_64-linux" "powerpc64le-linux" ];
testlinux = mseteq linux [ "aarch64-linux" "armv5tel-linux" "armv6l-linux" "armv7a-linux" "armv7l-linux" "i686-linux" "mipsel-linux" "riscv32-linux" "riscv64-linux" "x86_64-linux" "powerpc64-linux" "powerpc64le-linux" ];
testnetbsd = mseteq netbsd [ "i686-netbsd" "x86_64-netbsd" ];
testopenbsd = mseteq openbsd [ "i686-openbsd" "x86_64-openbsd" ];
testwindows = mseteq windows [ "i686-cygwin" "x86_64-cygwin" "i686-windows" "x86_64-windows" ];

View file

@ -453,6 +453,17 @@ rec {
functor = (defaultFunctor name) // { wrapped = elemType; };
};
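# Option type for a function from a caller-supplied argument to a value of
# type `elemType`. Multiple definitions are merged by applying every
# definition to the same argument and merging the results with
# `elemType`'s merge function (for example, with `listOf str` the
# resulting lists are concatenated).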
functionTo = elemType: mkOptionType {
name = "functionTo";
description = "function that evaluates to a(n) ${elemType.name}";
check = isFunction;
merge = loc: defs:
fnArgs: (mergeDefinitions (loc ++ [ "[function body]" ]) elemType (map (fn: { inherit (fn) file; value = fn.value fnArgs; }) defs)).mergedValue;
getSubOptions = elemType.getSubOptions;
getSubModules = elemType.getSubModules;
substSubModules = m: functionTo (elemType.substSubModules m);
};
# A submodule (like typed attribute set). See NixOS manual.
submodule = modules: submoduleWith {
shorthandOnlyDefinesConfig = true;

View file

@ -868,6 +868,12 @@
githubId = 706854;
name = "Etienne Laurin";
};
attila-lendvai = {
name = "Attila Lendvai";
email = "attila@lendvai.name";
github = "attila-lendvai";
githubId = 840345;
};
auntie = {
email = "auntieNeo@gmail.com";
github = "auntieNeo";
@ -970,6 +976,12 @@
email = "sivaraman.balaji@gmail.com";
name = "Balaji Sivaraman";
};
baloo = {
email = "nixpkgs@superbaloo.net";
github = "baloo";
githubId = 59060;
name = "Arthur Gautier";
};
balsoft = {
email = "balsoft75@gmail.com";
github = "balsoft";
@ -1030,6 +1042,12 @@
githubId = 1015044;
name = "Brandon Carrell";
};
bcc32 = {
email = "me@bcc32.com";
github = "bcc32";
githubId = 1239097;
name = "Aaron Zeng";
};
bcdarwin = {
email = "bcdarwin@gmail.com";
github = "bcdarwin";
@ -1078,6 +1096,12 @@
githubId = 75972;
name = "Ben Booth";
};
berberman = {
email = "berberman@yandex.com";
github = "berberman";
githubId = 26041945;
name = "Potato Hatsue";
};
berce = {
email = "bert.moens@gmail.com";
github = "berce";
@ -1577,6 +1601,12 @@
githubId = 33503784;
name = "Yucheng Zhang";
};
cheriimoya = {
email = "github@hausch.xyz";
github = "cheriimoya";
githubId = 28303440;
name = "Max Hausch";
};
chessai = {
email = "chessai1996@gmail.com";
github = "chessai";
@ -1693,16 +1723,6 @@
githubId = 46303707;
name = "Christian Lütke-Stetzkamp";
};
kampka = {
email = "christian@kampka.net";
github = "kampka";
githubId = 422412;
name = "Christian Kampka";
keys = [{
longkeyid = "ed25519/0x1CBE9645DD68E915";
fingerprint = "F7FA 0BD0 8775 337C F6AB 4A14 1CBE 9645 DD68 E915";
}];
};
ckauhaus = {
email = "kc@flyingcircus.io";
github = "ckauhaus";
@ -2031,6 +2051,12 @@
githubId = 23366017;
name = "Dan Haraj";
};
danielbarter = {
email = "danielbarter@gmail.com";
github = "danielbarter";
githubId = 8081722;
name = "Daniel Barter";
};
danieldk = {
email = "me@danieldk.eu";
github = "danieldk";
@ -2575,6 +2601,12 @@
githubId = 119483;
name = "Matthew Brown";
};
eduardosm = {
email = "esm@eduardosm.net";
github = "eduardosm";
githubId = 761151;
name = "Eduardo Sánchez Muñoz";
};
eduarrrd = {
email = "e.bachmakov@gmail.com";
github = "eduarrrd";
@ -3127,6 +3159,12 @@
githubId = 92793;
name = "Friedrich von Never";
};
fortuneteller2k = {
email = "lythe1107@gmail.com";
github = "fortuneteller2k";
githubId = 20619776;
name = "fortuneteller2k";
};
fpletz = {
email = "fpletz@fnordicwalking.de";
github = "fpletz";
@ -3453,6 +3491,12 @@
fingerprint = "7FC7 98AB 390E 1646 ED4D 8F1F 797F 6238 68CD 00C2";
}];
};
greizgh = {
email = "greizgh@ephax.org";
github = "greizgh";
githubId = 1313624;
name = "greizgh";
};
greydot = {
email = "lanablack@amok.cc";
github = "greydot";
@ -3637,6 +3681,12 @@
githubId = 3656888;
name = "hhm";
};
higebu = {
name = "Yuya Kusakabe";
email = "yuya.kusakabe@gmail.com";
github = "higebu";
githubId = 733288;
};
hinton = {
email = "t@larkery.com";
name = "Tom Hinton";
@ -3679,18 +3729,42 @@
fingerprint = "78C2 E81C 828A 420B 269A EBC1 49FA 39F8 A7F7 35F9";
}];
};
humancalico = {
email = "humancalico@disroot.org";
github = "humancalico";
githubId = 51334444;
name = "Akshat Agarwal";
};
hodapp = {
email = "hodapp87@gmail.com";
github = "Hodapp87";
githubId = 896431;
name = "Chris Hodapp";
};
holymonson = {
email = "holymonson@gmail.com";
github = "holymonson";
githubId = 902012;
name = "Monson Shao";
};
hongchangwu = {
email = "wuhc85@gmail.com";
github = "hongchangwu";
githubId = 362833;
name = "Hongchang Wu";
};
hoverbear = {
email = "operator+nix@hoverbear.org";
github = "hoverbear";
githubId = 130903;
name = "Ana Hobden";
};
holgerpeters = {
name = "Holger Peters";
email = "holger.peters@posteo.de";
github = "HolgerPeters";
githubId = 4097049;
};
hrdinka = {
email = "c.nix@hrdinka.at";
github = "hrdinka";
@ -3879,6 +3953,12 @@
githubId = 4458;
name = "Ivan Kozik";
};
ivan-babrou = {
email = "nixpkgs@ivan.computer";
name = "Ivan Babrou";
github = "bobrik";
githubId = 89186;
};
ivan-timokhin = {
email = "nixpkgs@ivan.timokhin.name";
name = "Ivan Timokhin";
@ -4023,6 +4103,12 @@
githubId = 45598;
name = "William Casarin";
};
jbcrail = {
name = "Joseph Crail";
email = "jbcrail@gmail.com";
github = "jbcrail";
githubId = 6038;
};
jbedo = {
email = "cu@cua0.org";
github = "jbedo";
@ -4815,6 +4901,12 @@
github = "kmein";
githubId = 10352507;
};
kmicklas = {
email = "maintainer@kmicklas.com";
name = "Ken Micklas";
github = "kmicklas";
githubId = 929096;
};
knairda = {
email = "adrian@kummerlaender.eu";
name = "Adrian Kummerlaender";
@ -4899,6 +4991,12 @@
githubId = 4032;
name = "Kristoffer Thømt Ravneberg";
};
kritnich = {
email = "kritnich@kritni.ch";
github = "Kritnich";
githubId = 22116767;
name = "Kritnich";
};
kroell = {
email = "nixosmainter@makroell.de";
github = "rokk4";
@ -4958,6 +5056,10 @@
github = "kyleondy";
githubId = 1640900;
name = "Kyle Ondy";
keys = [{
longkeyid = "rsa4096/0xDB0E3C33491F91C9";
fingerprint = "3C79 9D26 057B 64E6 D907 B0AC DB0E 3C33 491F 91C9";
}];
};
kylesferrazza = {
name = "Kyle Sferrazza";
@ -4971,6 +5073,16 @@
fingerprint = "5A9A 1C9B 2369 8049 3B48 CF5B 81A1 5409 4816 2372";
}];
};
l-as = {
email = "las@protonmail.ch";
github = "L-as";
githubId = 22075344;
keys = [{
longkeyid = "rsa2048/0xAC458A7D1087D025";
fingerprint = "A093 EA17 F450 D4D1 60A0 1194 AC45 8A7D 1087 D025";
}];
name = "Las Safin";
};
laikq = {
email = "gwen@quasebarth.de";
github = "laikq";
@ -5110,12 +5222,24 @@
githubId = 42153076;
name = "Alexey Nikashkin";
};
lesuisse = {
email = "thomas@gerbet.me";
github = "LeSuisse";
githubId = 737767;
name = "Thomas Gerbet";
};
lethalman = {
email = "lucabru@src.gnome.org";
github = "lethalman";
githubId = 480920;
name = "Luca Bruno";
};
leungbk = {
email = "leungbk@mailfence.com";
github = "leungbk";
githubId = 29217594;
name = "Brian Leung";
};
lewo = {
email = "lewo@abesis.fr";
github = "nlewo";
@ -5150,6 +5274,12 @@
githubId = 307589;
name = "Nathaniel Baxter";
};
liamdiprose = {
email = "liam@liamdiprose.com";
github = "liamdiprose";
githubId = 1769386;
name = "Liam Diprose";
};
liff = {
email = "liff@iki.fi";
github = "liff";
@ -5432,6 +5562,12 @@
githubId = 2057309;
name = "Sergey Sofeychuk";
};
lxea = {
email = "nix@amk.ie";
github = "lxea";
githubId = 7910815;
name = "Alex McGrath";
};
lynty = {
email = "ltdong93+nix@gmail.com";
github = "lynty";
@ -5564,6 +5700,12 @@
email = "markus@wotringer.de";
name = "Markus Wotringer";
};
marijanp = {
name = "Marijan Petričević";
email = "marijan.petricevic94@gmail.com";
github = "marijanp";
githubId = 13599169;
};
marius851000 = {
email = "mariusdavid@laposte.net";
name = "Marius David";
@ -5598,6 +5740,12 @@
fingerprint = "B573 5118 0375 A872 FBBF 7770 B629 036B E399 EEE9";
}];
};
mausch = {
email = "mauricioscheffer@gmail.com";
github = "mausch";
githubId = 95194;
name = "Mauricio Scheffer";
};
matejc = {
email = "cotman.matej@gmail.com";
github = "matejc";
@ -6587,6 +6735,12 @@
githubId = 148037;
name = "Joachim Breitner";
};
nomisiv = {
email = "simon@nomisiv.com";
github = "NomisIV";
githubId = 47303199;
name = "Simon Gutgesell";
};
noneucat = {
email = "andy@lolc.at";
github = "noneucat";
@ -6663,6 +6817,12 @@
githubId = 7677321;
name = "Paul Trehiou";
};
nyanotech = {
name = "nyanotech";
email = "nyanotechnology@gmail.com";
github = "nyanotech";
githubId = 33802077;
};
nyarly = {
email = "nyarly@gmail.com";
github = "nyarly";
@ -7101,6 +7261,16 @@
fingerprint = "A3A3 65AE 16ED A7A0 C29C 88F1 9712 452E 8BE3 372E";
}];
};
pinpox = {
email = "mail@pablo.tools";
github = "pinpox";
githubId = 1719781;
name = "Pablo Ovelleiro Corral";
keys = [{
longkeyid = "sa4096/0x823A6154426408D3";
fingerprint = "D03B 218C AE77 1F77 D7F9 20D9 823A 6154 4264 08D3";
}];
};
piotr = {
email = "ppietrasa@gmail.com";
name = "Piotr Pietraszkiewicz";
@ -7147,6 +7317,12 @@
githubId = 13000278;
name = "Maksim Bronsky";
};
PlushBeaver = {
name = "Dmitry Kozlyuk";
email = "dmitry.kozliuk+nixpkgs@gmail.com";
github = "PlushBeaver";
githubId = 8988269;
};
pmahoney = {
email = "pat@polycrystal.org";
github = "pmahoney";
@ -7597,6 +7773,12 @@
githubId = 42433779;
name = "Rémy Grünblatt";
};
rguevara84 = {
email = "fuzztkd@gmail.com";
github = "rguevara84";
githubId = 12279531;
name = "Ricardo Guevara";
};
rht = {
email = "rhtbot@protonmail.com";
github = "rht";
@ -8277,6 +8459,12 @@
githubId = 997855;
name = "Narazaki Shuji";
};
shofius = {
name = "Sam Hofius";
email = "sam@samhofi.us";
github = "kf5grd";
githubId = 18297490;
};
shou = {
email = "x+g@shou.io";
github = "Shou";
@ -8515,6 +8703,12 @@
githubId = 7669898;
name = "Katharina Fey";
};
spease = {
email = "peasteven@gmail.com";
github = "spease";
githubId = 2825204;
name = "Steven Pease";
};
spencerjanssen = {
email = "spencerjanssen@gmail.com";
github = "spencerjanssen";
@ -8545,6 +8739,12 @@
githubId = 36899624;
name = "squalus";
};
srapenne = {
email = "solene@perso.pw";
github = "rapenne-s";
githubId = 248016;
name = "Solène Rapenne";
};
srghma = {
email = "srghma@gmail.com";
github = "srghma";
@ -8669,6 +8869,12 @@
githubId = 1315818;
name = "Felix Bühler";
};
stupremee = {
email = "jutus.k@protonmail.com";
github = "Stupremee";
githubId = 39732259;
name = "Justus K";
};
suhr = {
email = "suhr@i2pmail.org";
github = "suhr";
@ -8711,6 +8917,12 @@
githubId = 1040871;
name = "Mathis Antony";
};
svend = {
email = "svend@svends.net";
github = "svend";
githubId = 306190;
name = "Svend Sorensen";
};
svrana = {
email = "shaw@vranix.com";
github = "svrana";
@ -9055,6 +9267,12 @@
githubId = 844343;
name = "Thiago K. Okada";
};
thibautmarty = {
email = "github@thibautmarty.fr";
github = "ThibautMarty";
githubId = 3268082;
name = "Thibaut Marty";
};
thmzlt = {
email = "git@thomazleite.com";
github = "thmzlt";
@ -9660,6 +9878,10 @@
email = "oliver.huntuk@gmail.com";
name = "Oliver Hunt";
};
vq = {
email = "vq@erq.se";
name = "Daniel Nilsson";
};
vrthra = {
email = "rahul@gopinath.org";
github = "vrthra";
@ -9766,6 +9988,12 @@
githubId = 43315;
name = "William Roe";
};
wldhx = {
email = "wldhx+nixpkgs@wldhx.me";
github = "wldhx";
githubId = 15619766;
name = "wldhx";
};
wmertens = {
email = "Wout.Mertens@gmail.com";
github = "wmertens";

View file

@ -6,7 +6,7 @@ basexx,,,,,
binaryheap,,,,,vcunat
bit32,,,,lua5_1,lblasc
busted,,,,,
cassowary,,,,,marsam
cassowary,,,,,marsam alerque
cjson,lua-cjson,,,,
compat53,,,,,vcunat
cosmo,,,,,marsam


View file

@ -1,4 +1,4 @@
{ stdenv, makeWrapper, perl, perlPackages }:
{ stdenv, lib, makeWrapper, perl, perlPackages }:
stdenv.mkDerivation {
name = "nixpkgs-lint-1";
@ -15,9 +15,9 @@ stdenv.mkDerivation {
wrapProgram $out/bin/nixpkgs-lint --set PERL5LIB $PERL5LIB
'';
meta = {
maintainers = [ stdenv.lib.maintainers.eelco ];
meta = with lib; {
maintainers = [ maintainers.eelco ];
description = "A utility for Nixpkgs contributors to check Nixpkgs for common errors";
platforms = stdenv.lib.platforms.unix;
platforms = platforms.unix;
};
}

View file

@ -66,7 +66,7 @@ nixpkgs$ ${0} ${GENERATED_NIXFILE}
These packages are manually refined in lua-overrides.nix
*/
{ self, stdenv, fetchurl, fetchgit, pkgs, ... } @ args:
{ self, stdenv, lib, fetchurl, fetchgit, pkgs, ... } @ args:
self: super:
with self;
{

View file

@ -87,7 +87,7 @@ nixpkgs.config.packageOverrides = pkgs:
You can edit the config with this snippet (by default <command>make
menuconfig</command> won't work out of the box on nixos):
<screen><![CDATA[
nix-shell -E 'with import <nixpkgs> {}; kernelToOverride.overrideAttrs (o: {nativeBuildInputs=o.nativeBuildInputs ++ [ pkgconfig ncurses ];})'
nix-shell -E 'with import <nixpkgs> {}; kernelToOverride.overrideAttrs (o: {nativeBuildInputs=o.nativeBuildInputs ++ [ pkg-config ncurses ];})'
]]></screen>
or you can let nixpkgs generate the configuration. Nixpkgs generates it via
answering the interactive kernel utility <command>make config</command>. The

View file

@ -11,8 +11,7 @@
</para>
<para>
It makes virtio modules available on the initrd, sets the system time from
the hardware clock to work around a bug in qemu-kvm, and
<link linkend="opt-security.rngd.enable">enables rngd</link>.
It makes virtio modules available on the initrd and sets the system time from
the hardware clock to work around a bug in qemu-kvm.
</para>
</section>

View file

@ -186,7 +186,7 @@
The driver has many options (see <xref linkend="ch-options"/>). For
instance, the following disables tap-to-click behavior:
<programlisting>
<xref linkend="opt-services.xserver.libinput.tapping"/> = false;
<xref linkend="opt-services.xserver.libinput.touchpad.tapping"/> = false;
</programlisting>
Note: the use of <literal>services.xserver.synaptics</literal> is deprecated
since NixOS 17.09.

View file

@ -1,7 +1,7 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xml:id="chap-contributing">
<title>Contributing to this documentation</title>
<title>Contributing to this manual</title>
<para>
The DocBook sources of NixOS' manual are in the <filename
xlink:href="https://github.com/NixOS/nixpkgs/tree/master/nixos/doc/manual">

View file

@ -21,7 +21,11 @@
xlink:href="https://discourse.nixos.org">Discourse</literal> or
on the <link
xlink:href="irc://irc.freenode.net/#nixos">
<literal>#nixos</literal> channel on Freenode</link>. Bugs should be
<literal>#nixos</literal> channel on Freenode</link>, or
consider
<link
xlink:href="#chap-contributing">
contributing to this manual</link>. Bugs should be
reported in
<link
xlink:href="https://github.com/NixOS/nixpkgs/issues">NixOS

View file

@ -43,6 +43,15 @@
Linux kernel is updated to branch 5.4 by default (from 4.19).
</para>
</listitem>
<listitem>
<para>
Grub is updated to 2.04, adding support for booting from F2FS filesystems and
Btrfs volumes using zstd compression. Note that some users have been unable
to boot after upgrading to 2.04 - for more information, please see <link
xlink:href="https://github.com/NixOS/nixpkgs/issues/61718#issuecomment-617618503">this
discussion</link>.
</para>
</listitem>
<listitem>
<para>
The PostgreSQL NixOS service now defaults to v11.

View file

@ -418,6 +418,26 @@ http://some.json-exporter.host:7979/probe?target=https://example.com/some/json/e
SDK licenses if your project requires it. See the androidenv documentation for more details.
</para>
</listitem>
<listitem>
<para>
The attribute <varname>mpi</varname> is now consistently used to
provide a default, system-wide MPI implementation.
The default implementation is openmpi, which was already used by
all derivations affected by this change.
Note that all packages that used <varname>mpi ? null</varname> as an input
for optional MPI builds have been changed to the boolean input parameter
<varname>useMpi</varname> to enable building with MPI.
Building all packages with <varname>mpich</varname> instead
of the default <varname>openmpi</varname> can now be achieved like this:
<programlisting>
self: super:
{
mpi = super.mpich;
}
</programlisting>
</para>
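<para>
As a sketch (the package name is hypothetical), a derivation that supports an
optional MPI build is now enabled through that boolean parameter:
<programlisting>
somePackage.override { useMpi = true; }
</programlisting>
</para>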
</listitem>
<listitem>
<para>
The Searx module has been updated with the ability to configure the
@ -430,6 +450,22 @@ http://some.json-exporter.host:7979/probe?target=https://example.com/some/json/e
dynamically allocated uid.
</para>
</listitem>
<listitem>
<para>
The libinput module has been updated with the ability to configure mouse and touchpad settings separately.
The options under <literal>services.xserver.libinput</literal> have been moved to <literal>services.xserver.libinput.touchpad</literal>,
and there is a new <literal>services.xserver.libinput.mouse</literal> for mouse-related configuration.
</para>
<para>
Since touchpad options no longer apply to all devices, you may want to replicate your touchpad configuration in
the mouse section.
</para>
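<para>
As a sketch, a configuration that previously set a touchpad option directly on
<literal>services.xserver.libinput</literal> now sets it on the <literal>touchpad</literal>
submodule, with an analogous <literal>mouse</literal> section for mice:
<programlisting>
services.xserver.libinput.touchpad.naturalScrolling = true;
services.xserver.libinput.mouse.naturalScrolling = true;
</programlisting>
</para>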
</listitem>
<listitem>
<para>
ALSA OSS emulation (<varname>sound.enableOSSEmulation</varname>) is now disabled by default.
</para>
</listitem>
</itemizedlist>
</section>
@ -441,6 +477,14 @@ http://some.json-exporter.host:7979/probe?target=https://example.com/some/json/e
<title>Other Notable Changes</title>
<itemizedlist>
<listitem>
<para>
<literal>stdenv.lib</literal> has been deprecated and will break
eval in 21.11. Please use <literal>pkgs.lib</literal> instead.
See <link xlink:href="https://github.com/NixOS/nixpkgs/issues/108938">#108938</link>
for details.
</para>
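<para>
As a minimal sketch, a package expression that previously referred to
<literal>stdenv.lib</literal> takes <literal>lib</literal> as an argument instead:
<programlisting>
{ lib, stdenv }:

stdenv.mkDerivation {
  # ...
  meta.license = lib.licenses.mit;
}
</programlisting>
</para>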
</listitem>
<listitem>
<para>
The Mailman NixOS module (<literal>services.mailman</literal>) has a new
@ -574,6 +618,15 @@ http://some.json-exporter.host:7979/probe?target=https://example.com/some/json/e
been dropped from upstream releases.
</para>
</listitem>
<listitem>
<para>
In the ACME module, the data used to build the hash for the account
directory has changed to accommodate new features that reduce account
rate limit issues. This will trigger new account creation on the first
rebuild following this update. No issues are expected to arise from this,
thanks to the new account creation handling.
</para>
</listitem>
<listitem>
<para>
<xref linkend="opt-users.users._name_.createHome" /> now always ensures home directory permissions to be <literal>0700</literal>.
@ -592,6 +645,33 @@ http://some.json-exporter.host:7979/probe?target=https://example.com/some/json/e
<literal>/etc/netgroup</literal> defines network-wide groups and may affect to setups using NIS.
</para>
</listitem>
<listitem>
<para>
Platforms, like <varname>stdenv.hostPlatform</varname>, no longer have a <varname>platform</varname> attribute.
It has been (mostly) flattened away:
</para>
<itemizedlist>
<listitem><para><varname>platform.gcc</varname> is now <varname>gcc</varname></para></listitem>
<listitem><para><literal>platform.kernel*</literal> is now <literal>linux-kernel.*</literal></para></listitem>
</itemizedlist>
<para>
Additionally, <varname>platform.kernelArch</varname> moved to the top level as <varname>linuxArch</varname> to match the other <literal>*Arch</literal> variables.
</para>
<para>
The <varname>platform</varname> grouping of these things never meant anything, and was just a historical/implementation artifact that was overdue for removal.
</para>
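<para>
As a sketch of the rename, an expression that previously referenced the old
attribute path would now read:
<programlisting>
# before
stdenv.hostPlatform.platform.kernelArch
# after
stdenv.hostPlatform.linuxArch
</programlisting>
</para>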
</listitem>
<listitem>
<para>
<varname>services.restic</varname> now uses a dedicated cache directory for every backup defined in <varname>services.restic.backups</varname>. The old global cache directory, <literal>/root/.cache/restic</literal>, is now unused and can be removed to free up disk space.
</para>
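<para>
For example, the now-unused cache can be deleted with:
<programlisting>
rm -rf /root/.cache/restic
</programlisting>
</para>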
</listitem>
<listitem>
<para>
<literal>isync</literal>: The <literal>isync</literal> compatibility wrapper was removed. The Master/Slave
terminology has been deprecated and should be replaced with Far/Near in the configuration file.
</para>
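<para>
As a sketch of the renamed keywords (channel and store names are hypothetical),
an <literal>mbsyncrc</literal> channel that previously used
<literal>Master</literal>/<literal>Slave</literal> would now read:
<programlisting>
Channel inbox
Far :remote:INBOX
Near :local:INBOX
</programlisting>
</para>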
</listitem>
</itemizedlist>
</section>
</section>

View file

@ -257,7 +257,8 @@ let format' = format; in let
''}
echo "copying staging root to image..."
cptofs -p ${optionalString (partitionTableType != "none") "-P ${rootPartition}"} -t ${fsType} -i $diskImage $root/* /
cptofs -p ${optionalString (partitionTableType != "none") "-P ${rootPartition}"} -t ${fsType} -i $diskImage $root/* / ||
(echo >&2 "ERROR: cptofs failed. diskSize might be too small for closure."; exit 1)
'';
in pkgs.vmTools.runInLinuxVM (
pkgs.runCommand name

View file

@ -83,7 +83,7 @@ in
packages = mkOption {
type = types.listOf types.package;
default = with pkgs.kbdKeymaps; [ dvp neo ];
defaultText = ''with pkgs.kbdKeymaps; [ dvp neo ]'';
defaultText = "with pkgs.kbdKeymaps; [ dvp neo ]";
description = ''
List of additional packages that provide console fonts, keymaps and
other resources for virtual consoles use.

View file

@ -436,7 +436,7 @@ in
useEmbeddedBitmaps = mkOption {
type = types.bool;
default = false;
description = ''Use embedded bitmaps in fonts like Calibri.'';
description = "Use embedded bitmaps in fonts like Calibri.";
};
};

View file

@ -1,11 +1,9 @@
{ config, lib, pkgs, ... }:
with lib;
{
options = {
gnu = mkOption {
type = types.bool;
gnu = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
When enabled, GNU software is chosen by default whenever there is
@ -15,7 +13,7 @@ with lib;
};
};
config = mkIf config.gnu {
config = lib.mkIf config.gnu {
environment.systemPackages = with pkgs;
# TODO: Adjust `requiredPackages' from `system-path.nix'.
@ -26,7 +24,7 @@ with lib;
nano zile
texinfo # for the stand-alone Info reader
]
++ stdenv.lib.optional (!stdenv.isAarch32) grub2;
++ lib.optional (!stdenv.isAarch32) grub2;
# GNU GRUB, where available.

View file

@ -84,7 +84,7 @@ with lib;
environment.etc."locale.conf".source = pkgs.writeText "locale.conf"
''
LANG=${config.i18n.defaultLocale}
${concatStringsSep "\n" (mapAttrsToList (n: v: ''${n}=${v}'') config.i18n.extraLocaleSettings)}
${concatStringsSep "\n" (mapAttrsToList (n: v: "${n}=${v}") config.i18n.extraLocaleSettings)}
'';
};

View file

@ -58,6 +58,7 @@ in
"2.nixos.pool.ntp.org"
"3.nixos.pool.ntp.org"
];
type = types.listOf types.str;
description = ''
The set of NTP servers from which to synchronise.
'';
@ -194,8 +195,7 @@ in
'';
# /etc/netgroup: Network-wide groups.
netgroup.text = mkDefault ''
'';
netgroup.text = mkDefault "";
# /etc/host.conf: resolver configuration file
"host.conf".text = ''

View file

@ -183,7 +183,7 @@ in {
config = mkOption {
type = types.attrsOf types.unspecified;
default = {};
description = ''Config of the pulse daemon. See <literal>man pulse-daemon.conf</literal>.'';
description = "Config of the pulse daemon. See <literal>man pulse-daemon.conf</literal>.";
example = literalExample ''{ realtime-scheduling = "yes"; }'';
};
};

View file

@ -364,7 +364,7 @@ let
count = mkOption {
type = types.int;
default = 1;
description = ''Count of subordinate user ids'';
description = "Count of subordinate user ids";
};
};
};
@ -381,7 +381,7 @@ let
count = mkOption {
type = types.int;
default = 1;
description = ''Count of subordinate group ids'';
description = "Count of subordinate group ids";
};
};
};

View file

@ -62,7 +62,7 @@ with lib;
services.dbus.packages = packages;
systemd.packages = packages;
environment.variables = {
environment.sessionVariables = {
GTK_USE_PORTAL = mkIf cfg.gtkUsePortal "1";
XDG_DESKTOP_PORTAL_DIR = "${joinedPortals}/share/xdg-desktop-portal/portals";
};

View file

@ -68,11 +68,11 @@ let
patchShebangs scripts/*
substituteInPlace scripts/Makefile.lib \
--replace 'DTC_FLAGS += $(DTC_FLAGS_$(basetarget))' 'DTC_FLAGS += $(DTC_FLAGS_$(basetarget)) -@'
make ${pkgs.stdenv.hostPlatform.platform.kernelBaseConfig} ARCH="${pkgs.stdenv.hostPlatform.platform.kernelArch}"
make dtbs ARCH="${pkgs.stdenv.hostPlatform.platform.kernelArch}"
make ${pkgs.stdenv.hostPlatform.linux-kernel.baseConfig} ARCH="${pkgs.stdenv.hostPlatform.linuxArch}"
make dtbs ARCH="${pkgs.stdenv.hostPlatform.linuxArch}"
'';
installPhase = ''
make dtbs_install INSTALL_DTBS_PATH=$out/dtbs ARCH="${pkgs.stdenv.hostPlatform.platform.kernelArch}"
make dtbs_install INSTALL_DTBS_PATH=$out/dtbs ARCH="${pkgs.stdenv.hostPlatform.linuxArch}"
'';
};
@ -115,7 +115,7 @@ in
options = {
hardware.deviceTree = {
enable = mkOption {
default = pkgs.stdenv.hostPlatform.platform.kernelDTB or false;
default = pkgs.stdenv.hostPlatform.linux-kernel.DTB or false;
type = types.bool;
description = ''
Build device tree files. These are used to describe the

View file

@ -0,0 +1,31 @@
{ config, lib, pkgs, ... }:
with lib;
let
kernelVersion = config.boot.kernelPackages.kernel.version;
linuxKernelMinVersion = "5.8";
kernelPatch = pkgs.kernelPatches.ath_regd_optional // {
extraConfig = ''
ATH_USER_REGD y
'';
};
in
{
options.networking.wireless.athUserRegulatoryDomain = mkOption {
default = false;
type = types.bool;
description = ''
If enabled, sets the ATH_USER_REGD kernel config switch to true to
disable the enforcement of EEPROM regulatory restrictions for ath
drivers. Requires at least Linux ${linuxKernelMinVersion}.
'';
};
config = mkIf config.networking.wireless.athUserRegulatoryDomain {
assertions = singleton {
assertion = lessThan 0 (builtins.compareVersions kernelVersion linuxKernelMinVersion);
message = "ATH_USER_REGD patch for kernels older than ${linuxKernelMinVersion} not ported yet!";
};
boot.kernelPatches = [ kernelPatch ];
};
}

View file

@ -19,23 +19,9 @@ in
nitrokey-app package, depending on your device and needs.
'';
};
group = mkOption {
type = types.str;
default = "nitrokey";
example = "wheel";
description = ''
Grant access to Nitrokey devices to users in this group.
'';
};
};
config = mkIf cfg.enable {
services.udev.packages = [
(pkgs.nitrokey-udev-rules.override (attrs:
{ inherit (cfg) group; }
))
];
users.groups.${cfg.group} = {};
services.udev.packages = [ pkgs.nitrokey-udev-rules ];
};
}

View file

@ -0,0 +1,81 @@
{ config, lib, pkgs, ... }:
let
inherit (lib) mkIf mkOption types;
cfg = config.hardware.sensor.hddtemp;
wrapper = pkgs.writeShellScript "hddtemp-wrapper" ''
set -eEuo pipefail
file=/var/lib/hddtemp/hddtemp.db
drives=(${toString (map (e: ''$(realpath ${lib.escapeShellArg e}) '') cfg.drives)})
cp ${pkgs.hddtemp}/share/hddtemp/hddtemp.db $file
${lib.concatMapStringsSep "\n" (e: "echo ${lib.escapeShellArg e} >> $file") cfg.dbEntries}
exec ${pkgs.hddtemp}/bin/hddtemp ${lib.escapeShellArgs cfg.extraArgs} \
--daemon \
--unit=${cfg.unit} \
--file=$file \
''${drives[@]}
'';
in
{
meta.maintainers = with lib.maintainers; [ peterhoeg ];
###### interface
options = {
hardware.sensor.hddtemp = {
enable = mkOption {
description = ''
Enable this option to support HDD/SSD temperature sensors.
'';
type = types.bool;
default = false;
};
drives = mkOption {
description = "List of drives to monitor. If you pass /dev/disk/by-path/* entries, the symlinks will be resolved, as hddtemp doesn't like names with colons.";
type = types.listOf types.str;
};
unit = mkOption {
description = "Celsius or Fahrenheit";
type = types.enum [ "C" "F" ];
default = "C";
};
dbEntries = mkOption {
description = "Additional DB entries";
type = types.listOf types.str;
default = [ ];
};
extraArgs = mkOption {
description = "Additional arguments passed to the daemon.";
type = types.listOf types.str;
default = [ ];
};
};
};
###### implementation
config = mkIf cfg.enable {
systemd.services.hddtemp = {
description = "HDD/SSD temperature";
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "forking";
ExecStart = wrapper;
StateDirectory = "hddtemp";
PrivateTmp = true;
ProtectHome = "tmpfs";
ProtectSystem = "strict";
};
};
};
}

View file

@ -40,7 +40,7 @@ in
default = "wheel";
example = "video";
type = types.str;
description = ''Group for bumblebee socket'';
description = "Group for bumblebee socket";
};
connectDisplay = mkOption {

View file

@ -5,36 +5,17 @@
with lib;
let
nvidia_x11 = let
drivers = config.services.xserver.videoDrivers;
# FIXME: should introduce an option like
# hardware.video.nvidia.package for overriding the default NVIDIA
# driver.
nvidiaForKernel = kernelPackages:
if elem "nvidia" drivers then
kernelPackages.nvidia_x11
else if elem "nvidiaBeta" drivers then
kernelPackages.nvidia_x11_beta
else if elem "nvidiaVulkanBeta" drivers then
kernelPackages.nvidia_x11_vulkan_beta
else if elem "nvidiaLegacy304" drivers then
kernelPackages.nvidia_x11_legacy304
else if elem "nvidiaLegacy340" drivers then
kernelPackages.nvidia_x11_legacy340
else if elem "nvidiaLegacy390" drivers then
kernelPackages.nvidia_x11_legacy390
else null;
nvidia_x11 = nvidiaForKernel config.boot.kernelPackages;
nvidia_libs32 =
if versionOlder nvidia_x11.version "391" then
((nvidiaForKernel pkgs.pkgsi686Linux.linuxPackages).override { libsOnly = true; kernel = null; }).out
else
(nvidiaForKernel config.boot.kernelPackages).lib32;
isDeprecated = str: (hasPrefix "nvidia" str) && (str != "nvidia");
hasDeprecated = drivers: any isDeprecated drivers;
in if (hasDeprecated drivers) then
throw ''
Selecting an nvidia driver has been modified for NixOS 19.03. The version is now set using `hardware.nvidia.package`.
''
else if (elem "nvidia" drivers) then cfg.package else null;
enabled = nvidia_x11 != null;
cfg = config.hardware.nvidia;
pCfg = cfg.prime;
@ -63,6 +44,15 @@ in
'';
};
hardware.nvidia.powerManagement.finegrained = mkOption {
type = types.bool;
default = false;
description = ''
Experimental power management of PRIME offload. For more information, see
chapter 22 of the NVIDIA driver documentation on PCI-Express runtime power management.
'';
};
hardware.nvidia.modesetting.enable = mkOption {
type = types.bool;
default = false;
@ -96,6 +86,16 @@ in
'';
};
hardware.nvidia.prime.amdgpuBusId = mkOption {
type = types.str;
default = "";
example = "PCI:4:0:0";
description = ''
Bus ID of the AMD APU. You can find it using lspci; for example if lspci
shows the AMD APU at "04:00.0", set this option to "PCI:4:0:0".
'';
};
hardware.nvidia.prime.sync.enable = mkOption {
type = types.bool;
default = false;
@ -151,9 +151,22 @@ in
GPUs stay awake even during headless mode.
'';
};
hardware.nvidia.package = lib.mkOption {
type = lib.types.package;
default = config.boot.kernelPackages.nvidiaPackages.stable;
defaultText = "config.boot.kernelPackages.nvidiaPackages.stable";
description = ''
The NVIDIA X11 derivation to use.
'';
example = "config.boot.kernelPackages.nvidiaPackages.legacy340";
};
};
config = mkIf enabled {
config = let
igpuDriver = if pCfg.intelBusId != "" then "modesetting" else "amdgpu";
igpuBusId = if pCfg.intelBusId != "" then pCfg.intelBusId else pCfg.amdgpuBusId;
in mkIf enabled {
assertions = [
{
assertion = with config.services.xserver.displayManager; gdm.nvidiaWayland -> cfg.modesetting.enable;
@ -161,7 +174,13 @@ in
}
{
assertion = primeEnabled -> pCfg.nvidiaBusId != "" && pCfg.intelBusId != "";
assertion = primeEnabled -> pCfg.intelBusId == "" || pCfg.amdgpuBusId == "";
message = ''
You cannot configure both an Intel iGPU and an AMD APU. Pick the one corresponding to your processor.
'';
}
{
assertion = primeEnabled -> pCfg.nvidiaBusId != "" && (pCfg.intelBusId != "" || pCfg.amdgpuBusId != "");
message = ''
When NVIDIA PRIME is enabled, the GPU bus IDs must be configured.
'';
@ -174,6 +193,14 @@ in
assertion = !(syncCfg.enable && offloadCfg.enable);
message = "Only one NVIDIA PRIME solution may be used at a time.";
}
{
assertion = !(syncCfg.enable && cfg.powerManagement.finegrained);
message = "Sync precludes powering down the NVIDIA GPU.";
}
{
assertion = cfg.powerManagement.finegrained -> offloadCfg.enable;
message = "Fine-grained power management requires offload to be enabled.";
}
];
# If Optimus/PRIME is enabled, we:
@ -183,18 +210,22 @@ in
# "nvidia" driver, in order to allow the X server to start without any outputs.
# - Add a separate Device section for the Intel GPU, using the "modesetting"
# driver and with the configured BusID.
# - OR add a separate Device section for the AMD APU, using the "amdgpu"
# driver and with the configured BusID.
# - Reference that Device section from the ServerLayout section as an inactive
# device.
# - Configure the display manager to run specific `xrandr` commands which will
# configure/enable displays connected to the Intel GPU.
# configure/enable displays connected to the Intel iGPU / AMD APU.
services.xserver.useGlamor = mkDefault offloadCfg.enable;
services.xserver.drivers = optional primeEnabled {
name = "modesetting";
services.xserver.drivers = let
in optional primeEnabled {
name = igpuDriver;
display = offloadCfg.enable;
modules = optional (igpuDriver == "amdgpu") [ pkgs.xorg.xf86videoamdgpu ];
deviceSection = ''
BusID "${pCfg.intelBusId}"
BusID "${igpuBusId}"
${optionalString syncCfg.enable ''Option "AccelMethod" "none"''}
'';
} ++ singleton {
@ -205,6 +236,7 @@ in
''
BusID "${pCfg.nvidiaBusId}"
${optionalString syncCfg.allowExternalGpu "Option \"AllowExternalGpus\""}
${optionalString cfg.powerManagement.finegrained "Option \"NVreg_DynamicPowerManagement=0x02\""}
'';
screenSection =
''
@ -214,14 +246,14 @@ in
};
services.xserver.serverLayoutSection = optionalString syncCfg.enable ''
Inactive "Device-modesetting[0]"
Inactive "Device-${igpuDriver}[0]"
'' + optionalString offloadCfg.enable ''
Option "AllowNVIDIAGPUScreens"
'';
services.xserver.displayManager.setupCommands = optionalString syncCfg.enable ''
# Added by nvidia configuration module for Optimus/PRIME.
${pkgs.xorg.xrandr}/bin/xrandr --setprovideroutputsource modesetting NVIDIA-0
${pkgs.xorg.xrandr}/bin/xrandr --setprovideroutputsource ${igpuDriver} NVIDIA-0
${pkgs.xorg.xrandr}/bin/xrandr --auto
'';
@ -230,9 +262,9 @@ in
};
hardware.opengl.package = mkIf (!offloadCfg.enable) nvidia_x11.out;
hardware.opengl.package32 = mkIf (!offloadCfg.enable) nvidia_libs32;
hardware.opengl.package32 = mkIf (!offloadCfg.enable) nvidia_x11.lib32;
hardware.opengl.extraPackages = optional offloadCfg.enable nvidia_x11.out;
hardware.opengl.extraPackages32 = optional offloadCfg.enable nvidia_libs32;
hardware.opengl.extraPackages32 = optional offloadCfg.enable nvidia_x11.lib32;
environment.systemPackages = [ nvidia_x11.bin nvidia_x11.settings ]
++ optionals nvidiaPersistencedEnabled [ nvidia_x11.persistenced ];
@ -292,14 +324,35 @@ in
boot.kernelParams = optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
++ optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1";
# Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
services.udev.extraRules =
''
# Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 255'"
KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 254'"
KERNEL=="card*", SUBSYSTEM=="drm", DRIVERS=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia%n c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) %n'"
KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 0'"
KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 0'"
'' + optionalString cfg.powerManagement.finegrained ''
# Remove NVIDIA USB xHCI Host Controller devices, if present
ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"
# Remove NVIDIA USB Type-C UCSI devices, if present
ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c8000", ATTR{remove}="1"
# Remove NVIDIA Audio devices, if present
ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x040300", ATTR{remove}="1"
# Enable runtime PM for NVIDIA VGA/3D controller devices on driver bind
ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="auto"
ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="auto"
# Disable runtime PM for NVIDIA VGA/3D controller devices on driver unbind
ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="on"
ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="on"
'';
boot.extraModprobeConfig = mkIf cfg.powerManagement.finegrained ''
options nvidia "NVreg_DynamicPowerManagement=0x02"
'';
boot.blacklistedKernelModules = [ "nouveau" "nvidiafb" ];

View file

@ -0,0 +1,18 @@
{ config, pkgs, lib, ... }:
with lib;
let
pkg = [ pkgs.switcheroo-control ];
cfg = config.services.switcherooControl;
in {
options.services.switcherooControl = {
enable = mkEnableOption "switcheroo-control, a D-Bus service to check the availability of dual-GPU";
};
config = mkIf cfg.enable {
services.dbus.packages = pkg;
environment.systemPackages = pkg;
systemd.packages = pkg;
systemd.targets.multi-user.wants = [ "switcheroo-control.service" ];
};
}

View file

@ -42,6 +42,7 @@ in
<itemizedlist>
<listitem><para>ibus: The intelligent input bus, extra input engines can be added using <literal>i18n.inputMethod.ibus.engines</literal>.</para></listitem>
<listitem><para>fcitx: A customizable lightweight input method, extra input engines can be added using <literal>i18n.inputMethod.fcitx.engines</literal>.</para></listitem>
<listitem><para>fcitx5: The next generation of fcitx, addons (including engines, dictionaries, skins) can be added using <literal>i18n.inputMethod.fcitx5.addons</literal>.</para></listitem>
<listitem><para>nabi: A Korean input method based on XIM. Nabi doesn't support Qt 5.</para></listitem>
<listitem><para>uim: The universal input method is a library with an XIM bridge. uim mainly supports Chinese, Japanese and Korean.</para></listitem>
<listitem><para>hime: An extremely easy-to-use input method framework.</para></listitem>

View file

@ -48,7 +48,7 @@ in
panel = mkOption {
type = with types; nullOr path;
default = null;
example = literalExample "''${pkgs.plasma5.plasma-desktop}/lib/libexec/kimpanel-ibus-panel";
example = literalExample "''${pkgs.plasma5Packages.plasma-desktop}/lib/libexec/kimpanel-ibus-panel";
description = "Replace the IBus panel with another panel.";
};
};

View file

@ -88,7 +88,7 @@ with lib;
system.build.netbootIpxeScript = pkgs.writeTextDir "netboot.ipxe" ''
#!ipxe
kernel ${pkgs.stdenv.hostPlatform.platform.kernelTarget} init=${config.system.build.toplevel}/init initrd=initrd ${toString config.boot.kernelParams}
kernel ${pkgs.stdenv.hostPlatform.linux-kernel.target} init=${config.system.build.toplevel}/init initrd=initrd ${toString config.boot.kernelParams}
initrd initrd
boot
'';

View file

@ -1,11 +1,11 @@
{lib, stdenv, boost, cmake, pkgconfig, nix, ... }:
{lib, stdenv, boost, cmake, pkg-config, nix, ... }:
stdenv.mkDerivation rec {
name = "nixos-option";
src = ./.;
nativeBuildInputs = [ cmake pkgconfig ];
nativeBuildInputs = [ cmake pkg-config ];
buildInputs = [ boost nix ];
meta = {
license = stdenv.lib.licenses.lgpl2Plus;
maintainers = with lib.maintainers; [ chkno ];
meta = with lib; {
license = licenses.lgpl2Plus;
maintainers = with maintainers; [ chkno ];
};
}

View file

@ -26,6 +26,7 @@ in
};
reservedMemory = mkOption {
default = "128M";
type = types.str;
description = ''
The amount of memory reserved for the crashdump kernel.
If you choose a too high value, dmesg will mention

View file

@ -71,7 +71,7 @@ in
#utmp = 29; # unused
# ddclient = 30; # converted to DynamicUser = true
davfs2 = 31;
#disnix = 33; # unused
#disnix = 33; # module removed
osgi = 34;
tor = 35;
cups = 36;
@ -387,7 +387,7 @@ in
utmp = 29;
# ddclient = 30; # converted to DynamicUser = true
davfs2 = 31;
disnix = 33;
#disnix = 33; # module removed
osgi = 34;
tor = 35;
#cups = 36; # unused

View file

@ -215,7 +215,7 @@ in {
''
else ''
exec ${cfg.locate}/bin/updatedb \
${optionalString (cfg.localuser != null && ! isMLocate) ''--localuser=${cfg.localuser}''} \
${optionalString (cfg.localuser != null && ! isMLocate) "--localuser=${cfg.localuser}"} \
--output=${toString cfg.output} ${concatStringsSep " " cfg.extraFlags}
'';
environment = optionalAttrs (!isMLocate) {

View file

@ -73,7 +73,7 @@ in
}
'';
type = pkgsType;
example = literalExample ''import <nixpkgs> {}'';
example = literalExample "import <nixpkgs> {}";
description = ''
If set, the pkgs argument to all NixOS modules is the value of
this option, extended with <code>nixpkgs.overlays</code>, if

View file

@ -46,12 +46,14 @@
./hardware/cpu/intel-microcode.nix
./hardware/digitalbitbox.nix
./hardware/device-tree.nix
./hardware/sensor/hddtemp.nix
./hardware/sensor/iio.nix
./hardware/keyboard/zsa.nix
./hardware/ksm.nix
./hardware/ledger.nix
./hardware/logitech.nix
./hardware/mcelog.nix
./hardware/network/ath-user-regd.nix
./hardware/network/b43.nix
./hardware/network/intel-2200bg.nix
./hardware/nitrokey.nix
@ -169,6 +171,7 @@
./programs/sway.nix
./programs/system-config-printer.nix
./programs/thefuck.nix
./programs/tilp2.nix
./programs/tmux.nix
./programs/traceroute.nix
./programs/tsm-client.nix
@ -348,6 +351,7 @@
./services/editors/emacs.nix
./services/editors/infinoted.nix
./services/games/factorio.nix
./services/games/freeciv.nix
./services/games/minecraft-server.nix
./services/games/minetest-server.nix
./services/games/openarena.nix
@ -448,8 +452,6 @@
./services/misc/devmon.nix
./services/misc/dictd.nix
./services/misc/dwm-status.nix
./services/misc/dysnomia.nix
./services/misc/disnix.nix
./services/misc/docker-registry.nix
./services/misc/domoticz.nix
./services/misc/errbot.nix
@ -608,6 +610,8 @@
./services/networking/atftpd.nix
./services/networking/avahi-daemon.nix
./services/networking/babeld.nix
./services/networking/bee.nix
./services/networking/bee-clef.nix
./services/networking/biboumi.nix
./services/networking/bind.nix
./services/networking/bitcoind.nix
@ -633,6 +637,7 @@
./services/networking/dnsdist.nix
./services/networking/dnsmasq.nix
./services/networking/ncdns.nix
./services/networking/nomad.nix
./services/networking/ejabberd.nix
./services/networking/epmd.nix
./services/networking/ergo.nix
@ -724,6 +729,7 @@
./services/networking/owamp.nix
./services/networking/pdnsd.nix
./services/networking/pixiecore.nix
./services/networking/pleroma.nix
./services/networking/polipo.nix
./services/networking/powerdns.nix
./services/networking/pdns-recursor.nix
@ -870,10 +876,12 @@
./services/web-apps/documize.nix
./services/web-apps/dokuwiki.nix
./services/web-apps/engelsystem.nix
./services/web-apps/galene.nix
./services/web-apps/gerrit.nix
./services/web-apps/gotify-server.nix
./services/web-apps/grocy.nix
./services/web-apps/hedgedoc.nix
./services/web-apps/hledger-web.nix
./services/web-apps/icingaweb2/icingaweb2.nix
./services/web-apps/icingaweb2/module-monitoring.nix
./services/web-apps/ihatemoney

View file

@ -1,7 +1,7 @@
# Common configuration for virtual machines running under QEMU (using
# virtio).
{ lib, ... }:
{ ... }:
{
boot.initrd.availableKernelModules = [ "virtio_net" "virtio_pci" "virtio_mmio" "virtio_blk" "virtio_scsi" "9p" "9pnet_virtio" ];
@ -14,6 +14,4 @@
# to the *boot time* of the host).
hwclock -s
'';
security.rngd.enable = lib.mkDefault false;
}

View file

@ -27,14 +27,14 @@ in
# the options below are the same as in "captive-browser.toml"
browser = mkOption {
type = types.str;
default = concatStringsSep " " [ ''${pkgs.chromium}/bin/chromium''
''--user-data-dir=''${XDG_DATA_HOME:-$HOME/.local/share}/chromium-captive''
default = concatStringsSep " " [ "${pkgs.chromium}/bin/chromium"
"--user-data-dir=\${XDG_DATA_HOME:-$HOME/.local/share}/chromium-captive"
''--proxy-server="socks5://$PROXY"''
''--host-resolver-rules="MAP * ~NOTFOUND , EXCLUDE localhost"''
''--no-first-run''
''--new-window''
''--incognito''
''http://cache.nixos.org/''
"--no-first-run"
"--new-window"
"--incognito"
"http://cache.nixos.org/"
];
description = ''
The shell (/bin/sh) command executed once the proxy starts.
@ -62,7 +62,7 @@ in
socks5-addr = mkOption {
type = types.str;
default = "localhost:1666";
description = ''the listen address for the SOCKS5 proxy server'';
description = "the listen address for the SOCKS5 proxy server";
};
bindInterface = mkOption {

View file

@ -16,18 +16,21 @@ in {
'';
};
group = mkOption {
type = types.str;
default = "cdrom";
description = ''
Group that users must be in to use <command>cdemu</command>.
'';
};
gui = mkOption {
type = types.bool;
default = true;
description = ''
Whether to install the <command>cdemu</command> GUI (gCDEmu).
'';
};
image-analyzer = mkOption {
type = types.bool;
default = true;
description = ''
Whether to install the image analyzer.

View file

@ -80,6 +80,8 @@ in
# Retry the command if we just installed it.
if [ $? = 126 ]; then
"$@"
else
return 127
fi
else
# Indicate than there was an error so ZSH falls back to its default handler

View file

@ -13,6 +13,27 @@ let
(filterAttrs (k: v: v != null) cfg.shellAliases)
);
envShellInit = pkgs.writeText "shellInit" cfge.shellInit;
envLoginShellInit = pkgs.writeText "loginShellInit" cfge.loginShellInit;
envInteractiveShellInit = pkgs.writeText "interactiveShellInit" cfge.interactiveShellInit;
sourceEnv = file:
if cfg.useBabelfish then
"source /etc/fish/${file}.fish"
else
''
set fish_function_path ${pkgs.fishPlugins.foreign-env}/share/fish/vendor_functions.d $fish_function_path
fenv source /etc/fish/foreign-env/${file} > /dev/null
set -e fish_function_path[1]
'';
babelfishTranslate = path: name:
pkgs.runCommand "${name}.fish" {
nativeBuildInputs = [ pkgs.babelfish ];
} "${pkgs.babelfish}/bin/babelfish < ${path} > $out;";
in
{
@ -29,6 +50,15 @@ in
type = types.bool;
};
useBabelfish = mkOption {
type = types.bool;
default = false;
description = ''
If enabled, the configured environment will be translated to native fish using <link xlink:href="https://github.com/bouk/babelfish">babelfish</link>.
Otherwise, <link xlink:href="https://github.com/oh-my-fish/plugin-foreign-env">foreign-env</link> will be used.
'';
};
vendor.config.enable = mkOption {
type = types.bool;
default = true;
@ -105,11 +135,32 @@ in
# Required for man completions
documentation.man.generateCaches = lib.mkDefault true;
environment.etc."fish/foreign-env/shellInit".text = cfge.shellInit;
environment.etc."fish/foreign-env/loginShellInit".text = cfge.loginShellInit;
environment.etc."fish/foreign-env/interactiveShellInit".text = cfge.interactiveShellInit;
environment = mkMerge [
(mkIf cfg.useBabelfish
{
etc."fish/setEnvironment.fish".source = babelfishTranslate config.system.build.setEnvironment "setEnvironment";
etc."fish/shellInit.fish".source = babelfishTranslate envShellInit "shellInit";
etc."fish/loginShellInit.fish".source = babelfishTranslate envLoginShellInit "loginShellInit";
etc."fish/interactiveShellInit.fish".source = babelfishTranslate envInteractiveShellInit "interactiveShellInit";
})
environment.etc."fish/nixos-env-preinit.fish".text = ''
(mkIf (!cfg.useBabelfish)
{
etc."fish/foreign-env/shellInit".source = envShellInit;
etc."fish/foreign-env/loginShellInit".source = envLoginShellInit;
etc."fish/foreign-env/interactiveShellInit".source = envInteractiveShellInit;
})
{
etc."fish/nixos-env-preinit.fish".text =
if cfg.useBabelfish
then ''
# source the NixOS environment config
if [ -z "$__NIXOS_SET_ENVIRONMENT_DONE" ]
source /etc/fish/setEnvironment.fish
end
''
else ''
# This happens before $__fish_datadir/config.fish sets fish_function_path, so it is currently
# unset. We set it and then completely erase it, leaving its configuration to $__fish_datadir/config.fish
set fish_function_path ${pkgs.fishPlugins.foreign-env}/share/fish/vendor_functions.d $__fish_datadir/functions
@ -122,15 +173,15 @@ in
# clear fish_function_path so that it will be correctly set when we return to $__fish_datadir/config.fish
set -e fish_function_path
'';
}
environment.etc."fish/config.fish".text = ''
{
etc."fish/config.fish".text = ''
# /etc/fish/config.fish: DO NOT EDIT -- this file has been generated automatically.
# if we haven't sourced the general config, do it
if not set -q __fish_nixos_general_config_sourced
set --prepend fish_function_path ${pkgs.fishPlugins.foreign-env}/share/fish/vendor_functions.d
fenv source /etc/fish/foreign-env/shellInit > /dev/null
set -e fish_function_path[1]
${sourceEnv "shellInit"}
${cfg.shellInit}
@ -142,9 +193,7 @@ in
# if we haven't sourced the login config, do it
status --is-login; and not set -q __fish_nixos_login_config_sourced
and begin
set --prepend fish_function_path ${pkgs.fishPlugins.foreign-env}/share/fish/vendor_functions.d
fenv source /etc/fish/foreign-env/loginShellInit > /dev/null
set -e fish_function_path[1]
${sourceEnv "loginShellInit"}
${cfg.loginShellInit}
@ -158,9 +207,7 @@ in
and begin
${fishAliases}
set --prepend fish_function_path ${pkgs.fishPlugins.foreign-env}/share/fish/vendor_functions.d
fenv source /etc/fish/foreign-env/interactiveShellInit > /dev/null
set -e fish_function_path[1]
${sourceEnv "interactiveShellInit"}
${cfg.promptInit}
${cfg.interactiveShellInit}
@ -171,23 +218,10 @@ in
set -g __fish_nixos_interactive_config_sourced 1
end
'';
}
programs.fish.interactiveShellInit = ''
# add completions generated by NixOS to $fish_complete_path
begin
# joins with null byte to accommodate all characters in paths, then respectively gets all paths before (exclusive) / after (inclusive) the first one including "generated_completions",
# splits by null byte, and then removes all empty lines produced by using 'string'
set -l prev (string join0 $fish_complete_path | string match --regex "^.*?(?=\x00[^\x00]*generated_completions.*)" | string split0 | string match -er ".")
set -l post (string join0 $fish_complete_path | string match --regex "[^\x00]*generated_completions.*" | string split0 | string match -er ".")
set fish_complete_path $prev "/etc/fish/generated_completions" $post
end
# prevent fish from generating completions on first run
if not test -d $__fish_user_data_dir/generated_completions
${pkgs.coreutils}/bin/mkdir $__fish_user_data_dir/generated_completions
end
'';
environment.etc."fish/generated_completions".source =
{
etc."fish/generated_completions".source =
let
patchedGenerator = pkgs.stdenv.mkDerivation {
name = "fish_patched-completion-generator";
@ -228,19 +262,40 @@ in
ignoreCollisions = true;
paths = map generateCompletions config.environment.systemPackages;
};
}
# include programs that bring their own completions
environment.pathsToLink = []
{
pathsToLink = []
++ optional cfg.vendor.config.enable "/share/fish/vendor_conf.d"
++ optional cfg.vendor.completions.enable "/share/fish/vendor_completions.d"
++ optional cfg.vendor.functions.enable "/share/fish/vendor_functions.d";
}
environment.systemPackages = [ pkgs.fish ];
{ systemPackages = [ pkgs.fish ]; }
environment.shells = [
{
shells = [
"/run/current-system/sw/bin/fish"
"${pkgs.fish}/bin/fish"
];
}
];
programs.fish.interactiveShellInit = ''
# add completions generated by NixOS to $fish_complete_path
begin
# joins with null byte to accommodate all characters in paths, then respectively gets all paths before (exclusive) / after (inclusive) the first one including "generated_completions",
# splits by null byte, and then removes all empty lines produced by using 'string'
set -l prev (string join0 $fish_complete_path | string match --regex "^.*?(?=\x00[^\x00]*generated_completions.*)" | string split0 | string match -er ".")
set -l post (string join0 $fish_complete_path | string match --regex "[^\x00]*generated_completions.*" | string split0 | string match -er ".")
set fish_complete_path $prev "/etc/fish/generated_completions" $post
end
# prevent fish from generating completions on first run
if not test -d $__fish_user_data_dir/generated_completions
${pkgs.coreutils}/bin/mkdir $__fish_user_data_dir/generated_completions
end
'';
};

View file

@ -36,7 +36,7 @@ in
askPassword = mkOption {
type = types.str;
default = "${pkgs.x11_ssh_askpass}/libexec/x11-ssh-askpass";
description = ''Program used by SSH to ask for passwords.'';
description = "Program used by SSH to ask for passwords.";
};
forwardX11 = mkOption {

View file

@ -0,0 +1,28 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.programs.tilp2;
in {
options.programs.tilp2 = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Enable tilp2 and udev rules for supported calculators.
'';
};
};
config = mkIf cfg.enable {
services.udev.packages = [
pkgs.libticables2
];
environment.systemPackages = [
pkgs.tilp2
];
};
}

View file

@ -11,7 +11,7 @@ in
lockerCommand = mkOption {
default = "${pkgs.i3lock}/bin/i3lock";
example = literalExample ''''${pkgs.i3lock-fancy}/bin/i3lock-fancy'';
example = literalExample "\${pkgs.i3lock-fancy}/bin/i3lock-fancy";
type = types.separatedString " ";
description = "Locker to be used with xsslock";
};

View file

@ -7,6 +7,11 @@ let
numCerts = length (builtins.attrNames cfg.certs);
_24hSecs = 60 * 60 * 24;
# Used to make unique paths for each cert/account config set
mkHash = with builtins; val: substring 0 20 (hashString "sha256" val);
mkAccountHash = acmeServer: data: mkHash "${toString acmeServer} ${data.keyType} ${data.email}";
accountDirRoot = "/var/lib/acme/.lego/accounts/";
# There are many services required to make cert renewals work.
# They all follow a common structure:
# - They inherit this commonServiceConfig
@ -19,7 +24,7 @@ let
Type = "oneshot";
User = "acme";
Group = mkDefault "acme";
UMask = 0027;
UMask = 0023;
StateDirectoryMode = 750;
ProtectSystem = "full";
PrivateTmp = true;
@ -54,23 +59,35 @@ let
'';
};
# Previously, all certs were owned by whatever user was configured in
# config.security.acme.certs.<cert>.user. Now everything is owned by and
# run by the acme user.
userMigrationService = {
description = "Fix owner and group of all ACME certificates";
script = with builtins; concatStringsSep "\n" (mapAttrsToList (cert: data: ''
for fixpath in /var/lib/acme/${escapeShellArg cert} /var/lib/acme/.lego/${escapeShellArg cert}; do
# Ensures that directories which are shared across all certs
# exist and have the correct user and group, since group
# is configurable on a per-cert basis.
userMigrationService = let
script = with builtins; ''
chown -R acme .lego/accounts
'' + (concatStringsSep "\n" (mapAttrsToList (cert: data: ''
for fixpath in ${escapeShellArg cert} .lego/${escapeShellArg cert}; do
if [ -d "$fixpath" ]; then
chmod -R u=rwX,g=rX,o= "$fixpath"
chown -R acme:${data.group} "$fixpath"
fi
done
'') certConfigs);
'') certConfigs));
in {
description = "Fix owner and group of all ACME certificates";
serviceConfig = commonServiceConfig // {
# We don't want this to run every time a renewal happens
serviceConfig.RemainAfterExit = true;
RemainAfterExit = true;
# These StateDirectory entries negate the need for tmpfiles
StateDirectory = [ "acme" "acme/.lego" "acme/.lego/accounts" ];
StateDirectoryMode = 755;
WorkingDirectory = "/var/lib/acme";
# Run the start script as root
ExecStart = "+" + (pkgs.writeShellScript "acme-fixperms" script);
};
};
certToConfig = cert: data: let
@ -101,11 +118,10 @@ let
${toString acmeServer} ${toString data.dnsProvider}
${toString data.ocspMustStaple} ${data.keyType}
'';
mkHash = with builtins; val: substring 0 20 (hashString "sha256" val);
certDir = mkHash hashData;
domainHash = mkHash "${concatStringsSep " " extraDomains} ${data.domain}";
othersHash = mkHash "${toString acmeServer} ${data.keyType} ${data.email}";
accountDir = "/var/lib/acme/.lego/accounts/" + othersHash;
accountHash = (mkAccountHash acmeServer data);
accountDir = accountDirRoot + accountHash;
protocolOpts = if useDns then (
[ "--dns" data.dnsProvider ]
@ -142,9 +158,8 @@ let
);
in {
inherit accountDir selfsignedDeps;
inherit accountHash cert selfsignedDeps;
webroot = data.webroot;
group = data.group;
renewTimer = {
@ -184,7 +199,10 @@ let
StateDirectory = "acme/${cert}";
BindPaths = "/var/lib/acme/.minica:/tmp/ca /var/lib/acme/${cert}:/tmp/${keyName}";
BindPaths = [
"/var/lib/acme/.minica:/tmp/ca"
"/var/lib/acme/${cert}:/tmp/${keyName}"
];
};
# Working directory will be /tmp
@ -222,16 +240,22 @@ let
serviceConfig = commonServiceConfig // {
Group = data.group;
# AccountDir dir will be created by tmpfiles to ensure correct permissions
# And to avoid deletion during systemctl clean
# acme/.lego/${cert} is listed so that it is deleted during systemctl clean
StateDirectory = "acme/${cert} acme/.lego/${cert} acme/.lego/${cert}/${certDir}";
# Keep in mind that these directories will be deleted if the user runs
# systemctl clean --what=state
# acme/.lego/${cert} is listed for this reason.
StateDirectory = [
"acme/${cert}"
"acme/.lego/${cert}"
"acme/.lego/${cert}/${certDir}"
"acme/.lego/accounts/${accountHash}"
];
# Needs to be space separated, but can't use a multiline string because that'll include newlines
BindPaths =
"${accountDir}:/tmp/accounts " +
"/var/lib/acme/${cert}:/tmp/out " +
"/var/lib/acme/.lego/${cert}/${certDir}:/tmp/certificates ";
BindPaths = [
"${accountDir}:/tmp/accounts"
"/var/lib/acme/${cert}:/tmp/out"
"/var/lib/acme/.lego/${cert}/${certDir}:/tmp/certificates"
];
# Only try loading the credentialsFile if the dns challenge is enabled
EnvironmentFile = mkIf useDns data.credentialsFile;
@ -248,13 +272,18 @@ let
# Working directory will be /tmp
script = ''
set -euo pipefail
set -euxo pipefail
${optionalString (data.webroot != null) ''
# Ensure the webroot exists
mkdir -p '${data.webroot}/.well-known/acme-challenge'
chown 'acme:${data.group}' ${data.webroot}/{.well-known,.well-known/acme-challenge}
''}
echo '${domainHash}' > domainhash.txt
# Check if we can renew
# Certificates and account credentials must exist
if [ -e 'certificates/${keyName}.key' -a -e 'certificates/${keyName}.crt' -a "$(ls -1 accounts)" ]; then
if [ -e 'certificates/${keyName}.key' -a -e 'certificates/${keyName}.crt' -a -n "$(ls -1 accounts)" ]; then
# When domains are updated, there's no need to do a full
# Lego run, but it's likely renew won't work if days is too low.
@ -317,7 +346,7 @@ let
webroot = mkOption {
type = types.nullOr types.str;
default = null;
example = "/var/lib/acme/acme-challenges";
example = "/var/lib/acme/acme-challenge";
description = ''
Where the webroot of the HTTP vhost is located.
<filename>.well-known/acme-challenge/</filename> directory
@ -550,12 +579,12 @@ in {
example = literalExample ''
{
"example.com" = {
webroot = "/var/www/challenges/";
webroot = "/var/lib/acme/acme-challenge/";
email = "foo@example.com";
extraDomainNames = [ "www.example.com" "foo.example.com" ];
};
"bar.example.com" = {
webroot = "/var/www/challenges/";
webroot = "/var/lib/acme/acme-challenge/";
email = "bar@example.com";
};
}
@ -664,21 +693,33 @@ in {
systemd.timers = mapAttrs' (cert: conf: nameValuePair "acme-${cert}" conf.renewTimer) certConfigs;
# .lego and .lego/accounts specified to fix any incorrect permissions
systemd.tmpfiles.rules = [
"d /var/lib/acme/.lego - acme acme"
"d /var/lib/acme/.lego/accounts - acme acme"
] ++ (unique (concatMap (conf: [
"d ${conf.accountDir} - acme acme"
] ++ (optional (conf.webroot != null) "d ${conf.webroot}/.well-known/acme-challenge - acme ${conf.group}")
) (attrValues certConfigs)));
systemd.targets = let
# Create some targets which can be depended on to be "active" after cert renewals
systemd.targets = mapAttrs' (cert: conf: nameValuePair "acme-finished-${cert}" {
finishedTargets = mapAttrs' (cert: conf: nameValuePair "acme-finished-${cert}" {
wantedBy = [ "default.target" ];
requires = [ "acme-${cert}.service" ] ++ conf.selfsignedDeps;
after = [ "acme-${cert}.service" ] ++ conf.selfsignedDeps;
}) certConfigs;
# Create targets to limit the number of simultaneous account creations
# How it works:
# - Pick a "leader" cert service, which will be in charge of creating the account,
# and run first (requires + after)
# - Make all other cert services sharing the same account wait for the leader to
# finish before starting (requiredBy + before).
# Using a target here is fine - account creation is a one time event. Even if
# systemd clean --what=state is used to delete the account, so long as the user
# then runs one of the cert services, there won't be any issues.
accountTargets = mapAttrs' (hash: confs: let
leader = "acme-${(builtins.head confs).cert}.service";
dependantServices = map (conf: "acme-${conf.cert}.service") (builtins.tail confs);
in nameValuePair "acme-account-${hash}" {
requiredBy = dependantServices;
before = dependantServices;
requires = [ leader ];
after = [ leader ];
}) (groupBy (conf: conf.accountHash) (attrValues certConfigs));
in finishedTargets // accountTargets;
})
];

View file

@ -115,15 +115,18 @@ services.nginx = {
<programlisting>
<xref linkend="opt-security.acme.acceptTerms" /> = true;
<xref linkend="opt-security.acme.email" /> = "admin+acme@example.com";
# /var/lib/acme/.challenges must be writable by the ACME user
# and readable by the Nginx user. The easiest way to achieve
# this is to add the Nginx user to the ACME group.
<link linkend="opt-users.users._name_.extraGroups">users.users.nginx.extraGroups</link> = [ "acme" ];
services.nginx = {
<link linkend="opt-services.nginx.enable">enable</link> = true;
<link linkend="opt-services.nginx.virtualHosts">virtualHosts</link> = {
"acmechallenge.example.com" = {
# Catchall vhost, will redirect users to HTTPS for all vhosts
<link linkend="opt-services.nginx.virtualHosts._name_.serverAliases">serverAliases</link> = [ "*.example.com" ];
# /var/lib/acme/.challenges must be writable by the ACME user
# and readable by the Nginx user.
# By default, this is the case.
locations."/.well-known/acme-challenge" = {
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.root">root</link> = "/var/lib/acme/.challenges";
};
@ -134,6 +137,7 @@ services.nginx = {
};
}
# Alternative config for Apache
<link linkend="opt-users.users._name_.extraGroups">users.users.wwwrun.extraGroups</link> = [ "acme" ];
services.httpd = {
<link linkend="opt-services.httpd.enable">enable = true;</link>
<link linkend="opt-services.httpd.virtualHosts">virtualHosts</link> = {
@ -162,6 +166,9 @@ services.httpd = {
<xref linkend="opt-security.acme.certs"/>."foo.example.com" = {
<link linkend="opt-security.acme.certs._name_.webroot">webroot</link> = "/var/lib/acme/.challenges";
<link linkend="opt-security.acme.certs._name_.email">email</link> = "foo@example.com";
# Ensure that the web server you use can read the generated certs
# Take a look at the <link linkend="opt-services.nginx.group">group</link> option for the web server you choose.
<link linkend="opt-security.acme.certs._name_.group">group</link> = "nginx";
# Since we have a wildcard vhost to handle port 80,
# we can generate certs for anything!
# Just make sure your DNS resolves them.
@ -257,10 +264,11 @@ chmod 400 /var/lib/secrets/certs.secret
<para>
Should you need to regenerate a particular certificate in a hurry, such
as when a vulnerability is found in Let's Encrypt, there is now a convenient
mechanism for doing so. Running <literal>systemctl clean acme-example.com.service</literal>
will remove all certificate files for the given domain, allowing you to then
<literal>systemctl start acme-example.com.service</literal> to generate fresh
ones.
mechanism for doing so. Running
<literal>systemctl clean --what=state acme-example.com.service</literal>
will remove all certificate files and the account data for the given domain,
allowing you to then <literal>systemctl start acme-example.com.service</literal>
to generate fresh ones.
</para>
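<para>
As a sketch, for a certificate managed under the name <literal>example.com</literal>:
<programlisting>
systemctl clean --what=state acme-example.com.service
systemctl start acme-example.com.service
</programlisting>
</para>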
</section>
<section xml:id="module-security-acme-fix-jws">

View file

@ -430,8 +430,8 @@ let
${optionalString cfg.pamMount
"auth optional ${pkgs.pam_mount}/lib/security/pam_mount.so"}
${optionalString cfg.enableKwallet
("auth optional ${pkgs.plasma5.kwallet-pam}/lib/security/pam_kwallet5.so" +
" kwalletd=${pkgs.kdeFrameworks.kwallet.bin}/bin/kwalletd5")}
("auth optional ${pkgs.plasma5Packages.kwallet-pam}/lib/security/pam_kwallet5.so" +
" kwalletd=${pkgs.plasma5Packages.kwallet.bin}/bin/kwalletd5")}
${optionalString cfg.enableGnomeKeyring
"auth optional ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so"}
${optionalString cfg.gnupg.enable
@ -509,8 +509,8 @@ let
${optionalString (cfg.enableAppArmor && config.security.apparmor.enable)
"session optional ${pkgs.apparmor-pam}/lib/security/pam_apparmor.so order=user,group,default debug"}
${optionalString (cfg.enableKwallet)
("session optional ${pkgs.plasma5.kwallet-pam}/lib/security/pam_kwallet5.so" +
" kwalletd=${pkgs.kdeFrameworks.kwallet.bin}/bin/kwalletd5")}
("session optional ${pkgs.plasma5Packages.kwallet-pam}/lib/security/pam_kwallet5.so" +
" kwalletd=${pkgs.plasma5Packages.kwallet.bin}/bin/kwalletd5")}
${optionalString (cfg.enableGnomeKeyring)
"session optional ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so auto_start"}
${optionalString cfg.gnupg.enable

View file

@ -10,16 +10,8 @@ let
(n: v: (if v ? program then v else v // {program=n;}))
wrappers);
securityWrapper = pkgs.stdenv.mkDerivation {
name = "security-wrapper";
phases = [ "installPhase" "fixupPhase" ];
buildInputs = [ pkgs.libcap pkgs.libcap_ng pkgs.linuxHeaders ];
hardeningEnable = [ "pie" ];
installPhase = ''
mkdir -p $out/bin
$CC -Wall -O2 -DWRAPPER_DIR=\"${parentWrapperDir}\" \
-lcap-ng -lcap ${./wrapper.c} -o $out/bin/security-wrapper
'';
securityWrapper = pkgs.callPackage ./wrapper.nix {
inherit parentWrapperDir;
};
###### Activation script for the setcap wrappers

View file

@ -4,15 +4,17 @@
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/xattr.h>
#include <fcntl.h>
#include <dirent.h>
#include <assert.h>
#include <errno.h>
#include <linux/capability.h>
#include <sys/capability.h>
#include <sys/prctl.h>
#include <limits.h>
#include <cap-ng.h>
#include <stdint.h>
#include <syscall.h>
#include <byteswap.h>
// Make sure assertions are not compiled out, we use them to codify
// invariants about this program and we want it to fail fast and
@ -23,119 +25,94 @@ extern char **environ;
// The WRAPPER_DIR macro is supplied at compile time so that it cannot
// be changed at runtime
static char * wrapperDir = WRAPPER_DIR;
static char *wrapper_dir = WRAPPER_DIR;
// Wrapper debug variable name
static char * wrapperDebug = "WRAPPER_DEBUG";
static char *wrapper_debug = "WRAPPER_DEBUG";
// Update the capabilities of the running process to include the given
// capability in the Ambient set.
static void set_ambient_cap(cap_value_t cap)
{
capng_get_caps_process();
#define CAP_SETPCAP 8
if (capng_update(CAPNG_ADD, CAPNG_INHERITABLE, (unsigned long) cap))
{
perror("cannot raise the capability into the Inheritable set\n");
exit(1);
#if __BYTE_ORDER == __BIG_ENDIAN
#define LE32_TO_H(x) bswap_32(x)
#else
#define LE32_TO_H(x) (x)
#endif
int get_last_cap(unsigned *last_cap) {
FILE* file = fopen("/proc/sys/kernel/cap_last_cap", "r");
if (file == NULL) {
int saved_errno = errno;
fprintf(stderr, "failed to open /proc/sys/kernel/cap_last_cap: %s\n", strerror(errno));
return -saved_errno;
}
capng_apply(CAPNG_SELECT_CAPS);
if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, (unsigned long) cap, 0, 0))
{
perror("cannot raise the capability into the Ambient set\n");
exit(1);
int res = fscanf(file, "%u", last_cap);
if (res == EOF) {
int saved_errno = errno;
fprintf(stderr, "could not read number from /proc/sys/kernel/cap_last_cap: %s\n", strerror(errno));
return -saved_errno;
}
fclose(file);
return 0;
}
// Given the path to this program, fetch its configured capability set
// (as set by `setcap ... /path/to/file`) and raise those capabilities
// into the Ambient set.
static int make_caps_ambient(const char *selfPath)
{
cap_t caps = cap_get_file(selfPath);
if(!caps)
{
if(getenv(wrapperDebug))
fprintf(stderr, "no caps set or could not retrieve the caps for this file, not doing anything...");
static int make_caps_ambient(const char *self_path) {
struct vfs_ns_cap_data data = {};
int r = getxattr(self_path, "security.capability", &data, sizeof(data));
if (r < 0) {
if (errno == ENODATA) {
// no capabilities set
return 0;
}
fprintf(stderr, "cannot get capabilities for %s: %s", self_path, strerror(errno));
return 1;
}
// We use `cap_to_text` and iteration over the tokenized result
// string because, as of libcap's current release, there is no
// facility for retrieving an array of `cap_value_t`'s that can be
// given to `prctl` in order to lift that capability into the
// Ambient set.
//
// Some discussion was had around shot-gunning all of the
// capabilities we know about into the Ambient set but that has a
// security smell and I deemed the risk of the current
// implementation crashing the program to be lower than the risk
// of a privilege escalation security hole being introduced by
// raising all capabilities, even ones we didn't intend for the
// program, into the Ambient set.
//
// `cap_t` which is returned by `cap_get_*` is an opaque type and
// even if we could retrieve the bitmasks (which, as far as I can
// tell we cannot) in order to get the `cap_value_t`
// representation for each capability we would have to take the
// total number of capabilities supported and iterate over the
// sequence of integers up-to that maximum total, testing each one
// against the bitmask ((bitmask >> n) & 1) to see if it's set and
// aggregating each "capability integer n" that is set in the
// bitmask.
//
// That, combined with the fact that we can't easily get the
// bitmask anyway seemed much more brittle than fetching the
// `cap_t`, transforming it into a textual representation,
// tokenizing the string, and using `cap_from_name` on the token
// to get the `cap_value_t` that we need for `prctl`. There is
// indeed risk involved if the output string format of
// `cap_to_text` ever changes but at this time the combination of
// factors involving the below list have led me to the conclusion
// that the best implementation at this time is reading then
// parsing with *lots of documentation* about why we're doing it
// this way.
//
// 1. No explicit API for fetching an array of `cap_value_t`'s or
// for transforming a `cap_t` into such a representation
// 2. The risk of a crash is lower than lifting all capabilities
// into the Ambient set
// 3. libcap is depended on heavily in the Linux ecosystem so
// there is a high chance that the output representation of
// `cap_to_text` will not change which reduces our risk that
// this parsing step will cause a crash
//
// The preferred method, should it ever be available in the
// future, would be to use libcap API's to transform the result
// from a `cap_get_*` into an array of `cap_value_t`'s that can
// then be given to prctl.
//
// - Parnell
ssize_t capLen;
char* capstr = cap_to_text(caps, &capLen);
cap_free(caps);
// TODO: For now, we assume that cap_to_text always starts its
// result string with " =" and that the first capability is listed
// immediately after that. We should verify this.
assert(capLen >= 2);
capstr += 2;
char* saveptr = NULL;
for(char* tok = strtok_r(capstr, ",", &saveptr); tok; tok = strtok_r(NULL, ",", &saveptr))
{
cap_value_t capnum;
if (cap_from_name(tok, &capnum))
{
if(getenv(wrapperDebug))
fprintf(stderr, "cap_from_name failed, skipping: %s", tok);
size_t size;
uint32_t version = LE32_TO_H(data.magic_etc) & VFS_CAP_REVISION_MASK;
switch (version) {
case VFS_CAP_REVISION_1:
size = VFS_CAP_U32_1;
break;
case VFS_CAP_REVISION_2:
case VFS_CAP_REVISION_3:
size = VFS_CAP_U32_3;
break;
default:
fprintf(stderr, "BUG! Unsupported capability version 0x%x on %s. Report to NixOS bugtracker\n", version, self_path);
return 1;
}
else if (capnum == CAP_SETPCAP)
{
const struct __user_cap_header_struct header = {
.version = _LINUX_CAPABILITY_VERSION_3,
.pid = getpid(),
};
struct __user_cap_data_struct user_data[2] = {};
for (size_t i = 0; i < size; i++) {
// merge inheritable & permitted into one
user_data[i].permitted = user_data[i].inheritable =
LE32_TO_H(data.data[i].inheritable) | LE32_TO_H(data.data[i].permitted);
}
if (syscall(SYS_capset, &header, &user_data) < 0) {
fprintf(stderr, "failed to inherit capabilities: %s", strerror(errno));
return 1;
}
unsigned last_cap;
r = get_last_cap(&last_cap);
if (r < 0) {
return 1;
}
uint64_t set = user_data[0].permitted | (uint64_t)user_data[1].permitted << 32;
for (unsigned cap = 0; cap < last_cap; cap++) {
if (!(set & (1ULL << cap))) {
continue;
}
// Check for the cap_setpcap capability, we set this on the
// wrapper so it can elevate the capabilities to the Ambient
// set but we do not want to propagate it down into the
@ -143,62 +120,77 @@ static int make_caps_ambient(const char *selfPath)
//
// TODO: what happens if that's the behavior you want
// though? I'm preferring a strict rather than a loose policy here.
if(getenv(wrapperDebug))
if (cap == CAP_SETPCAP) {
if(getenv(wrapper_debug)) {
fprintf(stderr, "cap_setpcap in set, skipping it\n");
}
else
{
set_ambient_cap(capnum);
if(getenv(wrapperDebug))
fprintf(stderr, "raised %s into the Ambient capability set\n", tok);
continue;
}
if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, (unsigned long) cap, 0, 0)) {
fprintf(stderr, "cannot raise the capability %d into the ambient set: %s\n", cap, strerror(errno));
return 1;
}
if (getenv(wrapper_debug)) {
fprintf(stderr, "raised %d into the ambient capability set\n", cap);
}
}
cap_free(capstr);
return 0;
}
int main(int argc, char * * argv)
{
// I *think* it's safe to assume that a path from a symbolic link
// should safely fit within the PATH_MAX system limit. Though I'm
// not positive it's safe...
char selfPath[PATH_MAX];
int selfPathSize = readlink("/proc/self/exe", selfPath, sizeof(selfPath));
int readlink_malloc(const char *p, char **ret) {
size_t l = FILENAME_MAX+1;
int r;
assert(selfPathSize > 0);
for (;;) {
char *c = calloc(l, sizeof(char));
if (!c) {
return -ENOMEM;
}
// Assert we have room for the zero byte; this ensures the path
// isn't being truncated because it's too big for the buffer.
//
// A better way to handle this might be to use something like the
// whereami library (https://github.com/gpakosz/whereami) or a
// loop that resizes the buffer and re-reads the link if the
// contents are being truncated.
assert(selfPathSize < sizeof(selfPath));
ssize_t n = readlink(p, c, l-1);
if (n < 0) {
r = -errno;
free(c);
return r;
}
// Set the zero byte since readlink doesn't do that for us.
selfPath[selfPathSize] = '\0';
if ((size_t) n < l-1) {
c[n] = 0;
*ret = c;
return 0;
}
free(c);
l *= 2;
}
}
int main(int argc, char **argv) {
char *self_path = NULL;
int self_path_size = readlink_malloc("/proc/self/exe", &self_path);
if (self_path_size < 0) {
fprintf(stderr, "cannot readlink /proc/self/exe: %s", strerror(-self_path_size));
}
// Make sure that we are being executed from the right location,
// i.e., `safeWrapperDir'. This is to prevent someone from creating
// i.e., `safe_wrapper_dir'. This is to prevent someone from creating
// hard link `X' from some other location, along with a false
// `X.real' file, which would allow arbitrary programs to be executed
// with elevated capabilities.
int len = strlen(wrapperDir);
if (len > 0 && '/' == wrapperDir[len - 1])
int len = strlen(wrapper_dir);
if (len > 0 && '/' == wrapper_dir[len - 1])
--len;
assert(!strncmp(selfPath, wrapperDir, len));
assert('/' == wrapperDir[0]);
assert('/' == selfPath[len]);
assert(!strncmp(self_path, wrapper_dir, len));
assert('/' == wrapper_dir[0]);
assert('/' == self_path[len]);
// Make *really* *really* sure that we were executed as
// `selfPath', and not, say, as some other setuid program. That
// `self_path', and not, say, as some other setuid program. That
// is, our effective uid/gid should match the uid/gid of
// `selfPath'.
// `self_path'.
struct stat st;
assert(lstat(selfPath, &st) != -1);
assert(lstat(self_path, &st) != -1);
assert(!(st.st_mode & S_ISUID) || (st.st_uid == geteuid()));
assert(!(st.st_mode & S_ISGID) || (st.st_gid == getegid()));
@ -207,33 +199,35 @@ int main(int argc, char * * argv)
assert(!(st.st_mode & (S_IWGRP | S_IWOTH)));
// Read the path of the real (wrapped) program from <self>.real.
char realFN[PATH_MAX + 10];
int realFNSize = snprintf (realFN, sizeof(realFN), "%s.real", selfPath);
assert (realFNSize < sizeof(realFN));
char real_fn[PATH_MAX + 10];
int real_fn_size = snprintf(real_fn, sizeof(real_fn), "%s.real", self_path);
assert(real_fn_size < sizeof(real_fn));
int fdSelf = open(realFN, O_RDONLY);
assert (fdSelf != -1);
int fd_self = open(real_fn, O_RDONLY);
assert(fd_self != -1);
char sourceProg[PATH_MAX];
len = read(fdSelf, sourceProg, PATH_MAX);
assert (len != -1);
assert (len < sizeof(sourceProg));
assert (len > 0);
sourceProg[len] = 0;
char source_prog[PATH_MAX];
len = read(fd_self, source_prog, PATH_MAX);
assert(len != -1);
assert(len < sizeof(source_prog));
assert(len > 0);
source_prog[len] = 0;
close(fdSelf);
close(fd_self);
// Read the capabilities set on the wrapper and raise them in to
// the Ambient set so the program we're wrapping receives the
// the ambient set so the program we're wrapping receives the
// capabilities too!
make_caps_ambient(selfPath);
if (make_caps_ambient(self_path) != 0) {
free(self_path);
return 1;
}
free(self_path);
execve(sourceProg, argv, environ);
execve(source_prog, argv, environ);
fprintf(stderr, "%s: cannot run `%s': %s\n",
argv[0], sourceProg, strerror(errno));
argv[0], source_prog, strerror(errno));
exit(1);
return 1;
}
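For orientation, this is the binary that the NixOS security-wrappers machinery installs under the wrapper directory. A rough sketch of the configuration side follows; the wrapper name, paths, and capability string are illustrative only, and the option names are assumed from the security.wrappers module:

# Illustrative entry; "ping" and the capability string are examples only.
security.wrappers.ping = {
  source = "${pkgs.iputils}/bin/ping";   # program to wrap (written to ping.real)
  owner = "root";
  group = "root";
  capabilities = "cap_net_raw+p";        # file capability set on the wrapper binary
};

At run time the generated wrapper reads its own file capabilities, raises them into the ambient set as shown above, and then execve's the path stored in `ping.real`.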

View file

@ -0,0 +1,21 @@
{ stdenv, linuxHeaders, parentWrapperDir, debug ? false }:
# For testing:
# $ nix-build -E 'with import <nixpkgs> {}; pkgs.callPackage ./wrapper.nix { parentWrapperDir = "/run/wrappers"; debug = true; }'
stdenv.mkDerivation {
name = "security-wrapper";
buildInputs = [ linuxHeaders ];
dontUnpack = true;
hardeningEnable = [ "pie" ];
CFLAGS = [
''-DWRAPPER_DIR="${parentWrapperDir}"''
] ++ (if debug then [
"-Werror" "-Og" "-g"
] else [
"-Wall" "-O2"
]);
dontStrip = debug;
installPhase = ''
mkdir -p $out/bin
$CC $CFLAGS ${./wrapper.c} -o $out/bin/security-wrapper
'';
}

View file

@ -33,6 +33,7 @@ in {
};
configurationDir = mkOption {
default = "${activemq}/conf";
type = types.str;
description = ''
The base directory for ActiveMQ's configuration.
By default, this directory is searched for a file named activemq.xml,

View file

@ -32,7 +32,7 @@ in
enableOSSEmulation = mkOption {
type = types.bool;
default = true;
default = false;
description = ''
Whether to enable ALSA OSS emulation (with certain cards sound mixing may not work!).
'';
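Since the default flips from true to false here, OSS emulation now has to be requested explicitly; assuming the option path this module defines, that would look like:

# Opt back in to ALSA OSS emulation (option path assumed from this module).
sound.enableOSSEmulation = true;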

View file

@ -74,7 +74,7 @@ in {
musicDirectory = mkOption {
type = with types; either path (strMatching "(http|https|nfs|smb)://.+");
default = "${cfg.dataDir}/music";
defaultText = ''''${dataDir}/music'';
defaultText = "\${dataDir}/music";
description = ''
The directory or NFS/SMB network share where MPD reads music from. If left
as the default value this directory will automatically be created before
@ -86,7 +86,7 @@ in {
playlistDirectory = mkOption {
type = types.path;
default = "${cfg.dataDir}/playlists";
defaultText = ''''${dataDir}/playlists'';
defaultText = "\${dataDir}/playlists";
description = ''
The directory where MPD stores playlists. If left as the default value
this directory will automatically be created before the MPD server starts,
@ -155,7 +155,7 @@ in {
dbFile = mkOption {
type = types.nullOr types.str;
default = "${cfg.dataDir}/tag_cache";
defaultText = ''''${dataDir}/tag_cache'';
defaultText = "\${dataDir}/tag_cache";
description = ''
The path to MPD's database. If set to <literal>null</literal> the
parameter is omitted from the configuration.
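The defaultText changes above are purely about Nix string escaping: a literal `${` is written `''${` inside an indented string but `\${` inside a double-quoted string. A minimal sketch of the two equivalent spellings:

# Both a and b evaluate to the literal string: ${dataDir}/music
a = ''''${dataDir}/music'';   # indented string; ''${ escapes interpolation
b = "\${dataDir}/music";      # double-quoted string; \${ escapes interpolation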

View file

@ -1,5 +1,6 @@
{ config, lib, pkgs, ... }:
# TODO: test configuration when building nixexpr (use -t parameter)
# TODO: support sqlite3 (is it deprecated?) and mysql
@ -111,6 +112,7 @@ let
{
options = {
password = mkOption {
type = types.str;
# TODO: required?
description = ''
Specifies the password that must be supplied for the default Bacula
@ -130,6 +132,7 @@ let
};
monitor = mkOption {
type = types.enum [ "no" "yes" ];
default = "no";
example = "yes";
description = ''
@ -150,6 +153,7 @@ let
{
options = {
changerDevice = mkOption {
type = types.str;
description = ''
The specified name-string must be the generic SCSI device name of the
autochanger that corresponds to the normal read/write Archive Device
@ -168,6 +172,7 @@ let
};
changerCommand = mkOption {
type = types.str;
description = ''
The name-string specifies an external program to be called that will
automatically change volumes as required by Bacula. Normally, this
@ -190,12 +195,13 @@ let
};
devices = mkOption {
description = ''
'';
description = "";
type = types.listOf types.str;
};
extraAutochangerConfig = mkOption {
default = "";
type = types.lines;
description = ''
Extra configuration to be passed in Autochanger directive.
'';
@ -212,6 +218,7 @@ let
options = {
archiveDevice = mkOption {
# TODO: required?
type = types.str;
description = ''
The specified name-string gives the system file name of the storage
device managed by this storage daemon. This will usually be the
@ -228,6 +235,7 @@ let
mediaType = mkOption {
# TODO: required?
type = types.str;
description = ''
The specified name-string names the type of media supported by this
device, for example, <literal>DLT7000</literal>. Media type names are
@ -265,6 +273,7 @@ let
extraDeviceConfig = mkOption {
default = "";
type = types.lines;
description = ''
Extra configuration to be passed in Device directive.
'';
@ -293,6 +302,7 @@ in {
name = mkOption {
default = "${config.networking.hostName}-fd";
type = types.str;
description = ''
The client name that must be used by the Director when connecting.
Generally, it is a good idea to use a name related to the machine so
@ -321,6 +331,7 @@ in {
extraClientConfig = mkOption {
default = "";
type = types.lines;
description = ''
Extra configuration to be passed in Client directive.
'';
@ -332,6 +343,7 @@ in {
extraMessagesConfig = mkOption {
default = "";
type = types.lines;
description = ''
Extra configuration to be passed in Messages directive.
'';
@ -352,6 +364,7 @@ in {
name = mkOption {
default = "${config.networking.hostName}-sd";
type = types.str;
description = ''
Specifies the Name of the Storage daemon.
'';
@ -392,6 +405,7 @@ in {
extraStorageConfig = mkOption {
default = "";
type = types.lines;
description = ''
Extra configuration to be passed in Storage directive.
'';
@ -403,6 +417,7 @@ in {
extraMessagesConfig = mkOption {
default = "";
type = types.lines;
description = ''
Extra configuration to be passed in Messages directive.
'';
@ -424,6 +439,7 @@ in {
name = mkOption {
default = "${config.networking.hostName}-dir";
type = types.str;
description = ''
The director name used by the system administrator. This directive is
required.
@ -445,6 +461,7 @@ in {
password = mkOption {
# TODO: required?
type = types.str;
description = ''
Specifies the password that must be supplied for a Director.
'';
@ -452,6 +469,7 @@ in {
extraMessagesConfig = mkOption {
default = "";
type = types.lines;
description = ''
Extra configuration to be passed in Messages directive.
'';
@ -462,6 +480,7 @@ in {
extraDirectorConfig = mkOption {
default = "";
type = types.lines;
description = ''
Extra configuration to be passed in Director directive.
'';
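The hunks in this module all make the same change: every `mkOption` gains an explicit `type` (plus a `default` where one makes sense) so the module system can check and merge values. A minimal sketch of the fully-declared shape, using a hypothetical option:

# Hypothetical option; the pattern, not this name, is what the hunks add.
extraConfig = lib.mkOption {
  type = lib.types.lines;
  default = "";
  description = ''
    Extra configuration appended verbatim to the generated file.
  '';
};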

View file

@ -48,6 +48,7 @@ in
};
user = mkOption {
type = types.str;
default = defaultUser;
description = ''
User to be used to perform backup.
@ -56,12 +57,14 @@ in
databases = mkOption {
default = [];
type = types.listOf types.str;
description = ''
List of database names to dump.
'';
};
location = mkOption {
type = types.path;
default = "/var/backup/mysql";
description = ''
Location to put the gzipped MySQL database dumps.
@ -70,6 +73,7 @@ in
singleTransaction = mkOption {
default = false;
type = types.bool;
description = ''
Whether to create database dump in a single transaction
'';
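For context, the options being typed here are set from a host configuration along these lines; the option paths are assumed from services.mysqlBackup and the database names are placeholders:

services.mysqlBackup = {
  enable = true;
  user = "mysql";                  # account performing the dump
  databases = [ "wiki" "shop" ];   # placeholder database names
  location = "/var/backup/mysql";
  singleTransaction = true;
};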

View file

@ -48,6 +48,7 @@ in {
startAt = mkOption {
default = "*-*-* 01:15:00";
type = types.str;
description = ''
This option defines (see <literal>systemd.time</literal> for format) when the
databases should be dumped.
@ -70,6 +71,7 @@ in {
databases = mkOption {
default = [];
type = types.listOf types.str;
description = ''
List of database names to dump.
'';
@ -77,6 +79,7 @@ in {
location = mkOption {
default = "/var/backup/postgresql";
type = types.path;
description = ''
Location to put the gzipped PostgreSQL database dumps.
'';

View file

@ -243,9 +243,11 @@ in
restartIfChanged = false;
serviceConfig = {
Type = "oneshot";
ExecStart = [ "${resticCmd} backup ${concatStringsSep " " backup.extraBackupArgs} ${backupPaths}" ] ++ pruneCmd;
ExecStart = [ "${resticCmd} backup --cache-dir=%C/restic-backups-${name} ${concatStringsSep " " backup.extraBackupArgs} ${backupPaths}" ] ++ pruneCmd;
User = backup.user;
RuntimeDirectory = "restic-backups-${name}";
CacheDirectory = "restic-backups-${name}";
CacheDirectoryMode = "0700";
} // optionalAttrs (backup.s3CredentialsFile != null) {
EnvironmentFile = backup.s3CredentialsFile;
};
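With CacheDirectory set, restic's cache now lives under the systemd-managed %C/restic-backups-&lt;name&gt; path rather than the calling user's default cache location. A sketch of a backup definition that exercises this unit, with option names assumed from services.restic.backups and all paths and names placeholders:

services.restic.backups.home = {
  user = "alice";                                    # placeholder user
  repository = "/srv/restic-repo";                   # placeholder repository path
  passwordFile = "/etc/nixos/secrets/restic-pass";   # placeholder secret path
  paths = [ "/home/alice" ];
  extraBackupArgs = [ "--exclude=/home/alice/.cache" ];
  timerConfig.OnCalendar = "daily";
};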

View file

@ -354,7 +354,7 @@ in
script = let
tarsnap = ''tarsnap --configfile "/etc/tarsnap/${name}.conf"'';
lastArchive = ''$(${tarsnap} --list-archives | sort | tail -1)'';
lastArchive = "$(${tarsnap} --list-archives | sort | tail -1)";
run = ''${tarsnap} -x -f "${lastArchive}" ${optionalString cfg.verbose "-v"}'';
in if (cfg.cachedir != null) then ''

Some files were not shown because too many files have changed in this diff.