Project import generated by Copybara.

GitOrigin-RevId: 7c9cc5a6e5d38010801741ac830a3f8fd667a7a0
This commit is contained in:
Default email 2023-10-19 15:55:26 +02:00
parent 80154c5673
commit b5f92a349c
2097 changed files with 43786 additions and 38200 deletions

View file

@ -53,7 +53,7 @@
/pkgs/test/nixpkgs-check-by-name @infinisil /pkgs/test/nixpkgs-check-by-name @infinisil
/pkgs/by-name/README.md @infinisil /pkgs/by-name/README.md @infinisil
/pkgs/top-level/by-name-overlay.nix @infinisil /pkgs/top-level/by-name-overlay.nix @infinisil
/.github/workflows/check-by-name.nix @infinisil /.github/workflows/check-by-name.yml @infinisil
# Nixpkgs build-support # Nixpkgs build-support
/pkgs/build-support/writers @lassulus @Profpatsch /pkgs/build-support/writers @lassulus @Profpatsch

View file

@ -18,12 +18,34 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Resolving the merge commit - name: Resolving the merge commit
env:
GH_TOKEN: ${{ github.token }}
run: | run: |
if result=$(git ls-remote --exit-code ${{ github.event.pull_request.base.repo.clone_url }} refs/pull/${{ github.event.pull_request.number }}/merge); then # This checks for mergeability of a pull request as recommended in
mergedSha=$(cut -f1 <<< "$result") # https://docs.github.com/en/rest/guides/using-the-rest-api-to-interact-with-your-git-database?apiVersion=2022-11-28#checking-mergeability-of-pull-requests
echo "The PR appears to not have any conflicts, checking the merge commit $mergedSha" while true; do
echo "Checking whether the pull request can be merged"
prInfo=$(gh api \
-H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
/repos/"$GITHUB_REPOSITORY"/pulls/${{ github.event.pull_request.number }})
mergeable=$(jq -r .mergeable <<< "$prInfo")
mergedSha=$(jq -r .merge_commit_sha <<< "$prInfo")
if [[ "$mergeable" == "null" ]]; then
# null indicates that GitHub is still computing whether it's mergeable
# Wait a couple seconds before trying again
echo "GitHub is still computing whether this PR can be merged, waiting 5 seconds before trying again"
sleep 5
else
break
fi
done
if [[ "$mergeable" == "true" ]]; then
echo "The PR can be merged, checking the merge commit $mergedSha"
else else
echo "The PR may have a merge conflict" echo "The PR cannot be merged, it has a merge conflict"
exit 1 exit 1
fi fi
echo "mergedSha=$mergedSha" >> "$GITHUB_ENV" echo "mergedSha=$mergedSha" >> "$GITHUB_ENV"

View file

@ -538,7 +538,7 @@ To get a sense for what changes are considered mass rebuilds, see [previously me
When adding yourself as maintainer, in the same pull request, make a separate When adding yourself as maintainer, in the same pull request, make a separate
commit with the message `maintainers: add <handle>`. commit with the message `maintainers: add <handle>`.
Add the commit before those making changes to the package or module. Add the commit before those making changes to the package or module.
See [Nixpkgs Maintainers](../maintainers/README.md) for details. See [Nixpkgs Maintainers](./maintainers/README.md) for details.
### Writing good commit messages ### Writing good commit messages

View file

@ -3,6 +3,7 @@
This directory houses the sources files for the Nixpkgs manual. This directory houses the sources files for the Nixpkgs manual.
You can find the [rendered documentation for Nixpkgs `unstable` on nixos.org](https://nixos.org/manual/nixpkgs/unstable/). You can find the [rendered documentation for Nixpkgs `unstable` on nixos.org](https://nixos.org/manual/nixpkgs/unstable/).
The rendering tool is [nixos-render-docs](../pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs), sometimes abbreviated `nrd`.
[Docs for Nixpkgs stable](https://nixos.org/manual/nixpkgs/stable/) are also available. [Docs for Nixpkgs stable](https://nixos.org/manual/nixpkgs/stable/) are also available.

View file

@ -243,3 +243,26 @@ or
*** ***
``` ```
## `fetchFromBittorrent` {#fetchfrombittorrent}
`fetchFromBittorrent` expects two arguments. `url` which can either be a Magnet URI (Magnet Link) such as `magnet:?xt=urn:btih:dd8255ecdc7ca55fb0bbf81323d87062db1f6d1c` or an HTTP URL pointing to a `.torrent` file. It can also take a `config` argument which will craft a `settings.json` configuration file and give it to `transmission`, the underlying program that is performing the fetch. The available config options for `transmission` can be found [here](https://github.com/transmission/transmission/blob/main/docs/Editing-Configuration-Files.md#options)
```
{ fetchFromBittorrent }:
fetchFromBittorrent {
config = { peer-limit-global = 100; };
url = "magnet:?xt=urn:btih:dd8255ecdc7ca55fb0bbf81323d87062db1f6d1c";
sha256 = "";
}
```
### Parameters {#fetchfrombittorrent-parameters}
- `url`: Magnet URI (Magnet Link) such as `magnet:?xt=urn:btih:dd8255ecdc7ca55fb0bbf81323d87062db1f6d1c` or an HTTP URL pointing to a `.torrent` file.
- `backend`: Which bittorrent program to use. Default: `"transmission"`. Valid values are `"rqbit"` or `"transmission"`. These are the two most suitable torrent clients for fetching in a fixed-output derivation at the time of writing, as they can be easily exited after usage. `rqbit` is written in Rust and has a smaller closure size than `transmission`, and the performance and peer discovery properties differs between these clients, requiring experimentation to decide upon which is the best.
- `config`: When using `transmission` as the `backend`, a json configuration can
be supplied to transmission. Refer to the [upstream documentation](https://github.com/transmission/transmission/blob/main/docs/Editing-Configuration-Files.md) for information on how to configure.

View file

@ -157,3 +157,17 @@ in the example below and rebuild.
You may make any other changes to your VM in this attribute set. For example, You may make any other changes to your VM in this attribute set. For example,
you could enable Docker or X11 forwarding to your Darwin host. you could enable Docker or X11 forwarding to your Darwin host.
## Troubleshooting the generated configuration {#sec-darwin-builder-troubleshoot}
The `linux-builder` package exposes the attributes `nixosConfig` and `nixosOptions` that allow you to inspect the generated NixOS configuration in the `nix repl`. For example:
```
$ nix repl --file ~/src/nixpkgs --argstr system aarch64-darwin
nix-repl> darwin.linux-builder.nixosConfig.nix.package
«derivation /nix/store/...-nix-2.17.0.drv»
nix-repl> :p darwin.linux-builder.nixosOptions.virtualisation.memorySize.definitionsWithLocations
[ { file = "/home/user/src/nixpkgs/nixos/modules/profiles/macos-builder.nix"; value = 3072; } ]
```

View file

@ -817,7 +817,7 @@ $ cargo test
## Using community maintained Rust toolchains {#using-community-maintained-rust-toolchains} ## Using community maintained Rust toolchains {#using-community-maintained-rust-toolchains}
::: {.note} ::: {.note}
Note: The following projects cannot be used within nixpkgs since [IFD](#ssec-import-from-derivation) is disallowed. The following projects cannot be used within Nixpkgs since [Import From Derivation](https://nixos.org/manual/nix/unstable/language/import-from-derivation) (IFD) is disallowed in Nixpkgs.
To package things that require Rust nightly, `RUSTC_BOOTSTRAP = true;` can sometimes be used as a hack. To package things that require Rust nightly, `RUSTC_BOOTSTRAP = true;` can sometimes be used as a hack.
::: :::

View file

@ -542,6 +542,36 @@ rec {
attrs: attrs:
map (name: f name attrs.${name}) (attrNames attrs); map (name: f name attrs.${name}) (attrNames attrs);
/*
Deconstruct an attrset to a list of name-value pairs as expected by [`builtins.listToAttrs`](https://nixos.org/manual/nix/stable/language/builtins.html#builtins-listToAttrs).
Each element of the resulting list is an attribute set with these attributes:
- `name` (string): The name of the attribute
- `value` (any): The value of the attribute
The following is always true:
```nix
builtins.listToAttrs (attrsToList attrs) == attrs
```
:::{.warning}
The opposite is not always true. In general expect that
```nix
attrsToList (builtins.listToAttrs list) != list
```
This is because the `listToAttrs` removes duplicate names and doesn't preserve the order of the list.
:::
Example:
attrsToList { foo = 1; bar = "asdf"; }
=> [ { name = "bar"; value = "asdf"; } { name = "foo"; value = 1; } ]
Type:
attrsToList :: AttrSet -> [ { name :: String; value :: Any; } ]
*/
attrsToList = mapAttrsToList nameValuePair;
/* Like `mapAttrs`, except that it recursively applies itself to /* Like `mapAttrs`, except that it recursively applies itself to
the *leaf* attributes of a potentially-nested attribute set: the *leaf* attributes of a potentially-nested attribute set:

View file

@ -69,8 +69,8 @@ rec {
"<pkg>.overrideDerivation" to learn about `overrideDerivation` and caveats "<pkg>.overrideDerivation" to learn about `overrideDerivation` and caveats
related to its use. related to its use.
*/ */
makeOverridable = f: origArgs: makeOverridable = f: lib.setFunctionArgs
let (origArgs: let
result = f origArgs; result = f origArgs;
# Creates a functor with the same arguments as f # Creates a functor with the same arguments as f
@ -95,7 +95,8 @@ rec {
lib.setFunctionArgs result (lib.functionArgs result) // { lib.setFunctionArgs result (lib.functionArgs result) // {
override = overrideArgs; override = overrideArgs;
} }
else result; else result)
(lib.functionArgs f);
/* Call the package function in the file `fn` with the required /* Call the package function in the file `fn` with the required

View file

@ -81,8 +81,8 @@ let
inherit (self.attrsets) attrByPath hasAttrByPath setAttrByPath inherit (self.attrsets) attrByPath hasAttrByPath setAttrByPath
getAttrFromPath attrVals attrValues getAttrs catAttrs filterAttrs getAttrFromPath attrVals attrValues getAttrs catAttrs filterAttrs
filterAttrsRecursive foldlAttrs foldAttrs collect nameValuePair mapAttrs filterAttrsRecursive foldlAttrs foldAttrs collect nameValuePair mapAttrs
mapAttrs' mapAttrsToList concatMapAttrs mapAttrsRecursive mapAttrsRecursiveCond mapAttrs' mapAttrsToList attrsToList concatMapAttrs mapAttrsRecursive
genAttrs isDerivation toDerivation optionalAttrs mapAttrsRecursiveCond genAttrs isDerivation toDerivation optionalAttrs
zipAttrsWithNames zipAttrsWith zipAttrs recursiveUpdateUntil zipAttrsWithNames zipAttrsWith zipAttrs recursiveUpdateUntil
recursiveUpdate matchAttrs overrideExisting showAttrPath getOutput getBin recursiveUpdate matchAttrs overrideExisting showAttrPath getOutput getBin
getLib getDev getMan chooseDevOutputs zipWithNames zip getLib getDev getMan chooseDevOutputs zipWithNames zip

View file

@ -58,7 +58,8 @@ An attribute set with these values:
- `_internalBase` (path): - `_internalBase` (path):
Any files outside of this path cannot influence the set of files. Any files outside of this path cannot influence the set of files.
This is always a directory. This is always a directory and should be as long as possible.
This is used by `lib.fileset.toSource` to check that all files are under the `root` argument
- `_internalBaseRoot` (path): - `_internalBaseRoot` (path):
The filesystem root of `_internalBase`, same as `(lib.path.splitRoot _internalBase).root`. The filesystem root of `_internalBase`, same as `(lib.path.splitRoot _internalBase).root`.
@ -143,9 +144,37 @@ Arguments:
- (-) Leaves us with no identity element for `union` and no reasonable return value for `unions []`. - (-) Leaves us with no identity element for `union` and no reasonable return value for `unions []`.
From a set theory perspective, which has a well-known notion of empty sets, this is unintuitive. From a set theory perspective, which has a well-known notion of empty sets, this is unintuitive.
### No intersection for lists
While there is `intersection a b`, there is no function `intersections [ a b c ]`.
Arguments:
- (+) There is no known use case for such a function, it can be added later if a use case arises
- (+) There is no suitable return value for `intersections [ ]`, see also "Nullary intersections" [here](https://en.wikipedia.org/w/index.php?title=List_of_set_identities_and_relations&oldid=1177174035#Definitions)
- (-) Could throw an error for that case
- (-) Create a special value to represent "all the files" and return that
- (+) Such a value could then not be used with `fileFilter` unless the internal representation is changed considerably
- (-) Could return the empty file set
- (+) This would be wrong in set theory
- (-) Inconsistent with `union` and `unions`
### Intersection base path
The base path of the result of an `intersection` is the longest base path of the arguments.
E.g. the base path of `intersection ./foo ./foo/bar` is `./foo/bar`.
Meanwhile `intersection ./foo ./bar` returns the empty file set without a base path.
Arguments:
- Alternative: Use the common prefix of all base paths as the resulting base path
- (-) This is unnecessarily strict, because the purpose of the base path is to track the directory under which files _could_ be in the file set. It should be as long as possible.
All files contained in `intersection ./foo ./foo/bar` will be under `./foo/bar` (never just under `./foo`), and `intersection ./foo ./bar` will never contain any files (never under `./.`).
This would lead to `toSource` having to unexpectedly throw errors for cases such as `toSource { root = ./foo; fileset = intersect ./foo base; }`, where `base` may be `./bar` or `./.`.
- (-) There is no benefit to the user, since base path is not directly exposed in the interface
### Empty directories ### Empty directories
File sets can only represent a _set_ of local files, directories on their own are not representable. File sets can only represent a _set_ of local files.
Directories on their own are not representable.
Arguments: Arguments:
- (+) There does not seem to be a sensible set of combinators when directories can be represented on their own. - (+) There does not seem to be a sensible set of combinators when directories can be represented on their own.
@ -161,7 +190,7 @@ Arguments:
- `./.` represents all files in `./.` _and_ the directory itself, but not its subdirectories, meaning that at least `./.` will be preserved even if it's empty. - `./.` represents all files in `./.` _and_ the directory itself, but not its subdirectories, meaning that at least `./.` will be preserved even if it's empty.
In that case, `intersect ./. ./foo` should only include files and no directories themselves, since `./.` includes only `./.` as a directory, and same for `./foo`, so there's no overlap in directories. In that case, `intersection ./. ./foo` should only include files and no directories themselves, since `./.` includes only `./.` as a directory, and same for `./foo`, so there's no overlap in directories.
But intuitively this operation should result in the same as `./foo` everything else is just confusing. But intuitively this operation should result in the same as `./foo` everything else is just confusing.
- (+) This matches how Git only supports files, so developers should already be used to it. - (+) This matches how Git only supports files, so developers should already be used to it.
- (-) Empty directories (even if they contain nested directories) are neither representable nor preserved when coercing from paths. - (-) Empty directories (even if they contain nested directories) are neither representable nor preserved when coercing from paths.
@ -176,7 +205,7 @@ File sets do not support Nix store paths in strings such as `"/nix/store/...-sou
Arguments: Arguments:
- (+) Such paths are usually produced by derivations, which means `toSource` would either: - (+) Such paths are usually produced by derivations, which means `toSource` would either:
- Require IFD if `builtins.path` is used as the underlying primitive - Require [Import From Derivation](https://nixos.org/manual/nix/unstable/language/import-from-derivation) (IFD) if `builtins.path` is used as the underlying primitive
- Require importing the entire `root` into the store such that derivations can be used to do the filtering - Require importing the entire `root` into the store such that derivations can be used to do the filtering
- (+) The convenient path coercion like `union ./foo ./bar` wouldn't work for absolute paths, requiring more verbose alternate interfaces: - (+) The convenient path coercion like `union ./foo ./bar` wouldn't work for absolute paths, requiring more verbose alternate interfaces:
- `let root = "/nix/store/...-source"; in union "${root}/foo" "${root}/bar"` - `let root = "/nix/store/...-source"; in union "${root}/foo" "${root}/bar"`

View file

@ -7,6 +7,7 @@ let
_toSourceFilter _toSourceFilter
_unionMany _unionMany
_printFileset _printFileset
_intersection
; ;
inherit (builtins) inherit (builtins)
@ -18,6 +19,7 @@ let
; ;
inherit (lib.lists) inherit (lib.lists)
elemAt
imap0 imap0
; ;
@ -276,6 +278,45 @@ If a directory does not recursively contain any file, it is omitted from the sto
_unionMany _unionMany
]; ];
/*
The file set containing all files that are in both of two given file sets.
See also [Intersection (set theory)](https://en.wikipedia.org/wiki/Intersection_(set_theory)).
The given file sets are evaluated as lazily as possible,
with the first argument being evaluated first if needed.
Type:
intersection :: FileSet -> FileSet -> FileSet
Example:
# Limit the selected files to the ones in ./., so only ./src and ./Makefile
intersection ./. (unions [ ../LICENSE ./src ./Makefile ])
*/
intersection =
# The first file set.
# This argument can also be a path,
# which gets [implicitly coerced to a file set](#sec-fileset-path-coercion).
fileset1:
# The second file set.
# This argument can also be a path,
# which gets [implicitly coerced to a file set](#sec-fileset-path-coercion).
fileset2:
let
filesets = _coerceMany "lib.fileset.intersection" [
{
context = "first argument";
value = fileset1;
}
{
context = "second argument";
value = fileset2;
}
];
in
_intersection
(elemAt filesets 0)
(elemAt filesets 1);
/* /*
Incrementally evaluate and trace a file set in a pretty way. Incrementally evaluate and trace a file set in a pretty way.
This function is only intended for debugging purposes. This function is only intended for debugging purposes.

View file

@ -172,11 +172,11 @@ rec {
else if ! isPath value then else if ! isPath value then
if isStringLike value then if isStringLike value then
throw '' throw ''
${context} ("${toString value}") is a string-like value, but it should be a path instead. ${context} ("${toString value}") is a string-like value, but it should be a file set or a path instead.
Paths represented as strings are not supported by `lib.fileset`, use `lib.sources` or derivations instead.'' Paths represented as strings are not supported by `lib.fileset`, use `lib.sources` or derivations instead.''
else else
throw '' throw ''
${context} is of type ${typeOf value}, but it should be a path instead.'' ${context} is of type ${typeOf value}, but it should be a file set or a path instead.''
else if ! pathExists value then else if ! pathExists value then
throw '' throw ''
${context} (${toString value}) does not exist.'' ${context} (${toString value}) does not exist.''
@ -461,6 +461,43 @@ rec {
else else
nonEmpty; nonEmpty;
# Transforms the filesetTree of a file set to a shorter base path, e.g.
# _shortenTreeBase [ "foo" ] (_create /foo/bar null)
# => { bar = null; }
_shortenTreeBase = targetBaseComponents: fileset:
let
recurse = index:
# If we haven't reached the required depth yet
if index < length fileset._internalBaseComponents then
# Create an attribute set and recurse as the value, this can be lazily evaluated this way
{ ${elemAt fileset._internalBaseComponents index} = recurse (index + 1); }
else
# Otherwise we reached the appropriate depth, here's the original tree
fileset._internalTree;
in
recurse (length targetBaseComponents);
# Transforms the filesetTree of a file set to a longer base path, e.g.
# _lengthenTreeBase [ "foo" "bar" ] (_create /foo { bar.baz = "regular"; })
# => { baz = "regular"; }
_lengthenTreeBase = targetBaseComponents: fileset:
let
recurse = index: tree:
# If the filesetTree is an attribute set and we haven't reached the required depth yet
if isAttrs tree && index < length targetBaseComponents then
# Recurse with the tree under the right component (which might not exist)
recurse (index + 1) (tree.${elemAt targetBaseComponents index} or null)
else
# For all values here we can just return the tree itself:
# tree == null -> the result is also null, everything is excluded
# tree == "directory" -> the result is also "directory",
# because the base path is always a directory and everything is included
# isAttrs tree -> the result is `tree`
# because we don't need to recurse any more since `index == length longestBaseComponents`
tree;
in
recurse (length fileset._internalBaseComponents) fileset._internalTree;
# Computes the union of a list of filesets. # Computes the union of a list of filesets.
# The filesets must already be coerced and validated to be in the same filesystem root # The filesets must already be coerced and validated to be in the same filesystem root
# Type: [ Fileset ] -> Fileset # Type: [ Fileset ] -> Fileset
@ -497,11 +534,7 @@ rec {
# So the tree under `/foo/bar` gets nested under `{ bar = ...; ... }`, # So the tree under `/foo/bar` gets nested under `{ bar = ...; ... }`,
# while the tree under `/foo/baz` gets nested under `{ baz = ...; ... }` # while the tree under `/foo/baz` gets nested under `{ baz = ...; ... }`
# Therefore allowing combined operations over them. # Therefore allowing combined operations over them.
trees = map (fileset: trees = map (_shortenTreeBase commonBaseComponents) filesetsWithBase;
setAttrByPath
(drop (length commonBaseComponents) fileset._internalBaseComponents)
fileset._internalTree
) filesetsWithBase;
# Folds all trees together into a single one using _unionTree # Folds all trees together into a single one using _unionTree
# We do not use a fold here because it would cause a thunk build-up # We do not use a fold here because it would cause a thunk build-up
@ -533,4 +566,76 @@ rec {
# The non-null elements have to be attribute sets representing partial trees # The non-null elements have to be attribute sets representing partial trees
# We need to recurse into those # We need to recurse into those
zipAttrsWith (name: _unionTrees) withoutNull; zipAttrsWith (name: _unionTrees) withoutNull;
# Computes the intersection of a list of filesets.
# The filesets must already be coerced and validated to be in the same filesystem root
# Type: Fileset -> Fileset -> Fileset
_intersection = fileset1: fileset2:
let
# The common base components prefix, e.g.
# (/foo/bar, /foo/bar/baz) -> /foo/bar
# (/foo/bar, /foo/baz) -> /foo
commonBaseComponentsLength =
# TODO: Have a `lib.lists.commonPrefixLength` function such that we don't need the list allocation from commonPrefix here
length (
commonPrefix
fileset1._internalBaseComponents
fileset2._internalBaseComponents
);
# To be able to intersect filesetTree's together, they need to have the same base path.
# Base paths can be intersected by taking the longest one (if any)
# The fileset with the longest base, if any, e.g.
# (/foo/bar, /foo/bar/baz) -> /foo/bar/baz
# (/foo/bar, /foo/baz) -> null
longestBaseFileset =
if commonBaseComponentsLength == length fileset1._internalBaseComponents then
# The common prefix is the same as the first path, so the second path is equal or longer
fileset2
else if commonBaseComponentsLength == length fileset2._internalBaseComponents then
# The common prefix is the same as the second path, so the first path is longer
fileset1
else
# The common prefix is neither the first nor the second path
# This means there's no overlap between the two sets
null;
# Whether the result should be the empty value without a base
resultIsEmptyWithoutBase =
# If either fileset is the empty fileset without a base, the intersection is too
fileset1._internalIsEmptyWithoutBase
|| fileset2._internalIsEmptyWithoutBase
# If there is no overlap between the base paths
|| longestBaseFileset == null;
# Lengthen each fileset's tree to the longest base prefix
tree1 = _lengthenTreeBase longestBaseFileset._internalBaseComponents fileset1;
tree2 = _lengthenTreeBase longestBaseFileset._internalBaseComponents fileset2;
# With two filesetTree's with the same base, we can compute their intersection
resultTree = _intersectTree tree1 tree2;
in
if resultIsEmptyWithoutBase then
_emptyWithoutBase
else
_create longestBaseFileset._internalBase resultTree;
# The intersection of two filesetTree's with the same base path
# The second element is only evaluated as much as necessary.
# Type: filesetTree -> filesetTree -> filesetTree
_intersectTree = lhs: rhs:
if isAttrs lhs && isAttrs rhs then
# Both sides are attribute sets, we can recurse for the attributes existing on both sides
mapAttrs
(name: _intersectTree lhs.${name})
(builtins.intersectAttrs lhs rhs)
else if lhs == null || isString rhs then
# If the lhs is null, the result should also be null
# And if the rhs is the identity element
# (a string, aka it includes everything), then it's also the lhs
lhs
else
# In all other cases it's the rhs
rhs;
} }

View file

@ -355,8 +355,8 @@ expectFailure 'toSource { root = ./a; fileset = ./.; }' 'lib.fileset.toSource: `
rm -rf * rm -rf *
# Path coercion only works for paths # Path coercion only works for paths
expectFailure 'toSource { root = ./.; fileset = 10; }' 'lib.fileset.toSource: `fileset` is of type int, but it should be a path instead.' expectFailure 'toSource { root = ./.; fileset = 10; }' 'lib.fileset.toSource: `fileset` is of type int, but it should be a file set or a path instead.'
expectFailure 'toSource { root = ./.; fileset = "/some/path"; }' 'lib.fileset.toSource: `fileset` \("/some/path"\) is a string-like value, but it should be a path instead. expectFailure 'toSource { root = ./.; fileset = "/some/path"; }' 'lib.fileset.toSource: `fileset` \("/some/path"\) is a string-like value, but it should be a file set or a path instead.
\s*Paths represented as strings are not supported by `lib.fileset`, use `lib.sources` or derivations instead.' \s*Paths represented as strings are not supported by `lib.fileset`, use `lib.sources` or derivations instead.'
# Path coercion errors for non-existent paths # Path coercion errors for non-existent paths
@ -587,6 +587,97 @@ done
# So, just using 1000 files for now. # So, just using 1000 files for now.
checkFileset 'unions (mapAttrsToList (name: _: ./. + "/${name}/a") (builtins.readDir ./.))' checkFileset 'unions (mapAttrsToList (name: _: ./. + "/${name}/a") (builtins.readDir ./.))'
## lib.fileset.intersection
# Different filesystem roots in root and fileset are not supported
mkdir -p {foo,bar}/mock-root
expectFailure 'with ((import <nixpkgs/lib>).extend (import <nixpkgs/lib/fileset/mock-splitRoot.nix>)).fileset;
toSource { root = ./.; fileset = intersection ./foo/mock-root ./bar/mock-root; }
' 'lib.fileset.intersection: Filesystem roots are not the same:
\s*first argument: root "'"$work"'/foo/mock-root"
\s*second argument: root "'"$work"'/bar/mock-root"
\s*Different roots are not supported.'
rm -rf -- *
# Coercion errors show the correct context
expectFailure 'toSource { root = ./.; fileset = intersection ./a ./.; }' 'lib.fileset.intersection: first argument \('"$work"'/a\) does not exist.'
expectFailure 'toSource { root = ./.; fileset = intersection ./. ./b; }' 'lib.fileset.intersection: second argument \('"$work"'/b\) does not exist.'
# The tree of later arguments should not be evaluated if a former argument already excludes all files
tree=(
[a]=0
)
checkFileset 'intersection _emptyWithoutBase (_create ./. (abort "This should not be used!"))'
# We don't have any combinators that can explicitly remove files yet, so we need to rely on internal functions to test this for now
checkFileset 'intersection (_create ./. { a = null; }) (_create ./. { a = abort "This should not be used!"; })'
# If either side is empty, the result is empty
tree=(
[a]=0
)
checkFileset 'intersection _emptyWithoutBase _emptyWithoutBase'
checkFileset 'intersection _emptyWithoutBase (_create ./. null)'
checkFileset 'intersection (_create ./. null) _emptyWithoutBase'
checkFileset 'intersection (_create ./. null) (_create ./. null)'
# If the intersection base paths are not overlapping, the result is empty and has no base path
mkdir a b c
touch {a,b,c}/x
expectEqual 'toSource { root = ./c; fileset = intersection ./a ./b; }' 'toSource { root = ./c; fileset = _emptyWithoutBase; }'
rm -rf -- *
# If the intersection exists, the resulting base path is the longest of them
mkdir a
touch x a/b
expectEqual 'toSource { root = ./a; fileset = intersection ./a ./.; }' 'toSource { root = ./a; fileset = ./a; }'
expectEqual 'toSource { root = ./a; fileset = intersection ./. ./a; }' 'toSource { root = ./a; fileset = ./a; }'
rm -rf -- *
# Also finds the intersection with null'd filesetTree's
tree=(
[a]=0
[b]=1
[c]=0
)
checkFileset 'intersection (_create ./. { a = "regular"; b = "regular"; c = null; }) (_create ./. { a = null; b = "regular"; c = "regular"; })'
# Actually computes the intersection between files
tree=(
[a]=0
[b]=0
[c]=1
[d]=1
[e]=0
[f]=0
)
checkFileset 'intersection (unions [ ./a ./b ./c ./d ]) (unions [ ./c ./d ./e ./f ])'
tree=(
[a/x]=0
[a/y]=0
[b/x]=1
[b/y]=1
[c/x]=0
[c/y]=0
)
checkFileset 'intersection ./b ./.'
checkFileset 'intersection ./b (unions [ ./a/x ./a/y ./b/x ./b/y ./c/x ./c/y ])'
# Complicated case
tree=(
[a/x]=0
[a/b/i]=1
[c/d/x]=0
[c/d/f]=1
[c/x]=0
[c/e/i]=1
[c/e/j]=1
)
checkFileset 'intersection (unions [ ./a/b ./c/d ./c/e ]) (unions [ ./a ./c/d/f ./c/e ])'
## Tracing ## Tracing
# The second trace argument is returned # The second trace argument is returned
@ -609,6 +700,10 @@ rm -rf -- *
# The empty file set without a base also prints as empty # The empty file set without a base also prints as empty
expectTrace '_emptyWithoutBase' '(empty)' expectTrace '_emptyWithoutBase' '(empty)'
expectTrace 'unions [ ]' '(empty)' expectTrace 'unions [ ]' '(empty)'
mkdir foo bar
touch {foo,bar}/x
expectTrace 'intersection ./foo ./bar' '(empty)'
rm -rf -- *
# If a directory is fully included, print it as such # If a directory is fully included, print it as such
touch a touch a

View file

@ -1,26 +1,76 @@
{ lib, ... }: { lib, ... }:
rec { rec {
/* /*
Compute the fixed point of the given function `f`, which is usually an `fix f` computes the fixed point of the given function `f`. In other words, the return value is `x` in `x = f x`.
attribute set that expects its final, non-recursive representation as an
argument:
``` `f` must be a lazy function.
f = self: { foo = "foo"; bar = "bar"; foobar = self.foo + self.bar; } This means that `x` must be a value that can be partially evaluated,
such as an attribute set, a list, or a function.
This way, `f` can use one part of `x` to compute another part.
**Relation to syntactic recursion**
This section explains `fix` by refactoring from syntactic recursion to a call of `fix` instead.
For context, Nix lets you define attributes in terms of other attributes syntactically using the [`rec { }` syntax](https://nixos.org/manual/nix/stable/language/constructs.html#recursive-sets).
```nix
nix-repl> rec {
foo = "foo";
bar = "bar";
foobar = foo + bar;
}
{ bar = "bar"; foo = "foo"; foobar = "foobar"; }
``` ```
Nix evaluates this recursion until all references to `self` have been This is convenient when constructing a value to pass to a function for example,
resolved. At that point, the final result is returned and `f x = x` holds: but an equivalent effect can be achieved with the `let` binding syntax:
```nix
nix-repl> let self = {
foo = "foo";
bar = "bar";
foobar = self.foo + self.bar;
}; in self
{ bar = "bar"; foo = "foo"; foobar = "foobar"; }
``` ```
But in general you can get more reuse out of `let` bindings by refactoring them to a function.
```nix
nix-repl> f = self: {
foo = "foo";
bar = "bar";
foobar = self.foo + self.bar;
}
```
This is where `fix` comes in, it contains the syntactic that's not in `f` anymore.
```nix
nix-repl> fix = f:
let self = f self; in self;
```
By applying `fix` we get the final result.
```nix
nix-repl> fix f nix-repl> fix f
{ bar = "bar"; foo = "foo"; foobar = "foobar"; } { bar = "bar"; foo = "foo"; foobar = "foobar"; }
``` ```
Such a refactored `f` using `fix` is not useful by itself.
See [`extends`](#function-library-lib.fixedPoints.extends) for an example use case.
There `self` is also often called `final`.
Type: fix :: (a -> a) -> a Type: fix :: (a -> a) -> a
See https://en.wikipedia.org/wiki/Fixed-point_combinator for further Example:
details. fix (self: { foo = "foo"; bar = "bar"; foobar = self.foo + self.bar; })
=> { bar = "bar"; foo = "foo"; foobar = "foobar"; }
fix (self: [ 1 2 (elemAt self 0 + elemAt self 1) ])
=> [ 1 2 3 ]
*/ */
fix = f: let x = f x; in x; fix = f: let x = f x; in x;

View file

@ -109,7 +109,13 @@ rec {
The package is specified in the third argument under `default` as a list of strings The package is specified in the third argument under `default` as a list of strings
representing its attribute path in nixpkgs (or another package set). representing its attribute path in nixpkgs (or another package set).
Because of this, you need to pass nixpkgs itself (or a subset) as the first argument. Because of this, you need to pass nixpkgs itself (usually `pkgs` in a module;
alternatively to nixpkgs itself, another package set) as the first argument.
If you pass another package set you should set the `pkgsText` option.
This option is used to display the expression for the package set. It is `"pkgs"` by default.
If your expression is complex you should parenthesize it, as the `pkgsText` argument
is usually immediately followed by an attribute lookup (`.`).
The second argument may be either a string or a list of strings. The second argument may be either a string or a list of strings.
It provides the display name of the package in the description of the generated option It provides the display name of the package in the description of the generated option
@ -118,68 +124,100 @@ rec {
To include extra information in the description, pass `extraDescription` to To include extra information in the description, pass `extraDescription` to
append arbitrary text to the generated description. append arbitrary text to the generated description.
You can also pass an `example` value, either a literal string or an attribute path. You can also pass an `example` value, either a literal string or an attribute path.
The default argument can be omitted if the provided name is The `default` argument can be omitted if the provided name is
an attribute of pkgs (if name is a string) or a an attribute of pkgs (if `name` is a string) or a valid attribute path in pkgs (if `name` is a list).
valid attribute path in pkgs (if name is a list). You can also set `default` to just a string in which case it is interpreted as an attribute name
(a singleton attribute path, if you will).
If you wish to explicitly provide no default, pass `null` as `default`. If you wish to explicitly provide no default, pass `null` as `default`.
Type: mkPackageOption :: pkgs -> (string|[string]) -> { default? :: [string], example? :: null|string|[string], extraDescription? :: string } -> option If you want users to be able to set no package, pass `nullable = true`.
In this mode a `default = null` will not be interpreted as no default and is interpreted literally.
Type: mkPackageOption :: pkgs -> (string|[string]) -> { nullable? :: bool, default? :: string|[string], example? :: null|string|[string], extraDescription? :: string, pkgsText? :: string } -> option
Example: Example:
mkPackageOption pkgs "hello" { } mkPackageOption pkgs "hello" { }
=> { _type = "option"; default = «derivation /nix/store/3r2vg51hlxj3cx5vscp0vkv60bqxkaq0-hello-2.10.drv»; defaultText = { ... }; description = "The hello package to use."; type = { ... }; } => { ...; default = pkgs.hello; defaultText = literalExpression "pkgs.hello"; description = "The hello package to use."; type = package; }
Example: Example:
mkPackageOption pkgs "GHC" { mkPackageOption pkgs "GHC" {
default = [ "ghc" ]; default = [ "ghc" ];
example = "pkgs.haskell.packages.ghc92.ghc.withPackages (hkgs: [ hkgs.primes ])"; example = "pkgs.haskell.packages.ghc92.ghc.withPackages (hkgs: [ hkgs.primes ])";
} }
=> { _type = "option"; default = «derivation /nix/store/jxx55cxsjrf8kyh3fp2ya17q99w7541r-ghc-8.10.7.drv»; defaultText = { ... }; description = "The GHC package to use."; example = { ... }; type = { ... }; } => { ...; default = pkgs.ghc; defaultText = literalExpression "pkgs.ghc"; description = "The GHC package to use."; example = literalExpression "pkgs.haskell.packages.ghc92.ghc.withPackages (hkgs: [ hkgs.primes ])"; type = package; }
Example: Example:
mkPackageOption pkgs [ "python39Packages" "pytorch" ] { mkPackageOption pkgs [ "python3Packages" "pytorch" ] {
extraDescription = "This is an example and doesn't actually do anything."; extraDescription = "This is an example and doesn't actually do anything.";
} }
=> { _type = "option"; default = «derivation /nix/store/gvqgsnc4fif9whvwd9ppa568yxbkmvk8-python3.9-pytorch-1.10.2.drv»; defaultText = { ... }; description = "The pytorch package to use. This is an example and doesn't actually do anything."; type = { ... }; } => { ...; default = pkgs.python3Packages.pytorch; defaultText = literalExpression "pkgs.python3Packages.pytorch"; description = "The pytorch package to use. This is an example and doesn't actually do anything."; type = package; }
Example:
mkPackageOption pkgs "nushell" {
nullable = true;
}
=> { ...; default = pkgs.nushell; defaultText = literalExpression "pkgs.nushell"; description = "The nushell package to use."; type = nullOr package; }
Example:
mkPackageOption pkgs "coreutils" {
default = null;
}
=> { ...; description = "The coreutils package to use."; type = package; }
Example:
mkPackageOption pkgs "dbus" {
nullable = true;
default = null;
}
=> { ...; default = null; description = "The dbus package to use."; type = nullOr package; }
Example:
mkPackageOption pkgs.javaPackages "OpenJFX" {
default = "openjfx20";
pkgsText = "pkgs.javaPackages";
}
=> { ...; default = pkgs.javaPackages.openjfx20; defaultText = literalExpression "pkgs.javaPackages.openjfx20"; description = "The OpenJFX package to use."; type = package; }
*/ */
mkPackageOption = mkPackageOption =
# Package set (a specific version of nixpkgs or a subset) # Package set (an instantiation of nixpkgs such as pkgs in modules or another package set)
pkgs: pkgs:
# Name for the package, shown in option description # Name for the package, shown in option description
name: name:
{ {
# Whether the package can be null, for example to disable installing a package altogether. # Whether the package can be null, for example to disable installing a package altogether (defaults to false)
nullable ? false, nullable ? false,
# The attribute path where the default package is located (may be omitted) # The attribute path where the default package is located (may be omitted, in which case it is copied from `name`)
default ? name, default ? name,
# A string or an attribute path to use as an example (may be omitted) # A string or an attribute path to use as an example (may be omitted)
example ? null, example ? null,
# Additional text to include in the option description (may be omitted) # Additional text to include in the option description (may be omitted)
extraDescription ? "", extraDescription ? "",
# Representation of the package set passed as pkgs (defaults to `"pkgs"`)
pkgsText ? "pkgs"
}: }:
let let
name' = if isList name then last name else name; name' = if isList name then last name else name;
in mkOption ({ default' = if isList default then default else [ default ];
type = with lib.types; (if nullable then nullOr else lib.id) package; defaultText = concatStringsSep "." default';
defaultValue = attrByPath default'
(throw "${defaultText} cannot be found in ${pkgsText}") pkgs;
defaults = if default != null then {
default = defaultValue;
defaultText = literalExpression ("${pkgsText}." + defaultText);
} else optionalAttrs nullable {
default = null;
};
in mkOption (defaults // {
description = "The ${name'} package to use." description = "The ${name'} package to use."
+ (if extraDescription == "" then "" else " ") + extraDescription; + (if extraDescription == "" then "" else " ") + extraDescription;
} // (if default != null then let type = with lib.types; (if nullable then nullOr else lib.id) package;
default' = if isList default then default else [ default ]; } // optionalAttrs (example != null) {
defaultPath = concatStringsSep "." default';
defaultValue = attrByPath default'
(throw "${defaultPath} cannot be found in pkgs") pkgs;
in {
default = defaultValue;
defaultText = literalExpression ("pkgs." + defaultPath);
} else if nullable then {
default = null;
} else { }) // lib.optionalAttrs (example != null) {
example = literalExpression example = literalExpression
(if isList example then "pkgs." + concatStringsSep "." example else example); (if isList example then "${pkgsText}." + concatStringsSep "." example else example);
}); });
/* Alias of mkPackageOption. Previously used to create options with markdown /* Alias of mkPackageOption. Previously used to create options with markdown

View file

@ -854,7 +854,7 @@ rec {
assert (lib.isBool flag); assert (lib.isBool flag);
mesonOption feature (if flag then "enabled" else "disabled"); mesonOption feature (if flag then "enabled" else "disabled");
/* Create an --{enable,disable}-<feat> string that can be passed to /* Create an --{enable,disable}-<feature> string that can be passed to
standard GNU Autoconf scripts. standard GNU Autoconf scripts.
Example: Example:
@ -863,11 +863,12 @@ rec {
enableFeature false "shared" enableFeature false "shared"
=> "--disable-shared" => "--disable-shared"
*/ */
enableFeature = enable: feat: enableFeature = flag: feature:
assert isString feat; # e.g. passing openssl instead of "openssl" assert lib.isBool flag;
"--${if enable then "enable" else "disable"}-${feat}"; assert lib.isString feature; # e.g. passing openssl instead of "openssl"
"--${if flag then "enable" else "disable"}-${feature}";
/* Create an --{enable-<feat>=<value>,disable-<feat>} string that can be passed to /* Create an --{enable-<feature>=<value>,disable-<feature>} string that can be passed to
standard GNU Autoconf scripts. standard GNU Autoconf scripts.
Example: Example:
@ -876,9 +877,10 @@ rec {
enableFeatureAs false "shared" (throw "ignored") enableFeatureAs false "shared" (throw "ignored")
=> "--disable-shared" => "--disable-shared"
*/ */
enableFeatureAs = enable: feat: value: enableFeature enable feat + optionalString enable "=${value}"; enableFeatureAs = flag: feature: value:
enableFeature flag feature + optionalString flag "=${value}";
/* Create an --{with,without}-<feat> string that can be passed to /* Create an --{with,without}-<feature> string that can be passed to
standard GNU Autoconf scripts. standard GNU Autoconf scripts.
Example: Example:
@ -887,11 +889,11 @@ rec {
withFeature false "shared" withFeature false "shared"
=> "--without-shared" => "--without-shared"
*/ */
withFeature = with_: feat: withFeature = flag: feature:
assert isString feat; # e.g. passing openssl instead of "openssl" assert isString feature; # e.g. passing openssl instead of "openssl"
"--${if with_ then "with" else "without"}-${feat}"; "--${if flag then "with" else "without"}-${feature}";
/* Create an --{with-<feat>=<value>,without-<feat>} string that can be passed to /* Create an --{with-<feature>=<value>,without-<feature>} string that can be passed to
standard GNU Autoconf scripts. standard GNU Autoconf scripts.
Example: Example:
@ -900,7 +902,8 @@ rec {
withFeatureAs false "shared" (throw "ignored") withFeatureAs false "shared" (throw "ignored")
=> "--without-shared" => "--without-shared"
*/ */
withFeatureAs = with_: feat: value: withFeature with_ feat + optionalString with_ "=${value}"; withFeatureAs = flag: feature: value:
withFeature flag feature + optionalString flag "=${value}";
/* Create a fixed width string with additional prefix to match /* Create a fixed width string with additional prefix to match
required width. required width.

View file

@ -20,6 +20,10 @@ let
expr = (builtins.tryEval (builtins.seq expr "didn't throw")); expr = (builtins.tryEval (builtins.seq expr "didn't throw"));
expected = { success = false; value = false; }; expected = { success = false; value = false; };
}; };
testingEval = expr: {
expr = (builtins.tryEval expr).success;
expected = true;
};
testingDeepThrow = expr: testingThrow (builtins.deepSeq expr expr); testingDeepThrow = expr: testingThrow (builtins.deepSeq expr expr);
testSanitizeDerivationName = { name, expected }: testSanitizeDerivationName = { name, expected }:
@ -39,6 +43,18 @@ in
runTests { runTests {
# CUSTOMIZATION
testFunctionArgsMakeOverridable = {
expr = functionArgs (makeOverridable ({ a, b, c ? null}: {}));
expected = { a = false; b = false; c = true; };
};
testFunctionArgsMakeOverridableOverride = {
expr = functionArgs (makeOverridable ({ a, b, c ? null }: {}) { a = 1; b = 2; }).override;
expected = { a = false; b = false; c = true; };
};
# TRIVIAL # TRIVIAL
testId = { testId = {
@ -816,6 +832,26 @@ runTests {
expected = { a = 1; b = 2; }; expected = { a = 1; b = 2; };
}; };
testListAttrsReverse = let
exampleAttrs = {foo=1; bar="asdf"; baz = [1 3 3 7]; fnord=null;};
exampleSingletonList = [{name="foo"; value=1;}];
in {
expr = {
isReverseToListToAttrs = builtins.listToAttrs (attrsToList exampleAttrs) == exampleAttrs;
isReverseToAttrsToList = attrsToList (builtins.listToAttrs exampleSingletonList) == exampleSingletonList;
testDuplicatePruningBehaviour = attrsToList (builtins.listToAttrs [{name="a"; value=2;} {name="a"; value=1;}]);
};
expected = {
isReverseToAttrsToList = true;
isReverseToListToAttrs = true;
testDuplicatePruningBehaviour = [{name="a"; value=2;}];
};
};
testAttrsToListsCanDealWithFunctions = testingEval (
attrsToList { someFunc= a: a + 1;}
);
# GENERATORS # GENERATORS
# these tests assume attributes are converted to lists # these tests assume attributes are converted to lists
# in alphabetical order # in alphabetical order

View file

@ -227,8 +227,16 @@ checkConfigOutput '^false$' config.enableAlias ./alias-with-priority-can-overrid
# Check mkPackageOption # Check mkPackageOption
checkConfigOutput '^"hello"$' config.package.pname ./declare-mkPackageOption.nix checkConfigOutput '^"hello"$' config.package.pname ./declare-mkPackageOption.nix
checkConfigOutput '^"hello"$' config.namedPackage.pname ./declare-mkPackageOption.nix
checkConfigOutput '^".*Hello.*"$' options.namedPackage.description ./declare-mkPackageOption.nix
checkConfigOutput '^"hello"$' config.pathPackage.pname ./declare-mkPackageOption.nix
checkConfigOutput '^"pkgs\.hello\.override \{ stdenv = pkgs\.clangStdenv; \}"$' options.packageWithExample.example.text ./declare-mkPackageOption.nix
checkConfigOutput '^".*Example extra description\..*"$' options.packageWithExtraDescription.description ./declare-mkPackageOption.nix
checkConfigError 'The option .undefinedPackage. is used but not defined' config.undefinedPackage ./declare-mkPackageOption.nix checkConfigError 'The option .undefinedPackage. is used but not defined' config.undefinedPackage ./declare-mkPackageOption.nix
checkConfigOutput '^null$' config.nullablePackage ./declare-mkPackageOption.nix checkConfigOutput '^null$' config.nullablePackage ./declare-mkPackageOption.nix
checkConfigOutput '^"null or package"$' options.nullablePackageWithDefault.type.description ./declare-mkPackageOption.nix
checkConfigOutput '^"myPkgs\.hello"$' options.packageWithPkgsText.defaultText.text ./declare-mkPackageOption.nix
checkConfigOutput '^"hello-other"$' options.packageFromOtherSet.default.pname ./declare-mkPackageOption.nix
# submoduleWith # submoduleWith

View file

@ -7,6 +7,28 @@ in {
options = { options = {
package = lib.mkPackageOption pkgs "hello" { }; package = lib.mkPackageOption pkgs "hello" { };
namedPackage = lib.mkPackageOption pkgs "Hello" {
default = [ "hello" ];
};
namedPackageSingletonDefault = lib.mkPackageOption pkgs "Hello" {
default = "hello";
};
pathPackage = lib.mkPackageOption pkgs [ "hello" ] { };
packageWithExample = lib.mkPackageOption pkgs "hello" {
example = "pkgs.hello.override { stdenv = pkgs.clangStdenv; }";
};
packageWithPathExample = lib.mkPackageOption pkgs "hello" {
example = [ "hello" ];
};
packageWithExtraDescription = lib.mkPackageOption pkgs "hello" {
extraDescription = "Example extra description.";
};
undefinedPackage = lib.mkPackageOption pkgs "hello" { undefinedPackage = lib.mkPackageOption pkgs "hello" {
default = null; default = null;
}; };
@ -15,5 +37,17 @@ in {
nullable = true; nullable = true;
default = null; default = null;
}; };
nullablePackageWithDefault = lib.mkPackageOption pkgs "hello" {
nullable = true;
};
packageWithPkgsText = lib.mkPackageOption pkgs "hello" {
pkgsText = "myPkgs";
};
packageFromOtherSet = let myPkgs = {
hello = pkgs.hello // { pname = "hello-other"; };
}; in lib.mkPackageOption myPkgs "hello" { };
}; };
} }

View file

@ -793,6 +793,12 @@
githubId = 5053729; githubId = 5053729;
name = "Alias Gram"; name = "Alias Gram";
}; };
alias-dev = {
email = "alias-dev@protonmail.com";
github = "alias-dev";
githubId = 30437811;
name = "Alex Andrews";
};
alibabzo = { alibabzo = {
email = "alistair.bill@gmail.com"; email = "alistair.bill@gmail.com";
github = "alistairbill"; github = "alistairbill";
@ -3695,6 +3701,12 @@
githubId = 490965; githubId = 490965;
name = "Craig Swank"; name = "Craig Swank";
}; };
ctron = {
email = "ctron@dentrassi.de";
github = "ctron";
githubId = 202474;
name = "Jens Reimann";
};
cust0dian = { cust0dian = {
email = "serg@effectful.software"; email = "serg@effectful.software";
github = "cust0dian"; github = "cust0dian";
@ -3962,7 +3974,7 @@
}; };
davidarmstronglewis = { davidarmstronglewis = {
email = "davidlewis@mac.com"; email = "davidlewis@mac.com";
github = "davidarmstronglewis"; github = "oceanlewis";
githubId = 6754950; githubId = 6754950;
name = "David Armstrong Lewis"; name = "David Armstrong Lewis";
}; };
@ -4412,6 +4424,15 @@
githubId = 14034137; githubId = 14034137;
name = "Mostly Void"; name = "Mostly Void";
}; };
ditsuke = {
name = "Tushar";
email = "hello@ditsuke.com";
github = "ditsuke";
githubId = 72784348;
keys = [{
fingerprint = "8FD2 153F 4889 541A 54F1 E09E 71B6 C31C 8A5A 9D21";
}];
};
djacu = { djacu = {
email = "daniel.n.baker@gmail.com"; email = "daniel.n.baker@gmail.com";
github = "djacu"; github = "djacu";
@ -5305,6 +5326,11 @@
githubId = 1855930; githubId = 1855930;
name = "Ertugrul Söylemez"; name = "Ertugrul Söylemez";
}; };
esau79p = {
github = "EsAu79p";
githubId = 21313906;
name = "EsAu";
};
esclear = { esclear = {
github = "esclear"; github = "esclear";
githubId = 7432848; githubId = 7432848;
@ -6306,6 +6332,16 @@
fingerprint = "D0CF 440A A703 E0F9 73CB A078 82BB 70D5 41AE 2DB4"; fingerprint = "D0CF 440A A703 E0F9 73CB A078 82BB 70D5 41AE 2DB4";
}]; }];
}; };
gepbird = {
email = "gutyina.gergo.2@gmail.com";
github = "gepbird";
githubId = 29818440;
name = "Gutyina Gergő";
keys = [
{ fingerprint = "RoAfvqa6w1l8Vdm3W60TDXurYwJ6h03VEGD+wDNGEwc"; }
{ fingerprint = "MP2UpIRtJpbFFqyucP431H/FPCfn58UhEUTro4lXtRs"; }
];
};
gerg-l = { gerg-l = {
email = "gregleyda@proton.me"; email = "gregleyda@proton.me";
github = "Gerg-L"; github = "Gerg-L";
@ -6458,6 +6494,10 @@
githubId = 1447245; githubId = 1447245;
name = "Robin Gloster"; name = "Robin Gloster";
}; };
gm6k = {
email = "nix@quidecco.pl";
name = "Isidor Zeuner";
};
gmemstr = { gmemstr = {
email = "git@gmem.ca"; email = "git@gmem.ca";
github = "gmemstr"; github = "gmemstr";
@ -7176,6 +7216,12 @@
fingerprint = "731A 7A05 AD8B 3AE5 956A C227 4A03 18E0 4E55 5DE5"; fingerprint = "731A 7A05 AD8B 3AE5 956A C227 4A03 18E0 4E55 5DE5";
}]; }];
}; };
hubble = {
name = "Hubble the Wolverine";
matrix = "@hubofeverything:bark.lgbt";
github = "the-furry-hubofeverything";
githubId = 53921912;
};
hufman = { hufman = {
email = "hufman@gmail.com"; email = "hufman@gmail.com";
github = "hufman"; github = "hufman";
@ -8164,6 +8210,12 @@
githubId = 6445082; githubId = 6445082;
name = "Joseph Lukasik"; name = "Joseph Lukasik";
}; };
jgoux = {
email = "hi@jgoux.dev";
github = "jgoux";
githubId = 1443499;
name = "Julien Goux";
};
jhh = { jhh = {
email = "jeff@j3ff.io"; email = "jeff@j3ff.io";
github = "jhh"; github = "jhh";
@ -8750,6 +8802,12 @@
githubId = 1189739; githubId = 1189739;
name = "Julio Borja Barra"; name = "Julio Borja Barra";
}; };
jue89 = {
email = "me@jue.yt";
github = "jue89";
githubId = 6105784;
name = "Juergen Fitschen";
};
jugendhacker = { jugendhacker = {
name = "j.r"; name = "j.r";
email = "j.r@jugendhacker.de"; email = "j.r@jugendhacker.de";
@ -8894,6 +8952,15 @@
githubId = 386765; githubId = 386765;
matrix = "@k900:0upti.me"; matrix = "@k900:0upti.me";
}; };
kachick = {
email = "kachick1@gmail.com";
github = "kachick";
githubId = 1180335;
name = "Kenichi Kamiya";
keys = [{
fingerprint = "9121 5D87 20CA B405 C63F 24D2 EF6E 574D 040A E2A5";
}];
};
kaction = { kaction = {
name = "Dmitry Bogatov"; name = "Dmitry Bogatov";
email = "KAction@disroot.org"; email = "KAction@disroot.org";
@ -9981,6 +10048,17 @@
githubId = 3696783; githubId = 3696783;
name = "Leroy Hopson"; name = "Leroy Hopson";
}; };
liketechnik = {
name = "Florian Warzecha";
email = "liketechnik@disroot.org";
github = "liketechnik";
githubId = 24209689;
keys = [{
fingerprint = "92D8 A09D 03DD B774 AABD 53B9 E136 2F07 D750 DB5C";
}];
};
lillycham = { lillycham = {
email = "lillycat332@gmail.com"; email = "lillycat332@gmail.com";
github = "lillycat332"; github = "lillycat332";
@ -11019,12 +11097,6 @@
githubId = 4708337; githubId = 4708337;
name = "Marcelo A. de L. Santos"; name = "Marcelo A. de L. Santos";
}; };
maxhille = {
email = "mh@lambdasoup.com";
github = "maxhille";
githubId = 693447;
name = "Max Hille";
};
maximsmol = { maximsmol = {
email = "maximsmol@gmail.com"; email = "maximsmol@gmail.com";
github = "maximsmol"; github = "maximsmol";
@ -12835,6 +12907,12 @@
githubId = 9939720; githubId = 9939720;
name = "Philippe Nguyen"; name = "Philippe Nguyen";
}; };
npulidomateo = {
matrix = "@npulidomateo:matrix.org";
github = "npulidomateo";
githubId = 13149442;
name = "Nico Pulido-Mateo";
};
nrdxp = { nrdxp = {
email = "tim.deh@pm.me"; email = "tim.deh@pm.me";
matrix = "@timdeh:matrix.org"; matrix = "@timdeh:matrix.org";
@ -14143,6 +14221,12 @@
githubId = 406946; githubId = 406946;
name = "Valentin Lorentz"; name = "Valentin Lorentz";
}; };
prominentretail = {
email = "me@jakepark.me";
github = "ProminentRetail";
githubId = 94048404;
name = "Jake Park";
};
proofconstruction = { proofconstruction = {
email = "source@proof.construction"; email = "source@proof.construction";
github = "proofconstruction"; github = "proofconstruction";
@ -14384,6 +14468,12 @@
githubId = 1332289; githubId = 1332289;
name = "Quentin Machu"; name = "Quentin Machu";
}; };
quinn-dougherty = {
email = "quinnd@riseup.net";
github = "quinn-dougherty";
githubId = 39039420;
name = "Quinn Dougherty";
};
qyliss = { qyliss = {
email = "hi@alyssa.is"; email = "hi@alyssa.is";
github = "alyssais"; github = "alyssais";
@ -14786,6 +14876,12 @@
githubId = 42619; githubId = 42619;
name = "Wei-Ming Yang"; name = "Wei-Ming Yang";
}; };
rickvanprim = {
email = "me@rickvanprim.com";
github = "rickvanprim";
githubId = 13792812;
name = "James Leitch";
};
rickynils = { rickynils = {
email = "rickynils@gmail.com"; email = "rickynils@gmail.com";
github = "rickynils"; github = "rickynils";
@ -15106,15 +15202,6 @@
}]; }];
name = "Rahul Butani"; name = "Rahul Butani";
}; };
rs0vere = {
email = "rs0vere@proton.me";
github = "rs0vere";
githubId = 140035635;
keys = [{
fingerprint = "C6D8 B5C2 FA79 901B DCCF 95E1 FEC4 5C5A ED00 C58D";
}];
name = "Red Star Over Earth";
};
rski = { rski = {
name = "rski"; name = "rski";
email = "rom.skiad+nix@gmail.com"; email = "rom.skiad+nix@gmail.com";
@ -15139,6 +15226,12 @@
githubId = 47790121; githubId = 47790121;
name = "Ryan Burns"; name = "Ryan Burns";
}; };
rtimush = {
email = "rtimush@gmail.com";
github = "rtimush";
githubId = 831307;
name = "Roman Timushev";
};
rtreffer = { rtreffer = {
email = "treffer+nixos@measite.de"; email = "treffer+nixos@measite.de";
github = "rtreffer"; github = "rtreffer";
@ -15255,6 +15348,12 @@
fingerprint = "E4F4 1EAB BF0F C785 06D8 62EF EF68 CF41 D42A 593D"; fingerprint = "E4F4 1EAB BF0F C785 06D8 62EF EF68 CF41 D42A 593D";
}]; }];
}; };
ryangibb = {
email = "ryan@freumh.org";
github = "ryangibb";
githubId = 22669046;
name = "Ryan Gibb";
};
ryanorendorff = { ryanorendorff = {
github = "ryanorendorff"; github = "ryanorendorff";
githubId = 12442942; githubId = 12442942;
@ -15370,7 +15469,7 @@
}; };
SamirTalwar = { SamirTalwar = {
email = "lazy.git@functional.computer"; email = "lazy.git@functional.computer";
github = "SamirTalwar"; github = "abstracte";
githubId = 47852; githubId = 47852;
name = "Samir Talwar"; name = "Samir Talwar";
}; };
@ -15524,6 +15623,12 @@
githubId = 3958212; githubId = 3958212;
name = "Tom Sorlie"; name = "Tom Sorlie";
}; };
schinmai-akamai = {
email = "schinmai@akamai.com";
github = "schinmai-akamai";
githubId = 70169773;
name = "Tarun Chinmai Sekar";
};
schmitthenner = { schmitthenner = {
email = "development@schmitthenner.eu"; email = "development@schmitthenner.eu";
github = "fkz"; github = "fkz";
@ -16320,6 +16425,16 @@
fingerprint = "E067 520F 5EF2 C175 3F60 50C0 BA46 725F 6A26 7442"; fingerprint = "E067 520F 5EF2 C175 3F60 50C0 BA46 725F 6A26 7442";
}]; }];
}; };
soispha = {
name = "Soispha";
email = "soispha@vhack.eu";
matrix = "@soispha:vhack.eu";
github = "soispha";
githubId = 132207423;
keys = [{
fingerprint = "9606 FC74 9FCE 1636 0723 D4AD A5E9 4010 C3A6 42AD";
}];
};
solson = { solson = {
email = "scott@solson.me"; email = "scott@solson.me";
matrix = "@solson:matrix.org"; matrix = "@solson:matrix.org";
@ -16384,6 +16499,11 @@
fingerprint = "75F0 AB7C FE01 D077 AEE6 CAFD 353E 4A18 EE0F AB72"; fingerprint = "75F0 AB7C FE01 D077 AEE6 CAFD 353E 4A18 EE0F AB72";
}]; }];
}; };
spacefault = {
github = "spacefault";
githubId = 74156492;
name = "spacefault";
};
spacefrogg = { spacefrogg = {
email = "spacefrogg-nixos@meterriblecrew.net"; email = "spacefrogg-nixos@meterriblecrew.net";
github = "spacefrogg"; github = "spacefrogg";
@ -17779,12 +17899,6 @@
githubId = 10110; githubId = 10110;
name = "Travis B. Hartwell"; name = "Travis B. Hartwell";
}; };
travisdavis-ops = {
email = "travisdavismedia@gmail.com";
github = "TravisDavis-ops";
githubId = 52011418;
name = "Travis Davis";
};
traxys = { traxys = {
email = "quentin+dev@familleboyer.net"; email = "quentin+dev@familleboyer.net";
github = "traxys"; github = "traxys";
@ -17994,6 +18108,12 @@
githubId = 1983821; githubId = 1983821;
name = "Eric Wolf"; name = "Eric Wolf";
}; };
u2x1 = {
email = "u2x1@outlook.com";
github = "u2x1";
githubId = 30677291;
name = "u2x1";
};
uakci = { uakci = {
name = "uakci"; name = "uakci";
email = "uakci@uakci.pl"; email = "uakci@uakci.pl";
@ -18012,6 +18132,16 @@
githubId = 1607770; githubId = 1607770;
name = "Ulrik Strid"; name = "Ulrik Strid";
}; };
unclamped = {
name = "Maru";
email = "clear6860@tutanota.com";
matrix = "@unhidden0174:matrix.org";
github = "unclamped";
githubId = 104658278;
keys = [{
fingerprint = "57A2 CC43 3068 CB62 89C1 F1DA 9137 BB2E 77AD DE7E";
}];
};
unclechu = { unclechu = {
name = "Viacheslav Lotsmanov"; name = "Viacheslav Lotsmanov";
email = "lotsmanov89@gmail.com"; email = "lotsmanov89@gmail.com";
@ -18303,6 +18433,15 @@
githubId = 245573; githubId = 245573;
name = "Dmitry Kalinkin"; name = "Dmitry Kalinkin";
}; };
vgskye = {
name = "Skye Green";
email = "me@skye.vg";
github = "vgskye";
githubId = 116078858;
keys = [{
fingerprint = "CDEA 7E04 69E3 0885 A754 4B05 0104 BC05 F41B 77B8";
}];
};
victormeriqui = { victormeriqui = {
name = "Victor Meriqui"; name = "Victor Meriqui";
email = "victor.meriqui@ororatech.com"; email = "victor.meriqui@ororatech.com";
@ -19229,6 +19368,13 @@
github = "YorikSar"; github = "YorikSar";
githubId = 428074; githubId = 428074;
}; };
YoshiRulz = {
name = "YoshiRulz";
email = "OSSYoshiRulz+Nixpkgs@gmail.com";
matrix = "@YoshiRulz:matrix.org";
github = "YoshiRulz";
githubId = 13409956;
};
yrashk = { yrashk = {
email = "yrashk@gmail.com"; email = "yrashk@gmail.com";
github = "yrashk"; github = "yrashk";

View file

@ -13,12 +13,15 @@ STDOUT->autoflush(1);
my $ua = LWP::UserAgent->new(); my $ua = LWP::UserAgent->new();
if (!defined $ENV{GH_TOKEN}) {
die "Set GH_TOKEN before running this script";
}
keys %$maintainers_json; # reset the internal iterator so a prior each() doesn't affect the loop keys %$maintainers_json; # reset the internal iterator so a prior each() doesn't affect the loop
while(my($k, $v) = each %$maintainers_json) { while(my($k, $v) = each %$maintainers_json) {
my $current_user = %$v{'github'}; my $current_user = %$v{'github'};
if (!defined $current_user) { if (!defined $current_user) {
print "$k has no github handle\n"; print "$k has no github handle\n";
next;
} }
my $github_id = %$v{'githubId'}; my $github_id = %$v{'githubId'};
if (!defined $github_id) { if (!defined $github_id) {
@ -37,13 +40,16 @@ while(my($k, $v) = each %$maintainers_json) {
sleep($ratelimit_reset - time() + 5); sleep($ratelimit_reset - time() + 5);
} }
if ($resp->code != 200) { if ($resp->code != 200) {
print $current_user . " likely deleted their github account\n"; print "$k likely deleted their github account\n";
next; next;
} }
my $resp_json = from_json($resp->content); my $resp_json = from_json($resp->content);
my $api_user = %$resp_json{"login"}; my $api_user = %$resp_json{"login"};
if (lc($current_user) ne lc($api_user)) { if (!defined $current_user) {
print $current_user . " is now known on github as " . $api_user . ". Editing maintainer-list.nix…\n"; print "$k is known on github as $api_user.\n";
}
elsif (lc($current_user) ne lc($api_user)) {
print "$k is now known on github as $api_user. Editing maintainer-list.nix…\n";
my $file = path($maintainers_list_nix); my $file = path($maintainers_list_nix);
my $data = $file->slurp_utf8; my $data = $file->slurp_utf8;
$data =~ s/github = "$current_user";$/github = "$api_user";/m; $data =~ s/github = "$current_user";$/github = "$api_user";/m;

View file

@ -34,7 +34,6 @@ loadkit,,,,,,alerque
lpeg,,,,,,vyp lpeg,,,,,,vyp
lpeg_patterns,,,,,, lpeg_patterns,,,,,,
lpeglabel,,,,1.6.0,, lpeglabel,,,,1.6.0,,
lpty,,,,,,
lrexlib-gnu,,,,,, lrexlib-gnu,,,,,,
lrexlib-pcre,,,,,,vyp lrexlib-pcre,,,,,,vyp
lrexlib-posix,,,,,, lrexlib-posix,,,,,,
@ -72,6 +71,7 @@ lualogging,,,,,,
luaossl,,,,,5.1, luaossl,,,,,5.1,
luaposix,,,,34.1.1-1,,vyp lblasc luaposix,,,,34.1.1-1,,vyp lblasc
luarepl,,,,,, luarepl,,,,,,
luarocks-build-rust-mlua,,,,,,mrcjkb
luasec,,,,,,flosse luasec,,,,,,flosse
luasocket,,,,,, luasocket,,,,,,
luasql-sqlite3,,,,,,vyp luasql-sqlite3,,,,,,vyp
@ -92,6 +92,7 @@ mediator_lua,,,,,,
middleclass,,,,,, middleclass,,,,,,
mpack,,,,,, mpack,,,,,,
moonscript,https://github.com/leafo/moonscript.git,dev-1,,,,arobyn moonscript,https://github.com/leafo/moonscript.git,dev-1,,,,arobyn
nui-nvim,,,,,,mrcjkb
nvim-client,https://github.com/neovim/lua-client.git,,,,, nvim-client,https://github.com/neovim/lua-client.git,,,,,
nvim-cmp,https://github.com/hrsh7th/nvim-cmp,,,,, nvim-cmp,https://github.com/hrsh7th/nvim-cmp,,,,,
penlight,https://github.com/lunarmodules/Penlight.git,,,,,alerque penlight,https://github.com/lunarmodules/Penlight.git,,,,,alerque
@ -109,5 +110,7 @@ teal-language-server,,,http://luarocks.org/dev,,,
telescope.nvim,,,,,5.1, telescope.nvim,,,,,5.1,
telescope-manix,,,,,, telescope-manix,,,,,,
tl,,,,,,mephistophiles tl,,,,,,mephistophiles
toml,,,,,,mrcjkb
toml-edit,,,,,5.1,mrcjkb
vstruct,https://github.com/ToxicFrog/vstruct.git,,,,, vstruct,https://github.com/ToxicFrog/vstruct.git,,,,,
vusted,,,,,,figsoda vusted,,,,,,figsoda

1 name src ref server version luaversion maintainers
34 lpeg vyp
35 lpeg_patterns
36 lpeglabel 1.6.0
lpty
37 lrexlib-gnu
38 lrexlib-pcre vyp
39 lrexlib-posix
71 luaossl 5.1
72 luaposix 34.1.1-1 vyp lblasc
73 luarepl
74 luarocks-build-rust-mlua mrcjkb
75 luasec flosse
76 luasocket
77 luasql-sqlite3 vyp
92 middleclass
93 mpack
94 moonscript https://github.com/leafo/moonscript.git dev-1 arobyn
95 nui-nvim mrcjkb
96 nvim-client https://github.com/neovim/lua-client.git
97 nvim-cmp https://github.com/hrsh7th/nvim-cmp
98 penlight https://github.com/lunarmodules/Penlight.git alerque
110 telescope.nvim 5.1
111 telescope-manix
112 tl mephistophiles
113 toml mrcjkb
114 toml-edit 5.1 mrcjkb
115 vstruct https://github.com/ToxicFrog/vstruct.git
116 vusted figsoda

View file

@ -327,7 +327,6 @@ def run_nix_expr(expr, nixpkgs: str):
:param expr nix expression to fetch current plugins :param expr nix expression to fetch current plugins
:param nixpkgs Path towards a nixpkgs checkout :param nixpkgs Path towards a nixpkgs checkout
''' '''
# local_pkgs = str(Path(__file__).parent.parent.parent)
with CleanEnvironment(nixpkgs) as nix_path: with CleanEnvironment(nixpkgs) as nix_path:
cmd = [ cmd = [
"nix", "nix",
@ -341,8 +340,8 @@ def run_nix_expr(expr, nixpkgs: str):
"--nix-path", "--nix-path",
nix_path, nix_path,
] ]
log.debug("Running command %s", " ".join(cmd)) log.debug("Running command: %s", " ".join(cmd))
out = subprocess.check_output(cmd) out = subprocess.check_output(cmd, timeout=90)
data = json.loads(out) data = json.loads(out)
return data return data
@ -572,7 +571,6 @@ class CleanEnvironment(object):
self.empty_config = NamedTemporaryFile() self.empty_config = NamedTemporaryFile()
self.empty_config.write(b"{}") self.empty_config.write(b"{}")
self.empty_config.flush() self.empty_config.flush()
# os.environ["NIXPKGS_CONFIG"] = self.empty_config.name
return f"localpkgs={self.local_pkgs}" return f"localpkgs={self.local_pkgs}"
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:

View file

@ -682,6 +682,18 @@ with lib.maintainers; {
shortName = "Numtide team"; shortName = "Numtide team";
}; };
ocaml = {
members = [
alizter
];
githubTeams = [
"ocaml"
];
scope = "Maintain the OCaml compiler and package set.";
shortName = "OCaml";
enableFeatureFreezePing = true;
};
openstack = { openstack = {
members = [ members = [
SuperSandro2000 SuperSandro2000

View file

@ -26,7 +26,7 @@ directory which is scanned by the ICL loader for ICD files. For example:
```ShellSession ```ShellSession
$ export \ $ export \
OCL_ICD_VENDORS=`nix-build '<nixpkgs>' --no-out-link -A rocm-opencl-icd`/etc/OpenCL/vendors/ OCL_ICD_VENDORS=`nix-build '<nixpkgs>' --no-out-link -A rocmPackages.clr.icd`/etc/OpenCL/vendors/
``` ```
The second mechanism is to add the OpenCL driver package to The second mechanism is to add the OpenCL driver package to
@ -50,13 +50,13 @@ Platform Vendor Advanced Micro Devices, Inc.
Modern AMD [Graphics Core Modern AMD [Graphics Core
Next](https://en.wikipedia.org/wiki/Graphics_Core_Next) (GCN) GPUs are Next](https://en.wikipedia.org/wiki/Graphics_Core_Next) (GCN) GPUs are
supported through the rocm-opencl-icd package. Adding this package to supported through the rocmPackages.clr.icd package. Adding this package to
[](#opt-hardware.opengl.extraPackages) [](#opt-hardware.opengl.extraPackages)
enables OpenCL support: enables OpenCL support:
```nix ```nix
hardware.opengl.extraPackages = [ hardware.opengl.extraPackages = [
rocm-opencl-icd rocmPackages.clr.icd
]; ];
``` ```

View file

@ -45,8 +45,8 @@ services.xserver.displayManager.gdm.enable = true;
You can set the keyboard layout (and optionally the layout variant): You can set the keyboard layout (and optionally the layout variant):
```nix ```nix
services.xserver.layout = "de"; services.xserver.xkb.layout = "de";
services.xserver.xkbVariant = "neo"; services.xserver.xkb.variant = "neo";
``` ```
The X server is started automatically at boot time. If you don't want The X server is started automatically at boot time. If you don't want
@ -266,7 +266,7 @@ Once the configuration is applied, and you did a logout/login cycle, the
layout should be ready to use. You can try it by e.g. running layout should be ready to use. You can try it by e.g. running
`setxkbmap us-greek` and then type `<alt>+a` (it may not get applied in `setxkbmap us-greek` and then type `<alt>+a` (it may not get applied in
your terminal straight away). To change the default, the usual your terminal straight away). To change the default, the usual
`services.xserver.layout` option can still be used. `services.xserver.xkb.layout` option can still be used.
A layout can have several other components besides `xkb_symbols`, for A layout can have several other components besides `xkb_symbols`, for
example we will define new keycodes for some multimedia key and bind example we will define new keycodes for some multimedia key and bind

View file

@ -90,7 +90,7 @@ lib.mkOption {
``` ```
::: :::
### `mkPackageOption`, `mkPackageOptionMD` {#sec-option-declarations-util-mkPackageOption} ### `mkPackageOption` {#sec-option-declarations-util-mkPackageOption}
Usage: Usage:
@ -121,15 +121,13 @@ valid attribute path in pkgs (if name is a list).
If you wish to explicitly provide no default, pass `null` as `default`. If you wish to explicitly provide no default, pass `null` as `default`.
During the transition to CommonMark documentation `mkPackageOption` creates an option with a DocBook description attribute, once the transition is completed it will create a CommonMark description instead. `mkPackageOptionMD` always creates an option with a CommonMark description attribute and will be removed some time after the transition is completed.
[]{#ex-options-declarations-util-mkPackageOption} []{#ex-options-declarations-util-mkPackageOption}
Examples: Examples:
::: {#ex-options-declarations-util-mkPackageOption-hello .example} ::: {#ex-options-declarations-util-mkPackageOption-hello .example}
### Simple `mkPackageOption` usage ### Simple `mkPackageOption` usage
```nix ```nix
lib.mkPackageOptionMD pkgs "hello" { } lib.mkPackageOption pkgs "hello" { }
# is like # is like
lib.mkOption { lib.mkOption {
type = lib.types.package; type = lib.types.package;
@ -143,7 +141,7 @@ lib.mkOption {
::: {#ex-options-declarations-util-mkPackageOption-ghc .example} ::: {#ex-options-declarations-util-mkPackageOption-ghc .example}
### `mkPackageOption` with explicit default and example ### `mkPackageOption` with explicit default and example
```nix ```nix
lib.mkPackageOptionMD pkgs "GHC" { lib.mkPackageOption pkgs "GHC" {
default = [ "ghc" ]; default = [ "ghc" ];
example = "pkgs.haskell.packages.ghc92.ghc.withPackages (hkgs: [ hkgs.primes ])"; example = "pkgs.haskell.packages.ghc92.ghc.withPackages (hkgs: [ hkgs.primes ])";
} }

View file

@ -528,7 +528,7 @@ The only required parameter is `name`.
: A string representation of the type function name. : A string representation of the type function name.
`definition` `description`
: Description of the type used in documentation. Give information of : Description of the type used in documentation. Give information of
the type and any of its arguments. the type and any of its arguments.

View file

@ -16,7 +16,7 @@ You can quickly validate your edits with `make`:
```ShellSession ```ShellSession
$ cd /path/to/nixpkgs/nixos/doc/manual $ cd /path/to/nixpkgs/nixos/doc/manual
$ nix-shell $ nix-shell
nix-shell$ make nix-shell$ devmode
``` ```
Once you are done making modifications to the manual, it's important to Once you are done making modifications to the manual, it's important to

View file

@ -130,7 +130,7 @@ In addition to numerous new and upgraded packages, this release includes the fol
don't lose access to their files. don't lose access to their files.
In any other case, it's safe to use OpenSSL 3 for PHP's OpenSSL extension. This can be done by setting In any other case, it's safe to use OpenSSL 3 for PHP's OpenSSL extension. This can be done by setting
[](#opt-services.nextcloud.enableBrokenCiphersForSSE) to `false`. `services.nextcloud.enableBrokenCiphersForSSE` to `false`.
- The `coq` package and versioned variants starting at `coq_8_14` no - The `coq` package and versioned variants starting at `coq_8_14` no
longer include CoqIDE, which is now available through longer include CoqIDE, which is now available through

View file

@ -26,6 +26,14 @@
[`sudo-rs`]: https://github.com/memorysafety/sudo-rs/ [`sudo-rs`]: https://github.com/memorysafety/sudo-rs/
- All [ROCm](https://rocm.docs.amd.com/en/latest/) packages have been updated to 5.7.0.
- [ROCm](https://rocm.docs.amd.com/en/latest/) package attribute sets are versioned: `rocmPackages` -> `rocmPackages_5`.
- If the user has a custom shell enabled via `users.users.${USERNAME}.shell = ${CUSTOMSHELL}`, the
assertion will require them to also set `programs.${CUSTOMSHELL}.enable =
true`. This is generally safe behavior, but for anyone needing to opt out from
the check `users.users.${USERNAME}.ignoreShellProgramCheck = true` will do the job.
## New Services {#sec-release-23.11-new-services} ## New Services {#sec-release-23.11-new-services}
- [MCHPRS](https://github.com/MCHPR/MCHPRS), a multithreaded Minecraft server built for redstone. Available as [services.mchprs](#opt-services.mchprs.enable). - [MCHPRS](https://github.com/MCHPR/MCHPRS), a multithreaded Minecraft server built for redstone. Available as [services.mchprs](#opt-services.mchprs.enable).
@ -58,10 +66,14 @@
- [Prometheus MySQL exporter](https://github.com/prometheus/mysqld_exporter), a MySQL server exporter for Prometheus. Available as [services.prometheus.exporters.mysqld](#opt-services.prometheus.exporters.mysqld.enable). - [Prometheus MySQL exporter](https://github.com/prometheus/mysqld_exporter), a MySQL server exporter for Prometheus. Available as [services.prometheus.exporters.mysqld](#opt-services.prometheus.exporters.mysqld.enable).
- [LibreNMS](https://www.librenms.org), a auto-discovering PHP/MySQL/SNMP based network monitoring. Available as [services.librenms](#opt-services.librenms.enable).
- [sitespeed-io](https://sitespeed.io), a tool that can generate metrics (timings, diagnostics) for websites. Available as [services.sitespeed-io](#opt-services.sitespeed-io.enable). - [sitespeed-io](https://sitespeed.io), a tool that can generate metrics (timings, diagnostics) for websites. Available as [services.sitespeed-io](#opt-services.sitespeed-io.enable).
- [stalwart-mail](https://stalw.art), an all-in-one email server (SMTP, IMAP, JMAP). Available as [services.stalwart-mail](#opt-services.stalwart-mail.enable). - [stalwart-mail](https://stalw.art), an all-in-one email server (SMTP, IMAP, JMAP). Available as [services.stalwart-mail](#opt-services.stalwart-mail.enable).
- [tang](https://github.com/latchset/tang), a server for binding data to network presence. Available as [services.tang](#opt-services.tang.enable).
- [Jool](https://nicmx.github.io/Jool/en/index.html), a kernelspace NAT64 and SIIT implementation, providing translation between IPv4 and IPv6. Available as [networking.jool.enable](#opt-networking.jool.enable). - [Jool](https://nicmx.github.io/Jool/en/index.html), a kernelspace NAT64 and SIIT implementation, providing translation between IPv4 and IPv6. Available as [networking.jool.enable](#opt-networking.jool.enable).
- [Apache Guacamole](https://guacamole.apache.org/), a cross-platform, clientless remote desktop gateway. Available as [services.guacamole-server](#opt-services.guacamole-server.enable) and [services.guacamole-client](#opt-services.guacamole-client.enable) services. - [Apache Guacamole](https://guacamole.apache.org/), a cross-platform, clientless remote desktop gateway. Available as [services.guacamole-server](#opt-services.guacamole-server.enable) and [services.guacamole-client](#opt-services.guacamole-client.enable) services.
@ -83,6 +95,8 @@
- [Honk](https://humungus.tedunangst.com/r/honk), a complete ActivityPub server with minimal setup and support costs. - [Honk](https://humungus.tedunangst.com/r/honk), a complete ActivityPub server with minimal setup and support costs.
Available as [services.honk](#opt-services.honk.enable). Available as [services.honk](#opt-services.honk.enable).
- [ferretdb](https://www.ferretdb.io/), an open-source proxy, converting the MongoDB 6.0+ wire protocol queries to PostgreSQL or SQLite. Available as [services.ferretdb](options.html#opt-services.ferretdb.enable).
- [NNCP](http://www.nncpgo.org/). Added nncp-daemon and nncp-caller services. Configuration is set with [programs.nncp.settings](#opt-programs.nncp.settings) and the daemons are enabled at [services.nncp](#opt-services.nncp.caller.enable). - [NNCP](http://www.nncpgo.org/). Added nncp-daemon and nncp-caller services. Configuration is set with [programs.nncp.settings](#opt-programs.nncp.settings) and the daemons are enabled at [services.nncp](#opt-services.nncp.caller.enable).
- [tuxedo-rs](https://github.com/AaronErhardt/tuxedo-rs), Rust utilities for interacting with hardware from TUXEDO Computers. - [tuxedo-rs](https://github.com/AaronErhardt/tuxedo-rs), Rust utilities for interacting with hardware from TUXEDO Computers.
@ -93,6 +107,10 @@
- [netclient](https://github.com/gravitl/netclient), an automated WireGuard® Management Client. Available as [services.netclient](#opt-services.netclient.enable). - [netclient](https://github.com/gravitl/netclient), an automated WireGuard® Management Client. Available as [services.netclient](#opt-services.netclient.enable).
- [trunk-ng](https://github.com/ctron/trunk), A fork of `trunk`: Build, bundle & ship your Rust WASM application to the web
- [virt-manager](https://virt-manager.org/), an UI for managing virtual machines in libvirt, is now available as `programs.virt-manager`.
## Backward Incompatibilities {#sec-release-23.11-incompatibilities} ## Backward Incompatibilities {#sec-release-23.11-incompatibilities}
- `network-online.target` has been fixed to no longer time out for systems with `networking.useDHCP = true` and `networking.useNetworkd = true`. - `network-online.target` has been fixed to no longer time out for systems with `networking.useDHCP = true` and `networking.useNetworkd = true`.
@ -148,6 +166,17 @@
- `consul` has been updated to `1.16.0`. See the [release note](https://github.com/hashicorp/consul/releases/tag/v1.16.0) for more details. Once a new Consul version has started and upgraded its data directory, it generally cannot be downgraded to the previous version. - `consul` has been updated to `1.16.0`. See the [release note](https://github.com/hashicorp/consul/releases/tag/v1.16.0) for more details. Once a new Consul version has started and upgraded its data directory, it generally cannot be downgraded to the previous version.
- `llvmPackages_rocm` has been moved to `rocmPackages.llvm`.
- `hip`, `rocm-opencl-runtime`, `rocm-opencl-icd`, and `rocclr` have been combined into `rocmPackages.clr`.
- `clang-ocl`, `clr`, `composable_kernel`, `hipblas`, `hipcc`, `hip-common`, `hipcub`,
`hipfft`, `hipfort`, `hipify`, `hipsolver`, `hipsparse`, `migraphx`, `miopen`, `miopengemm`,
`rccl`, `rdc`, `rocalution`, `rocblas`, `rocdgbapi`, `rocfft`, `rocgdb`, `rocm-cmake`,
`rocm-comgr`, `rocm-core`, `rocm-device-libs`, `rocminfo`, `rocmlir`, `rocm-runtime`,
`rocm-smi`, `rocm-thunk`, `rocprim`, `rocprofiler`, `rocrand`, `rocr-debug-agent`,
`rocsolver`, `rocsparse`, `rocthrust`, `roctracer`, `rocwmma`, and `tensile` have been moved to `rocmPackages`.
- `himalaya` has been updated to `0.8.0`, which drops the native TLS support (in favor of Rustls) and add OAuth 2.0 support. See the [release note](https://github.com/soywod/himalaya/releases/tag/v0.8.0) for more details. - `himalaya` has been updated to `0.8.0`, which drops the native TLS support (in favor of Rustls) and add OAuth 2.0 support. See the [release note](https://github.com/soywod/himalaya/releases/tag/v0.8.0) for more details.
- `nix-prefetch-git` now ignores global and user git config, to improve reproducibility. - `nix-prefetch-git` now ignores global and user git config, to improve reproducibility.
@ -185,6 +214,8 @@
- `odoo` now defaults to 16, updated from 15. - `odoo` now defaults to 16, updated from 15.
- `varnish` was upgraded from 7.2.x to 7.4.x, see https://varnish-cache.org/docs/7.3/whats-new/upgrading-7.3.html and https://varnish-cache.org/docs/7.4/whats-new/upgrading-7.4.html for upgrade notes. The current LTS version is still offered as `varnish60`.
- `util-linux` is now supported on Darwin and is no longer an alias to `unixtools`. Use the `unixtools.util-linux` package for access to the Apple variants of the utilities. - `util-linux` is now supported on Darwin and is no longer an alias to `unixtools`. Use the `unixtools.util-linux` package for access to the Apple variants of the utilities.
- `services.keyd` changed API. Now you can create multiple configuration files. - `services.keyd` changed API. Now you can create multiple configuration files.
@ -199,6 +230,8 @@
- `fileSystems.<name>.autoResize` now uses `systemd-growfs` to resize the file system online in stage 2. This means that `f2fs` and `ext2` can no longer be auto resized, while `xfs` and `btrfs` now can be. - `fileSystems.<name>.autoResize` now uses `systemd-growfs` to resize the file system online in stage 2. This means that `f2fs` and `ext2` can no longer be auto resized, while `xfs` and `btrfs` now can be.
- `nixos-rebuild {switch,boot,test,dry-activate}` now runs the system activation inside `systemd-run`, creating an ephemeral systemd service and protecting the system switch against issues like network disconnections during remote (e.g. SSH) sessions. This has the side effect of running the switch in an isolated environment, that could possible break post-switch scripts that depends on things like environment variables being set. If you want to opt-out from this behavior for now, you may set the `NIXOS_SWITCH_USE_DIRTY_ENV` environment variable before running `nixos-rebuild`. However, keep in mind that this option will be removed in the future.
- The `services.vaultwarden.config` option default value was changed to make Vaultwarden only listen on localhost, following the [secure defaults for most NixOS services](https://github.com/NixOS/nixpkgs/issues/100192). - The `services.vaultwarden.config` option default value was changed to make Vaultwarden only listen on localhost, following the [secure defaults for most NixOS services](https://github.com/NixOS/nixpkgs/issues/100192).
- `services.lemmy.settings.federation` was removed in 0.17.0 and no longer has any effect. To enable federation, the hostname must be set in the configuration file and then federation must be enabled in the admin web UI. See the [release notes](https://github.com/LemmyNet/lemmy/blob/c32585b03429f0f76d1e4ff738786321a0a9df98/RELEASES.md#upgrade-instructions) for more details. - `services.lemmy.settings.federation` was removed in 0.17.0 and no longer has any effect. To enable federation, the hostname must be set in the configuration file and then federation must be enabled in the admin web UI. See the [release notes](https://github.com/LemmyNet/lemmy/blob/c32585b03429f0f76d1e4ff738786321a0a9df98/RELEASES.md#upgrade-instructions) for more details.
@ -217,6 +250,8 @@
- The binary of the package `cloud-sql-proxy` has changed from `cloud_sql_proxy` to `cloud-sql-proxy`. - The binary of the package `cloud-sql-proxy` has changed from `cloud_sql_proxy` to `cloud-sql-proxy`.
- Garage has been upgraded to 0.9.x. `services.garage.package` now needs to be explicitly set, so version upgrades can be done in a controlled fashion. For this, we expose `garage_x_y` attributes which can be set here.
- The `woodpecker-*` CI packages have been updated to 1.0.0. This release is wildly incompatible with the 0.15.X versions that were previously packaged. Please read [upstream's documentation](https://woodpecker-ci.org/docs/next/migrations#100) to learn how to update your CI configurations. - The `woodpecker-*` CI packages have been updated to 1.0.0. This release is wildly incompatible with the 0.15.X versions that were previously packaged. Please read [upstream's documentation](https://woodpecker-ci.org/docs/next/migrations#100) to learn how to update your CI configurations.
- The Caddy module gained a new option named `services.caddy.enableReload` which is enabled by default. It allows reloading the service instead of restarting it, if only a config file has changed. This option must be disabled if you have turned off the [Caddy admin API](https://caddyserver.com/docs/caddyfile/options#admin). If you keep this option enabled, you should consider setting [`grace_period`](https://caddyserver.com/docs/caddyfile/options#grace-period) to a non-infinite value to prevent Caddy from delaying the reload indefinitely. - The Caddy module gained a new option named `services.caddy.enableReload` which is enabled by default. It allows reloading the service instead of restarting it, if only a config file has changed. This option must be disabled if you have turned off the [Caddy admin API](https://caddyserver.com/docs/caddyfile/options#admin). If you keep this option enabled, you should consider setting [`grace_period`](https://caddyserver.com/docs/caddyfile/options#grace-period) to a non-infinite value to prevent Caddy from delaying the reload indefinitely.
@ -246,6 +281,8 @@
- Package `noto-fonts-emoji` was renamed to `noto-fonts-color-emoji`; - Package `noto-fonts-emoji` was renamed to `noto-fonts-color-emoji`;
see [#221181](https://github.com/NixOS/nixpkgs/issues/221181). see [#221181](https://github.com/NixOS/nixpkgs/issues/221181).
- Package `cloud-sql-proxy` was renamed to `google-cloud-sql-proxy` as it cannot be used with other cloud providers.;
- Package `pash` was removed due to being archived upstream. Use `powershell` as an alternative. - Package `pash` was removed due to being archived upstream. Use `powershell` as an alternative.
- `security.sudo.extraRules` now includes `root`'s default rule, with ordering - `security.sudo.extraRules` now includes `root`'s default rule, with ordering
@ -253,6 +290,8 @@
order, or relying on `mkBefore` and `mkAfter`, but may impact users calling order, or relying on `mkBefore` and `mkAfter`, but may impact users calling
`mkOrder n` with n  400. `mkOrder n` with n  400.
- X keyboard extension (XKB) options have been reorganized into a single attribute set, `services.xserver.xkb`. Specifically, `services.xserver.layout` is now `services.xserver.xkb.layout`, `services.xserver.xkbModel` is now `services.xserver.xkb.model`, `services.xserver.xkbOptions` is now `services.xserver.xkb.options`, `services.xserver.xkbVariant` is now `services.xserver.xkb.variant`, and `services.xserver.xkbDir` is now `services.xserver.xkb.dir`.
- `networking.networkmanager.firewallBackend` was removed as NixOS is now using iptables-nftables-compat even when using iptables, therefore Networkmanager now uses the nftables backend unconditionally. - `networking.networkmanager.firewallBackend` was removed as NixOS is now using iptables-nftables-compat even when using iptables, therefore Networkmanager now uses the nftables backend unconditionally.
- [`lib.lists.foldl'`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.foldl-prime) now always evaluates the initial accumulator argument first. - [`lib.lists.foldl'`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.foldl-prime) now always evaluates the initial accumulator argument first.
@ -266,6 +305,12 @@
- Setting `nixpkgs.config` options while providing an external `pkgs` instance will now raise an error instead of silently ignoring the options. NixOS modules no longer set `nixpkgs.config` to accomodate this. This specifically affects `services.locate`, `services.xserver.displayManager.lightdm.greeters.tiny` and `programs.firefox` NixOS modules. No manual intervention should be required in most cases, however, configurations relying on those modules affecting packages outside the system environment should switch to explicit overlays. - Setting `nixpkgs.config` options while providing an external `pkgs` instance will now raise an error instead of silently ignoring the options. NixOS modules no longer set `nixpkgs.config` to accomodate this. This specifically affects `services.locate`, `services.xserver.displayManager.lightdm.greeters.tiny` and `programs.firefox` NixOS modules. No manual intervention should be required in most cases, however, configurations relying on those modules affecting packages outside the system environment should switch to explicit overlays.
- `service.borgmatic.settings.location` and `services.borgmatic.configurations.<name>.location` are deprecated, please move your options out of sections to the global scope.
- `dagger` was removed because using a package called `dagger` and packaging it from source violates their trademark policy.
- `win-virtio` package was renamed to `virtio-win` to be consistent with the upstream package name.
## Other Notable Changes {#sec-release-23.11-notable-changes} ## Other Notable Changes {#sec-release-23.11-notable-changes}
- The Cinnamon module now enables XDG desktop integration by default. If you are experiencing collisions related to xdg-desktop-portal-gtk you can safely remove `xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];` from your NixOS configuration. - The Cinnamon module now enables XDG desktop integration by default. If you are experiencing collisions related to xdg-desktop-portal-gtk you can safely remove `xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];` from your NixOS configuration.
@ -300,6 +345,8 @@
- The `fonts.fonts` and `fonts.enableDefaultFonts` options have been renamed to `fonts.packages` and `fonts.enableDefaultPackages` respectively. - The `fonts.fonts` and `fonts.enableDefaultFonts` options have been renamed to `fonts.packages` and `fonts.enableDefaultPackages` respectively.
- `pkgs.openvpn3` now optionally supports systemd-resolved. `programs.openvpn3` will automatically enable systemd-resolved support if `config.services.resolved.enable` is enabled.
- `services.fail2ban.jails` can now be configured with attribute sets defining settings and filters instead of lines. The stringed options `daemonConfig` and `extraSettings` have respectively been replaced by `daemonSettings` and `jails.DEFAULT.settings` which use attribute sets. - `services.fail2ban.jails` can now be configured with attribute sets defining settings and filters instead of lines. The stringed options `daemonConfig` and `extraSettings` have respectively been replaced by `daemonSettings` and `jails.DEFAULT.settings` which use attribute sets.
- The application firewall `opensnitch` now uses the process monitor method eBPF as default as recommended by upstream. The method can be changed with the setting [services.opensnitch.settings.ProcMonitorMethod](#opt-services.opensnitch.settings.ProcMonitorMethod). - The application firewall `opensnitch` now uses the process monitor method eBPF as default as recommended by upstream. The method can be changed with the setting [services.opensnitch.settings.ProcMonitorMethod](#opt-services.opensnitch.settings.ProcMonitorMethod).
@ -415,6 +462,8 @@ The module update takes care of the new config syntax and the data itself (user
- `python3.pkgs.flitBuildHook` has been removed. Use `flit-core` and `format = "pyproject"` instead. - `python3.pkgs.flitBuildHook` has been removed. Use `flit-core` and `format = "pyproject"` instead.
- The `extend` function of `llvmPackages` has been removed due it coming from the `tools` attrset thus only extending the `tool` attrset. A possible replacement is to construct the set from `libraries` and `tools`, or patch nixpkgs.
- The `qemu-vm.nix` module now supports disabling overriding `fileSystems` with - The `qemu-vm.nix` module now supports disabling overriding `fileSystems` with
`virtualisation.fileSystems`. This enables the user to boot VMs from `virtualisation.fileSystems`. This enables the user to boot VMs from
"external" disk images not created by the qemu-vm module. You can stop the "external" disk images not created by the qemu-vm module. You can stop the
@ -424,4 +473,3 @@ The module update takes care of the new config syntax and the data itself (user
- The `electron` packages now places its application files in `$out/libexec/electron` instead of `$out/lib/electron`. Packages using electron-builder will fail to build and need to be adjusted by changing `lib` to `libexec`. - The `electron` packages now places its application files in `$out/libexec/electron` instead of `$out/lib/electron`. Packages using electron-builder will fail to build and need to be adjusted by changing `lib` to `libexec`.
- `teleport` has been upgraded from major version 12 to major version 14. Please see upstream [upgrade instructions](https://goteleport.com/docs/management/operations/upgrading/) and release notes for versions [13](https://goteleport.com/docs/changelog/#1300-050823) and [14](https://goteleport.com/docs/changelog/#1400-092023). Note that Teleport does not officially support upgrades across more than one major version at a time. If you're running Teleport server components, it is recommended to first upgrade to an intermediate 13.x version by setting `services.teleport.package = pkgs.teleport_13`. Afterwards, this option can be removed to upgrade to the default version (14). - `teleport` has been upgraded from major version 12 to major version 14. Please see upstream [upgrade instructions](https://goteleport.com/docs/management/operations/upgrading/) and release notes for versions [13](https://goteleport.com/docs/changelog/#1300-050823) and [14](https://goteleport.com/docs/changelog/#1400-092023). Note that Teleport does not officially support upgrades across more than one major version at a time. If you're running Teleport server components, it is recommended to first upgrade to an intermediate 13.x version by setting `services.teleport.package = pkgs.teleport_13`. Afterwards, this option can be removed to upgrade to the default version (14).

View file

@ -34,9 +34,6 @@ evalConfigArgs@
in lib.optional (e != "") (import e) in lib.optional (e != "") (import e)
}: }:
let pkgs_ = pkgs;
in
let let
inherit (lib) optional; inherit (lib) optional;
@ -58,8 +55,9 @@ let
nixpkgs.system = lib.mkDefault system; nixpkgs.system = lib.mkDefault system;
}) })
++ ++
(optional (pkgs_ != null) { (optional (pkgs != null) {
_module.args.pkgs = lib.mkForce pkgs_; # This should be default priority, so it conflicts with any user-defined pkgs.
nixpkgs.pkgs = pkgs;
}) })
); );
}; };
@ -109,10 +107,10 @@ let
nixosWithUserModules = noUserModules.extendModules { modules = allUserModules; }; nixosWithUserModules = noUserModules.extendModules { modules = allUserModules; };
withExtraArgs = nixosSystem: nixosSystem // { withExtraAttrs = configuration: configuration // {
inherit extraArgs; inherit extraArgs;
inherit (nixosSystem._module.args) pkgs; inherit (configuration._module.args) pkgs;
extendModules = args: withExtraArgs (nixosSystem.extendModules args); extendModules = args: withExtraAttrs (configuration.extendModules args);
}; };
in in
withWarnings (withExtraArgs nixosWithUserModules) withWarnings (withExtraAttrs nixosWithUserModules)

View file

@ -127,8 +127,8 @@ in
${optionalString (config.environment.sessionVariables ? XKB_CONFIG_ROOT) ${optionalString (config.environment.sessionVariables ? XKB_CONFIG_ROOT)
"-I${config.environment.sessionVariables.XKB_CONFIG_ROOT}" "-I${config.environment.sessionVariables.XKB_CONFIG_ROOT}"
} \ } \
-model '${xkbModel}' -layout '${layout}' \ -model '${xkb.model}' -layout '${xkb.layout}' \
-option '${xkbOptions}' -variant '${xkbVariant}' > "$out" -option '${xkb.options}' -variant '${xkb.variant}' > "$out"
''); '');
} }

View file

@ -1,43 +0,0 @@
{ config, lib, pkgs, ... }:
{
options = {
gnu = lib.mkOption {
type = lib.types.bool;
default = false;
description = lib.mdDoc ''
When enabled, GNU software is chosen by default whenever a there is
a choice between GNU and non-GNU software (e.g., GNU lsh
vs. OpenSSH).
'';
};
};
config = lib.mkIf config.gnu {
environment.systemPackages = with pkgs;
# TODO: Adjust `requiredPackages' from `system-path.nix'.
# TODO: Add Inetutils once it has the new `ifconfig'.
[ parted
#fdisk # XXX: GNU fdisk currently fails to build and it's redundant
# with the `parted' command.
nano zile
texinfo # for the stand-alone Info reader
]
++ lib.optional (!stdenv.isAarch32) grub2;
# GNU GRUB, where available.
boot.loader.grub.enable = !pkgs.stdenv.isAarch32;
# GNU lsh.
services.openssh.enable = false;
services.lshd.enable = true;
programs.ssh.startAgent = false;
services.xserver.startGnuPGAgent = true;
# TODO: GNU dico.
# TODO: GNU Inetutils' inetd.
# TODO: GNU Pies.
};
}

View file

@ -89,12 +89,6 @@ in
for a running system, entries can be removed for a more for a running system, entries can be removed for a more
minimal NixOS installation. minimal NixOS installation.
Note: If `pkgs.nano` is removed from this list,
make sure another editor is installed and the
`EDITOR` environment variable is set to it.
Environment variables can be set using
{option}`environment.variables`.
Like with systemPackages, packages are installed to Like with systemPackages, packages are installed to
{file}`/run/current-system/sw`. They are {file}`/run/current-system/sw`. They are
automatically available to all users, and are automatically available to all users, and are

View file

@ -172,6 +172,17 @@ let
''; '';
}; };
ignoreShellProgramCheck = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
By default, nixos will check that programs.SHELL.enable is set to
true if the user has a custom shell specified. If that behavior isn't
required and there are custom overrides in place to make sure that the
shell is functional, set this to true.
'';
};
subUidRanges = mkOption { subUidRanges = mkOption {
type = with types; listOf (submodule subordinateUidRange); type = with types; listOf (submodule subordinateUidRange);
default = []; default = [];
@ -330,6 +341,20 @@ let
administrator before being able to use the system again. administrator before being able to use the system again.
''; '';
}; };
linger = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Whether to enable lingering for this user. If true, systemd user
units will start at boot, rather than starting at login and stopping
at logout. This is the declarative equivalent of running
`loginctl enable-linger` for this user.
If false, user units will not be started until the user logs in, and
may be stopped on logout depending on the settings in `logind.conf`.
'';
};
}; };
config = mkMerge config = mkMerge
@ -663,6 +688,20 @@ in {
''; '';
}; };
system.activationScripts.update-lingering = let
lingerDir = "/var/lib/systemd/linger";
lingeringUsers = map (u: u.name) (attrValues (flip filterAttrs cfg.users (n: u: u.linger)));
lingeringUsersFile = builtins.toFile "lingering-users"
(concatStrings (map (s: "${s}\n")
(sort (a: b: a < b) lingeringUsers))); # this sorting is important for `comm` to work correctly
in stringAfter [ "users" ] ''
if [ -e ${lingerDir} ] ; then
cd ${lingerDir}
ls ${lingerDir} | sort | comm -3 -1 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl disable-linger
ls ${lingerDir} | sort | comm -3 -2 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl enable-linger
fi
'';
# Warn about user accounts with deprecated password hashing schemes # Warn about user accounts with deprecated password hashing schemes
system.activationScripts.hashes = { system.activationScripts.hashes = {
deps = [ "users" ]; deps = [ "users" ];
@ -702,7 +741,8 @@ in {
environment.profiles = [ environment.profiles = [
"$HOME/.nix-profile" "$HOME/.nix-profile"
"\${XDG_STATE_HOME:-$HOME/.local/state}/nix/profile" "\${XDG_STATE_HOME}/nix/profile"
"$HOME/.local/state/nix/profile"
"/etc/profiles/per-user/$USER" "/etc/profiles/per-user/$USER"
]; ];
@ -824,13 +864,17 @@ in {
''; '';
} }
] ++ (map (shell: { ] ++ (map (shell: {
assertion = (user.shell == pkgs.${shell}) -> (config.programs.${shell}.enable == true); assertion = !user.ignoreShellProgramCheck -> (user.shell == pkgs.${shell}) -> (config.programs.${shell}.enable == true);
message = '' message = ''
users.users.${user.name}.shell is set to ${shell}, but users.users.${user.name}.shell is set to ${shell}, but
programs.${shell}.enable is not true. This will cause the ${shell} programs.${shell}.enable is not true. This will cause the ${shell}
shell to lack the basic nix directories in its PATH and might make shell to lack the basic nix directories in its PATH and might make
logging in as that user impossible. You can fix it with: logging in as that user impossible. You can fix it with:
programs.${shell}.enable = true; programs.${shell}.enable = true;
If you know what you're doing and you are fine with the behavior,
set users.users.${user.name}.ignoreShellProgramCheck = true;
instead.
''; '';
}) [ }) [
"fish" "fish"

View file

@ -163,15 +163,15 @@ in
# console = { # console = {
# font = "Lat2-Terminus16"; # font = "Lat2-Terminus16";
# keyMap = "us"; # keyMap = "us";
# useXkbConfig = true; # use xkbOptions in tty. # useXkbConfig = true; # use xkb.options in tty.
# }; # };
$xserverConfig $xserverConfig
$desktopConfiguration $desktopConfiguration
# Configure keymap in X11 # Configure keymap in X11
# services.xserver.layout = "us"; # services.xserver.xkb.layout = "us";
# services.xserver.xkbOptions = "eurosign:e,caps:escape"; # services.xserver.xkb.options = "eurosign:e,caps:escape";
# Enable CUPS to print documents. # Enable CUPS to print documents.
# services.printing.enable = true; # services.printing.enable = true;

View file

@ -6,7 +6,6 @@
./config/fonts/fontdir.nix ./config/fonts/fontdir.nix
./config/fonts/ghostscript.nix ./config/fonts/ghostscript.nix
./config/fonts/packages.nix ./config/fonts/packages.nix
./config/gnu.nix
./config/gtk/gtk-icon-cache.nix ./config/gtk/gtk-icon-cache.nix
./config/i18n.nix ./config/i18n.nix
./config/iproute2.nix ./config/iproute2.nix
@ -232,6 +231,7 @@
./programs/pantheon-tweaks.nix ./programs/pantheon-tweaks.nix
./programs/partition-manager.nix ./programs/partition-manager.nix
./programs/plotinus.nix ./programs/plotinus.nix
./programs/projecteur.nix
./programs/proxychains.nix ./programs/proxychains.nix
./programs/qdmr.nix ./programs/qdmr.nix
./programs/qt5ct.nix ./programs/qt5ct.nix
@ -416,6 +416,7 @@
./services/databases/couchdb.nix ./services/databases/couchdb.nix
./services/databases/dgraph.nix ./services/databases/dgraph.nix
./services/databases/dragonflydb.nix ./services/databases/dragonflydb.nix
./services/databases/ferretdb.nix
./services/databases/firebird.nix ./services/databases/firebird.nix
./services/databases/foundationdb.nix ./services/databases/foundationdb.nix
./services/databases/hbase-standalone.nix ./services/databases/hbase-standalone.nix
@ -775,6 +776,7 @@
./services/monitoring/kapacitor.nix ./services/monitoring/kapacitor.nix
./services/monitoring/karma.nix ./services/monitoring/karma.nix
./services/monitoring/kthxbye.nix ./services/monitoring/kthxbye.nix
./services/monitoring/librenms.nix
./services/monitoring/loki.nix ./services/monitoring/loki.nix
./services/monitoring/longview.nix ./services/monitoring/longview.nix
./services/monitoring/mackerel-agent.nix ./services/monitoring/mackerel-agent.nix
@ -881,6 +883,7 @@
./services/networking/croc.nix ./services/networking/croc.nix
./services/networking/dae.nix ./services/networking/dae.nix
./services/networking/dante.nix ./services/networking/dante.nix
./services/networking/deconz.nix
./services/networking/dhcpcd.nix ./services/networking/dhcpcd.nix
./services/networking/dnscache.nix ./services/networking/dnscache.nix
./services/networking/dnscrypt-proxy2.nix ./services/networking/dnscrypt-proxy2.nix
@ -1083,6 +1086,7 @@
./services/networking/thelounge.nix ./services/networking/thelounge.nix
./services/networking/tinc.nix ./services/networking/tinc.nix
./services/networking/tinydns.nix ./services/networking/tinydns.nix
./services/networking/tinyproxy.nix
./services/networking/tmate-ssh-server.nix ./services/networking/tmate-ssh-server.nix
./services/networking/tox-bootstrapd.nix ./services/networking/tox-bootstrapd.nix
./services/networking/tox-node.nix ./services/networking/tox-node.nix
@ -1163,6 +1167,7 @@
./services/security/sshguard.nix ./services/security/sshguard.nix
./services/security/sslmate-agent.nix ./services/security/sslmate-agent.nix
./services/security/step-ca.nix ./services/security/step-ca.nix
./services/security/tang.nix
./services/security/tor.nix ./services/security/tor.nix
./services/security/torify.nix ./services/security/torify.nix
./services/security/torsocks.nix ./services/security/torsocks.nix

View file

@ -1,4 +1,4 @@
{ config, lib, ... }: { config, lib, options, ... }:
let let
keysDirectory = "/var/keys"; keysDirectory = "/var/keys";
@ -163,9 +163,15 @@ in
in in
script.overrideAttrs (old: { script.overrideAttrs (old: {
pos = __curPos; # sets meta.position to point here; see script binding above for package definition
meta = (old.meta or { }) // { meta = (old.meta or { }) // {
platforms = lib.platforms.darwin; platforms = lib.platforms.darwin;
}; };
passthru = (old.passthru or { }) // {
# Let users in the repl inspect the config
nixosConfig = config;
nixosOptions = options;
};
}); });
system = { system = {

View file

@ -24,7 +24,7 @@ in {
security.wrappers.bandwhich = { security.wrappers.bandwhich = {
owner = "root"; owner = "root";
group = "root"; group = "root";
capabilities = "cap_net_raw,cap_net_admin+ep"; capabilities = "cap_sys_ptrace,cap_dac_read_search,cap_net_raw,cap_net_admin+ep";
source = "${pkgs.bandwhich}/bin/bandwhich"; source = "${pkgs.bandwhich}/bin/bandwhich";
}; };
}; };

View file

@ -29,7 +29,7 @@ in
syntaxHighlight = lib.mkOption { syntaxHighlight = lib.mkOption {
type = lib.types.bool; type = lib.types.bool;
default = false; default = true;
description = lib.mdDoc "Whether to enable syntax highlight for various languages."; description = lib.mdDoc "Whether to enable syntax highlight for various languages.";
}; };
}; };
@ -40,6 +40,7 @@ in
etc.nanorc.text = (lib.optionalString cfg.syntaxHighlight '' etc.nanorc.text = (lib.optionalString cfg.syntaxHighlight ''
# load syntax highlighting files # load syntax highlighting files
include "${cfg.package}/share/nano/*.nanorc" include "${cfg.package}/share/nano/*.nanorc"
include "${cfg.package}/share/nano/extra/*.nanorc"
'') + cfg.nanorc; '') + cfg.nanorc;
systemPackages = [ cfg.package ]; systemPackages = [ cfg.package ];
}; };

View file

@ -8,11 +8,23 @@ in
{ {
options.programs.openvpn3 = { options.programs.openvpn3 = {
enable = mkEnableOption (lib.mdDoc "the openvpn3 client"); enable = mkEnableOption (lib.mdDoc "the openvpn3 client");
package = mkOption {
type = types.package;
default = pkgs.openvpn3.override {
enableSystemdResolved = config.services.resolved.enable;
};
defaultText = literalExpression ''pkgs.openvpn3.override {
enableSystemdResolved = config.services.resolved.enable;
}'';
description = lib.mdDoc ''
Which package to use for `openvpn3`.
'';
};
}; };
config = mkIf cfg.enable { config = mkIf cfg.enable {
services.dbus.packages = with pkgs; [ services.dbus.packages = [
openvpn3 cfg.package
]; ];
users.users.openvpn = { users.users.openvpn = {
@ -25,8 +37,8 @@ in
gid = config.ids.gids.openvpn; gid = config.ids.gids.openvpn;
}; };
environment.systemPackages = with pkgs; [ environment.systemPackages = [
openvpn3 cfg.package
]; ];
}; };

View file

@ -0,0 +1,20 @@
{ config, lib, pkgs, ... }:
let
cfg = config.programs.projecteur;
in
{
options.programs.projecteur = {
enable = lib.mkEnableOption (lib.mdDoc "projecteur");
package = lib.mkPackageOptionMD pkgs "projecteur" { };
};
config = lib.mkIf cfg.enable {
environment.systemPackages = [ cfg.package ];
services.udev.packages = [ cfg.package ];
};
meta = {
maintainers = with lib.maintainers; [ benneti drupol ];
};
}

View file

@ -36,6 +36,19 @@ in
''; '';
}; };
cageArgs = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ "-s" ];
example = lib.literalExpression
''
[ "-s" "-m" "last" ]
'';
description = lib.mdDoc ''
Additional arguments to be passed to
[cage](https://github.com/cage-kiosk/cage).
'';
};
extraCss = lib.mkOption { extraCss = lib.mkOption {
type = lib.types.either lib.types.path lib.types.lines; type = lib.types.either lib.types.path lib.types.lines;
default = ""; default = "";
@ -50,7 +63,7 @@ in
config = lib.mkIf cfg.enable { config = lib.mkIf cfg.enable {
services.greetd = { services.greetd = {
enable = lib.mkDefault true; enable = lib.mkDefault true;
settings.default_session.command = lib.mkDefault "${pkgs.dbus}/bin/dbus-run-session ${lib.getExe pkgs.cage} -s -- ${lib.getExe cfg.package}"; settings.default_session.command = lib.mkDefault "${pkgs.dbus}/bin/dbus-run-session ${lib.getExe pkgs.cage} ${lib.escapeShellArgs cfg.cageArgs} -- ${lib.getExe cfg.package}";
}; };
environment.etc = { environment.etc = {

View file

@ -0,0 +1,16 @@
{ config, lib, pkgs, ... }:
let
cfg = config.programs.virt-manager;
in {
options.programs.virt-manager = {
enable = lib.mkEnableOption "virt-manager, an UI for managing virtual machines in libvirt";
package = lib.mkPackageOption pkgs "virt-manager" {};
};
config = lib.mkIf cfg.enable {
environment.systemPackages = [ cfg.package ];
programs.dconf.enable = true;
};
}

View file

@ -6,6 +6,92 @@
with lib; with lib;
let let
mkRulesTypeOption = type: mkOption {
# These options are experimental and subject to breaking changes without notice.
description = lib.mdDoc ''
PAM `${type}` rules for this service.
Attribute keys are the name of each rule.
'';
type = types.attrsOf (types.submodule ({ name, config, ... }: {
options = {
name = mkOption {
type = types.str;
description = lib.mdDoc ''
Name of this rule.
'';
internal = true;
readOnly = true;
};
enable = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Whether this rule is added to the PAM service config file.
'';
};
order = mkOption {
type = types.int;
description = lib.mdDoc ''
Order of this rule in the service file. Rules are arranged in ascending order of this value.
::: {.warning}
The `order` values for the built-in rules are subject to change. If you assign a constant value to this option, a system update could silently reorder your rule. You could be locked out of your system, or your system could be left wide open. When using this option, set it to a relative offset from another rule's `order` value:
```nix
{
security.pam.services.login.rules.auth.foo.order =
config.security.pam.services.login.rules.auth.unix.order + 10;
}
```
:::
'';
};
control = mkOption {
type = types.str;
description = lib.mdDoc ''
Indicates the behavior of the PAM-API should the module fail to succeed in its authentication task. See `control` in {manpage}`pam.conf(5)` for details.
'';
};
modulePath = mkOption {
type = types.str;
description = lib.mdDoc ''
Either the full filename of the PAM to be used by the application (it begins with a '/'), or a relative pathname from the default module location. See `module-path` in {manpage}`pam.conf(5)` for details.
'';
};
args = mkOption {
type = types.listOf types.str;
description = lib.mdDoc ''
Tokens that can be used to modify the specific behavior of the given PAM. Such arguments will be documented for each individual module. See `module-arguments` in {manpage}`pam.conf(5)` for details.
Escaping rules for spaces and square brackets are automatically applied.
{option}`settings` are automatically added as {option}`args`. It's recommended to use the {option}`settings` option whenever possible so that arguments can be overridden.
'';
};
settings = mkOption {
type = with types; attrsOf (nullOr (oneOf [ bool str int pathInStore ]));
default = {};
description = lib.mdDoc ''
Settings to add as `module-arguments`.
Boolean values render just the key if true, and nothing if false. Null values are ignored. All other values are rendered as key-value pairs.
'';
};
};
config = {
inherit name;
# Formats an attrset of settings as args for use as `module-arguments`.
args = concatLists (flip mapAttrsToList config.settings (name: value:
if isBool value
then optional value name
else optional (value != null) "${name}=${toString value}"
));
};
}));
};
parentConfig = config; parentConfig = config;
pamOpts = { config, name, ... }: let cfg = config; in let config = parentConfig; in { pamOpts = { config, name, ... }: let cfg = config; in let config = parentConfig; in {
@ -18,6 +104,28 @@ let
description = lib.mdDoc "Name of the PAM service."; description = lib.mdDoc "Name of the PAM service.";
}; };
rules = mkOption {
# This option is experimental and subject to breaking changes without notice.
visible = false;
description = lib.mdDoc ''
PAM rules for this service.
::: {.warning}
This option and its suboptions are experimental and subject to breaking changes without notice.
If you use this option in your system configuration, you will need to manually monitor this module for any changes. Otherwise, failure to adjust your configuration properly could lead to you being locked out of your system, or worse, your system could be left wide open to attackers.
If you share configuration examples that use this option, you MUST include this warning so that users are informed.
You may freely use this option within `nixpkgs`, and future changes will account for those use sites.
:::
'';
type = types.submodule {
options = genAttrs [ "account" "auth" "password" "session" ] mkRulesTypeOption;
};
};
unixAuth = mkOption { unixAuth = mkOption {
default = true; default = true;
type = types.bool; type = types.bool;
@ -470,90 +578,114 @@ let
setLoginUid = mkDefault cfg.startSession; setLoginUid = mkDefault cfg.startSession;
limits = mkDefault config.security.pam.loginLimits; limits = mkDefault config.security.pam.loginLimits;
text = let
ensureUniqueOrder = type: rules:
let
checkPair = a: b: assert assertMsg (a.order != b.order) "security.pam.services.${name}.rules.${type}: rules '${a.name}' and '${b.name}' cannot have the same order value (${toString a.order})"; b;
checked = zipListsWith checkPair rules (drop 1 rules);
in take 1 rules ++ checked;
# Formats a string for use in `module-arguments`. See `man pam.conf`.
formatModuleArgument = token:
if hasInfix " " token
then "[${replaceStrings ["]"] ["\\]"] token}]"
else token;
formatRules = type: pipe cfg.rules.${type} [
attrValues
(filter (rule: rule.enable))
(sort (a: b: a.order < b.order))
(ensureUniqueOrder type)
(map (rule: concatStringsSep " " (
[ type rule.control rule.modulePath ]
++ map formatModuleArgument rule.args
++ [ "# ${rule.name} (order ${toString rule.order})" ]
)))
(concatStringsSep "\n")
];
in mkDefault ''
# Account management.
${formatRules "account"}
# Authentication management.
${formatRules "auth"}
# Password management.
${formatRules "password"}
# Session management.
${formatRules "session"}
'';
# !!! TODO: move the LDAP stuff to the LDAP module, and the # !!! TODO: move the LDAP stuff to the LDAP module, and the
# Samba stuff to the Samba module. This requires that the PAM # Samba stuff to the Samba module. This requires that the PAM
# module provides the right hooks. # module provides the right hooks.
text = mkDefault rules = let
( autoOrderRules = flip pipe [
'' (imap1 (index: rule: rule // { order = mkDefault (10000 + index * 100); } ))
# Account management. (map (rule: nameValuePair rule.name (removeAttrs rule [ "name" ])))
'' + listToAttrs
optionalString use_ldap '' ];
account sufficient ${pam_ldap}/lib/security/pam_ldap.so in {
'' + account = autoOrderRules [
optionalString cfg.mysqlAuth '' { name = "ldap"; enable = use_ldap; control = "sufficient"; modulePath = "${pam_ldap}/lib/security/pam_ldap.so"; }
account sufficient ${pkgs.pam_mysql}/lib/security/pam_mysql.so config_file=/etc/security/pam_mysql.conf { name = "mysql"; enable = cfg.mysqlAuth; control = "sufficient"; modulePath = "${pkgs.pam_mysql}/lib/security/pam_mysql.so"; settings = {
'' + config_file = "/etc/security/pam_mysql.conf";
optionalString (config.services.kanidm.enablePam) '' }; }
account sufficient ${pkgs.kanidm}/lib/pam_kanidm.so ignore_unknown_user { name = "kanidm"; enable = config.services.kanidm.enablePam; control = "sufficient"; modulePath = "${pkgs.kanidm}/lib/pam_kanidm.so"; settings = {
'' + ignore_unknown_user = true;
optionalString (config.services.sssd.enable && cfg.sssdStrictAccess==false) '' }; }
account sufficient ${pkgs.sssd}/lib/security/pam_sss.so { name = "sss"; enable = config.services.sssd.enable; control = if cfg.sssdStrictAccess then "[default=bad success=ok user_unknown=ignore]" else "sufficient"; modulePath = "${pkgs.sssd}/lib/security/pam_sss.so"; }
'' + { name = "krb5"; enable = config.security.pam.krb5.enable; control = "sufficient"; modulePath = "${pam_krb5}/lib/security/pam_krb5.so"; }
optionalString (config.services.sssd.enable && cfg.sssdStrictAccess) '' { name = "oslogin_login"; enable = cfg.googleOsLoginAccountVerification; control = "[success=ok ignore=ignore default=die]"; modulePath = "${pkgs.google-guest-oslogin}/lib/security/pam_oslogin_login.so"; }
account [default=bad success=ok user_unknown=ignore] ${pkgs.sssd}/lib/security/pam_sss.so { name = "oslogin_admin"; enable = cfg.googleOsLoginAccountVerification; control = "[success=ok default=ignore]"; modulePath = "${pkgs.google-guest-oslogin}/lib/security/pam_oslogin_admin.so"; }
'' + { name = "systemd_home"; enable = config.services.homed.enable; control = "sufficient"; modulePath = "${config.systemd.package}/lib/security/pam_systemd_home.so"; }
optionalString config.security.pam.krb5.enable ''
account sufficient ${pam_krb5}/lib/security/pam_krb5.so
'' +
optionalString cfg.googleOsLoginAccountVerification ''
account [success=ok ignore=ignore default=die] ${pkgs.google-guest-oslogin}/lib/security/pam_oslogin_login.so
account [success=ok default=ignore] ${pkgs.google-guest-oslogin}/lib/security/pam_oslogin_admin.so
'' +
optionalString config.services.homed.enable ''
account sufficient ${config.systemd.package}/lib/security/pam_systemd_home.so
'' +
# The required pam_unix.so module has to come after all the sufficient modules # The required pam_unix.so module has to come after all the sufficient modules
# because otherwise, the account lookup will fail if the user does not exist # because otherwise, the account lookup will fail if the user does not exist
# locally, for example with MySQL- or LDAP-auth. # locally, for example with MySQL- or LDAP-auth.
'' { name = "unix"; control = "required"; modulePath = "pam_unix.so"; }
account required pam_unix.so ];
# Authentication management. auth = autoOrderRules ([
'' + { name = "oslogin_login"; enable = cfg.googleOsLoginAuthentication; control = "[success=done perm_denied=die default=ignore]"; modulePath = "${pkgs.google-guest-oslogin}/lib/security/pam_oslogin_login.so"; }
optionalString cfg.googleOsLoginAuthentication '' { name = "rootok"; enable = cfg.rootOK; control = "sufficient"; modulePath = "pam_rootok.so"; }
auth [success=done perm_denied=die default=ignore] ${pkgs.google-guest-oslogin}/lib/security/pam_oslogin_login.so { name = "wheel"; enable = cfg.requireWheel; control = "required"; modulePath = "pam_wheel.so"; settings = {
'' + use_uid = true;
optionalString cfg.rootOK '' }; }
auth sufficient pam_rootok.so { name = "faillock"; enable = cfg.logFailures; control = "required"; modulePath = "pam_faillock.so"; }
'' + { name = "mysql"; enable = cfg.mysqlAuth; control = "sufficient"; modulePath = "${pkgs.pam_mysql}/lib/security/pam_mysql.so"; settings = {
optionalString cfg.requireWheel '' config_file = "/etc/security/pam_mysql.conf";
auth required pam_wheel.so use_uid }; }
'' + { name = "ssh_agent_auth"; enable = config.security.pam.enableSSHAgentAuth && cfg.sshAgentAuth; control = "sufficient"; modulePath = "${pkgs.pam_ssh_agent_auth}/libexec/pam_ssh_agent_auth.so"; settings = {
optionalString cfg.logFailures '' file = lib.concatStringsSep ":" config.services.openssh.authorizedKeysFiles;
auth required pam_faillock.so }; }
'' + (let p11 = config.security.pam.p11; in { name = "p11"; enable = cfg.p11Auth; control = p11.control; modulePath = "${pkgs.pam_p11}/lib/security/pam_p11.so"; args = [
optionalString cfg.mysqlAuth '' "${pkgs.opensc}/lib/opensc-pkcs11.so"
auth sufficient ${pkgs.pam_mysql}/lib/security/pam_mysql.so config_file=/etc/security/pam_mysql.conf ]; })
'' + (let u2f = config.security.pam.u2f; in { name = "u2f"; enable = cfg.u2fAuth; control = u2f.control; modulePath = "${pkgs.pam_u2f}/lib/security/pam_u2f.so"; settings = {
optionalString (config.security.pam.enableSSHAgentAuth && cfg.sshAgentAuth) '' inherit (u2f) debug interactive cue origin;
auth sufficient ${pkgs.pam_ssh_agent_auth}/libexec/pam_ssh_agent_auth.so file=${lib.concatStringsSep ":" config.services.openssh.authorizedKeysFiles} authfile = u2f.authFile;
'' + appid = u2f.appId;
(let p11 = config.security.pam.p11; in optionalString cfg.p11Auth '' }; })
auth ${p11.control} ${pkgs.pam_p11}/lib/security/pam_p11.so ${pkgs.opensc}/lib/opensc-pkcs11.so { name = "usb"; enable = cfg.usbAuth; control = "sufficient"; modulePath = "${pkgs.pam_usb}/lib/security/pam_usb.so"; }
'') + (let ussh = config.security.pam.ussh; in { name = "ussh"; enable = config.security.pam.ussh.enable && cfg.usshAuth; control = ussh.control; modulePath = "${pkgs.pam_ussh}/lib/security/pam_ussh.so"; settings = {
(let u2f = config.security.pam.u2f; in optionalString cfg.u2fAuth ('' ca_file = ussh.caFile;
auth ${u2f.control} ${pkgs.pam_u2f}/lib/security/pam_u2f.so ${optionalString u2f.debug "debug"} ${optionalString (u2f.authFile != null) "authfile=${u2f.authFile}"} '' authorized_principals = ussh.authorizedPrincipals;
+ ''${optionalString u2f.interactive "interactive"} ${optionalString u2f.cue "cue"} ${optionalString (u2f.appId != null) "appid=${u2f.appId}"} ${optionalString (u2f.origin != null) "origin=${u2f.origin}"} authorized_principals_file = ussh.authorizedPrincipalsFile;
'')) + inherit (ussh) group;
optionalString cfg.usbAuth '' }; })
auth sufficient ${pkgs.pam_usb}/lib/security/pam_usb.so (let oath = config.security.pam.oath; in { name = "oath"; enable = cfg.oathAuth; control = "requisite"; modulePath = "${pkgs.oath-toolkit}/lib/security/pam_oath.so"; settings = {
'' + inherit (oath) window digits;
(let ussh = config.security.pam.ussh; in optionalString (config.security.pam.ussh.enable && cfg.usshAuth) '' usersfile = oath.usersFile;
auth ${ussh.control} ${pkgs.pam_ussh}/lib/security/pam_ussh.so ${optionalString (ussh.caFile != null) "ca_file=${ussh.caFile}"} ${optionalString (ussh.authorizedPrincipals != null) "authorized_principals=${ussh.authorizedPrincipals}"} ${optionalString (ussh.authorizedPrincipalsFile != null) "authorized_principals_file=${ussh.authorizedPrincipalsFile}"} ${optionalString (ussh.group != null) "group=${ussh.group}"} }; })
'') + (let yubi = config.security.pam.yubico; in { name = "yubico"; enable = cfg.yubicoAuth; control = yubi.control; modulePath = "${pkgs.yubico-pam}/lib/security/pam_yubico.so"; settings = {
(let oath = config.security.pam.oath; in optionalString cfg.oathAuth '' inherit (yubi) mode debug;
auth requisite ${pkgs.oath-toolkit}/lib/security/pam_oath.so window=${toString oath.window} usersfile=${toString oath.usersFile} digits=${toString oath.digits} chalresp_path = yubi.challengeResponsePath;
'') + id = mkIf (yubi.mode == "client") yubi.id;
(let yubi = config.security.pam.yubico; in optionalString cfg.yubicoAuth '' }; })
auth ${yubi.control} ${pkgs.yubico-pam}/lib/security/pam_yubico.so mode=${toString yubi.mode} ${optionalString (yubi.challengeResponsePath != null) "chalresp_path=${yubi.challengeResponsePath}"} ${optionalString (yubi.mode == "client") "id=${toString yubi.id}"} ${optionalString yubi.debug "debug"} (let dp9ik = config.security.pam.dp9ik; in { name = "p9"; enable = dp9ik.enable; control = dp9ik.control; modulePath = "${pkgs.pam_dp9ik}/lib/security/pam_p9.so"; args = [
'') + dp9ik.authserver
(let dp9ik = config.security.pam.dp9ik; in optionalString dp9ik.enable '' ]; })
auth ${dp9ik.control} ${pkgs.pam_dp9ik}/lib/security/pam_p9.so ${dp9ik.authserver} { name = "fprintd"; enable = cfg.fprintAuth; control = "sufficient"; modulePath = "${pkgs.fprintd}/lib/security/pam_fprintd.so"; }
'') + ] ++
optionalString cfg.fprintAuth ''
auth sufficient ${pkgs.fprintd}/lib/security/pam_fprintd.so
'' +
# Modules in this block require having the password set in PAM_AUTHTOK. # Modules in this block require having the password set in PAM_AUTHTOK.
# pam_unix is marked as 'sufficient' on NixOS which means nothing will run # pam_unix is marked as 'sufficient' on NixOS which means nothing will run
# after it succeeds. Certain modules need to run after pam_unix # after it succeeds. Certain modules need to run after pam_unix
@ -562,7 +694,7 @@ let
# We use try_first_pass the second time to avoid prompting password twice. # We use try_first_pass the second time to avoid prompting password twice.
# #
# The same principle applies to systemd-homed # The same principle applies to systemd-homed
(optionalString ((cfg.unixAuth || config.services.homed.enable) && (optionals ((cfg.unixAuth || config.services.homed.enable) &&
(config.security.pam.enableEcryptfs (config.security.pam.enableEcryptfs
|| config.security.pam.enableFscrypt || config.security.pam.enableFscrypt
|| cfg.pamMount || cfg.pamMount
@ -573,199 +705,173 @@ let
|| cfg.failDelay.enable || cfg.failDelay.enable
|| cfg.duoSecurity.enable || cfg.duoSecurity.enable
|| cfg.zfs)) || cfg.zfs))
( [
optionalString config.services.homed.enable '' { name = "systemd_home-early"; enable = config.services.homed.enable; control = "optional"; modulePath = "${config.systemd.package}/lib/security/pam_systemd_home.so"; }
auth optional ${config.systemd.package}/lib/security/pam_systemd_home.so { name = "unix-early"; enable = cfg.unixAuth; control = "optional"; modulePath = "pam_unix.so"; settings = {
'' + nullok = cfg.allowNullPassword;
optionalString cfg.unixAuth '' inherit (cfg) nodelay;
auth optional pam_unix.so ${optionalString cfg.allowNullPassword "nullok"} ${optionalString cfg.nodelay "nodelay"} likeauth likeauth = true;
'' + }; }
optionalString config.security.pam.enableEcryptfs '' { name = "ecryptfs"; enable = config.security.pam.enableEcryptfs; control = "optional"; modulePath = "${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"; settings = {
auth optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so unwrap unwrap = true;
'' + }; }
optionalString config.security.pam.enableFscrypt '' { name = "fscrypt"; enable = config.security.pam.enableFscrypt; control = "optional"; modulePath = "${pkgs.fscrypt-experimental}/lib/security/pam_fscrypt.so"; }
auth optional ${pkgs.fscrypt-experimental}/lib/security/pam_fscrypt.so { name = "zfs_key"; enable = cfg.zfs; control = "optional"; modulePath = "${config.boot.zfs.package}/lib/security/pam_zfs_key.so"; settings = {
'' + inherit (config.security.pam.zfs) homes;
optionalString cfg.zfs '' }; }
auth optional ${config.boot.zfs.package}/lib/security/pam_zfs_key.so homes=${config.security.pam.zfs.homes} { name = "mount"; enable = cfg.pamMount; control = "optional"; modulePath = "${pkgs.pam_mount}/lib/security/pam_mount.so"; settings = {
'' + disable_interactive = true;
optionalString cfg.pamMount '' }; }
auth optional ${pkgs.pam_mount}/lib/security/pam_mount.so disable_interactive { name = "kwallet5"; enable = cfg.enableKwallet; control = "optional"; modulePath = "${pkgs.plasma5Packages.kwallet-pam}/lib/security/pam_kwallet5.so"; settings = {
'' + kwalletd = "${pkgs.plasma5Packages.kwallet.bin}/bin/kwalletd5";
optionalString cfg.enableKwallet '' }; }
auth optional ${pkgs.plasma5Packages.kwallet-pam}/lib/security/pam_kwallet5.so kwalletd=${pkgs.plasma5Packages.kwallet.bin}/bin/kwalletd5 { name = "gnome_keyring"; enable = cfg.enableGnomeKeyring; control = "optional"; modulePath = "${pkgs.gnome.gnome-keyring}/lib/security/pam_gnome_keyring.so"; }
'' + { name = "gnupg"; enable = cfg.gnupg.enable; control = "optional"; modulePath = "${pkgs.pam_gnupg}/lib/security/pam_gnupg.so"; settings = {
optionalString cfg.enableGnomeKeyring '' store-only = cfg.gnupg.storeOnly;
auth optional ${pkgs.gnome.gnome-keyring}/lib/security/pam_gnome_keyring.so }; }
'' + { name = "faildelay"; enable = cfg.failDelay.enable; control = "optional"; modulePath = "${pkgs.pam}/lib/security/pam_faildelay.so"; settings = {
optionalString cfg.gnupg.enable '' inherit (cfg.failDelay) delay;
auth optional ${pkgs.pam_gnupg}/lib/security/pam_gnupg.so ${optionalString cfg.gnupg.storeOnly " store-only"} }; }
'' + { name = "google_authenticator"; enable = cfg.googleAuthenticator.enable; control = "required"; modulePath = "${pkgs.google-authenticator}/lib/security/pam_google_authenticator.so"; settings = {
optionalString cfg.failDelay.enable '' no_increment_hotp = true;
auth optional ${pkgs.pam}/lib/security/pam_faildelay.so delay=${toString cfg.failDelay.delay} }; }
'' + { name = "duo"; enable = cfg.duoSecurity.enable; control = "required"; modulePath = "${pkgs.duo-unix}/lib/security/pam_duo.so"; }
optionalString cfg.googleAuthenticator.enable '' ]) ++ [
auth required ${pkgs.google-authenticator}/lib/security/pam_google_authenticator.so no_increment_hotp { name = "systemd_home"; enable = config.services.homed.enable; control = "sufficient"; modulePath = "${config.systemd.package}/lib/security/pam_systemd_home.so"; }
'' + { name = "unix"; enable = cfg.unixAuth; control = "sufficient"; modulePath = "pam_unix.so"; settings = {
optionalString cfg.duoSecurity.enable '' nullok = cfg.allowNullPassword;
auth required ${pkgs.duo-unix}/lib/security/pam_duo.so inherit (cfg) nodelay;
'' likeauth = true;
)) + try_first_pass = true;
optionalString config.services.homed.enable '' }; }
auth sufficient ${config.systemd.package}/lib/security/pam_systemd_home.so { name = "otpw"; enable = cfg.otpwAuth; control = "sufficient"; modulePath = "${pkgs.otpw}/lib/security/pam_otpw.so"; }
'' + { name = "ldap"; enable = use_ldap; control = "sufficient"; modulePath = "${pam_ldap}/lib/security/pam_ldap.so"; settings = {
optionalString cfg.unixAuth '' use_first_pass = true;
auth sufficient pam_unix.so ${optionalString cfg.allowNullPassword "nullok"} ${optionalString cfg.nodelay "nodelay"} likeauth try_first_pass }; }
'' + { name = "kanidm"; enable = config.services.kanidm.enablePam; control = "sufficient"; modulePath = "${pkgs.kanidm}/lib/pam_kanidm.so"; settings = {
optionalString cfg.otpwAuth '' ignore_unknown_user = true;
auth sufficient ${pkgs.otpw}/lib/security/pam_otpw.so use_first_pass = true;
'' + }; }
optionalString use_ldap '' { name = "sss"; enable = config.services.sssd.enable; control = "sufficient"; modulePath = "${pkgs.sssd}/lib/security/pam_sss.so"; settings = {
auth sufficient ${pam_ldap}/lib/security/pam_ldap.so use_first_pass use_first_pass = true;
'' + }; }
optionalString config.services.kanidm.enablePam '' { name = "krb5"; enable = config.security.pam.krb5.enable; control = "[default=ignore success=1 service_err=reset]"; modulePath = "${pam_krb5}/lib/security/pam_krb5.so"; settings = {
auth sufficient ${pkgs.kanidm}/lib/pam_kanidm.so ignore_unknown_user use_first_pass use_first_pass = true;
'' + }; }
optionalString config.services.sssd.enable '' { name = "ccreds-validate"; enable = config.security.pam.krb5.enable; control = "[default=die success=done]"; modulePath = "${pam_ccreds}/lib/security/pam_ccreds.so"; settings = {
auth sufficient ${pkgs.sssd}/lib/security/pam_sss.so use_first_pass action = "validate";
'' + use_first_pass = true;
optionalString config.security.pam.krb5.enable '' }; }
auth [default=ignore success=1 service_err=reset] ${pam_krb5}/lib/security/pam_krb5.so use_first_pass { name = "ccreds-store"; enable = config.security.pam.krb5.enable; control = "sufficient"; modulePath = "${pam_ccreds}/lib/security/pam_ccreds.so"; settings = {
auth [default=die success=done] ${pam_ccreds}/lib/security/pam_ccreds.so action=validate use_first_pass action = "store";
auth sufficient ${pam_ccreds}/lib/security/pam_ccreds.so action=store use_first_pass use_first_pass = true;
'' + }; }
'' { name = "deny"; control = "required"; modulePath = "pam_deny.so"; }
auth required pam_deny.so ]);
# Password management. password = autoOrderRules [
'' + { name = "systemd_home"; enable = config.services.homed.enable; control = "sufficient"; modulePath = "${config.systemd.package}/lib/security/pam_systemd_home.so"; }
optionalString config.services.homed.enable '' { name = "unix"; control = "sufficient"; modulePath = "pam_unix.so"; settings = {
password sufficient ${config.systemd.package}/lib/security/pam_systemd_home.so nullok = true;
'' + '' yescrypt = true;
password sufficient pam_unix.so nullok yescrypt }; }
'' + { name = "ecryptfs"; enable = config.security.pam.enableEcryptfs; control = "optional"; modulePath = "${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"; }
optionalString config.security.pam.enableEcryptfs '' { name = "fscrypt"; enable = config.security.pam.enableFscrypt; control = "optional"; modulePath = "${pkgs.fscrypt-experimental}/lib/security/pam_fscrypt.so"; }
password optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so { name = "zfs_key"; enable = cfg.zfs; control = "optional"; modulePath = "${config.boot.zfs.package}/lib/security/pam_zfs_key.so"; settings = {
'' + inherit (config.security.pam.zfs) homes;
optionalString config.security.pam.enableFscrypt '' }; }
password optional ${pkgs.fscrypt-experimental}/lib/security/pam_fscrypt.so { name = "mount"; enable = cfg.pamMount; control = "optional"; modulePath = "${pkgs.pam_mount}/lib/security/pam_mount.so"; }
'' + { name = "ldap"; enable = use_ldap; control = "sufficient"; modulePath = "${pam_ldap}/lib/security/pam_ldap.so"; }
optionalString cfg.zfs '' { name = "mysql"; enable = cfg.mysqlAuth; control = "sufficient"; modulePath = "${pkgs.pam_mysql}/lib/security/pam_mysql.so"; settings = {
password optional ${config.boot.zfs.package}/lib/security/pam_zfs_key.so homes=${config.security.pam.zfs.homes} config_file = "/etc/security/pam_mysql.conf";
'' + }; }
optionalString cfg.pamMount '' { name = "kanidm"; enable = config.services.kanidm.enablePam; control = "sufficient"; modulePath = "${pkgs.kanidm}/lib/pam_kanidm.so"; }
password optional ${pkgs.pam_mount}/lib/security/pam_mount.so { name = "sss"; enable = config.services.sssd.enable; control = "sufficient"; modulePath = "${pkgs.sssd}/lib/security/pam_sss.so"; }
'' + { name = "krb5"; enable = config.security.pam.krb5.enable; control = "sufficient"; modulePath = "${pam_krb5}/lib/security/pam_krb5.so"; settings = {
optionalString use_ldap '' use_first_pass = true;
password sufficient ${pam_ldap}/lib/security/pam_ldap.so }; }
'' + { name = "gnome_keyring"; enable = cfg.enableGnomeKeyring; control = "optional"; modulePath = "${pkgs.gnome.gnome-keyring}/lib/security/pam_gnome_keyring.so"; settings = {
optionalString cfg.mysqlAuth '' use_authtok = true;
password sufficient ${pkgs.pam_mysql}/lib/security/pam_mysql.so config_file=/etc/security/pam_mysql.conf }; }
'' + ];
optionalString config.services.kanidm.enablePam ''
password sufficient ${pkgs.kanidm}/lib/pam_kanidm.so
'' +
optionalString config.services.sssd.enable ''
password sufficient ${pkgs.sssd}/lib/security/pam_sss.so
'' +
optionalString config.security.pam.krb5.enable ''
password sufficient ${pam_krb5}/lib/security/pam_krb5.so use_first_pass
'' +
optionalString cfg.enableGnomeKeyring ''
password optional ${pkgs.gnome.gnome-keyring}/lib/security/pam_gnome_keyring.so use_authtok
'' +
''
# Session management. session = autoOrderRules [
'' + { name = "env"; enable = cfg.setEnvironment; control = "required"; modulePath = "pam_env.so"; settings = {
optionalString cfg.setEnvironment '' conffile = "/etc/pam/environment";
session required pam_env.so conffile=/etc/pam/environment readenv=0 readenv = 0;
'' + }; }
'' { name = "unix"; control = "required"; modulePath = "pam_unix.so"; }
session required pam_unix.so { name = "loginuid"; enable = cfg.setLoginUid; control = if config.boot.isContainer then "optional" else "required"; modulePath = "pam_loginuid.so"; }
'' + { name = "tty_audit"; enable = cfg.ttyAudit.enable; control = "required"; modulePath = "${pkgs.pam}/lib/security/pam_tty_audit.so"; settings = {
optionalString cfg.setLoginUid '' open_only = cfg.ttyAudit.openOnly;
session ${if config.boot.isContainer then "optional" else "required"} pam_loginuid.so enable = cfg.ttyAudit.enablePattern;
'' + disable = cfg.ttyAudit.disablePattern;
optionalString cfg.ttyAudit.enable (concatStringsSep " \\\n " ([ }; }
"session required ${pkgs.pam}/lib/security/pam_tty_audit.so" { name = "systemd_home"; enable = config.services.homed.enable; control = "required"; modulePath = "${config.systemd.package}/lib/security/pam_systemd_home.so"; }
] ++ optional cfg.ttyAudit.openOnly "open_only" { name = "mkhomedir"; enable = cfg.makeHomeDir; control = "required"; modulePath = "${pkgs.pam}/lib/security/pam_mkhomedir.so"; settings = {
++ optional (cfg.ttyAudit.enablePattern != null) "enable=${cfg.ttyAudit.enablePattern}" silent = true;
++ optional (cfg.ttyAudit.disablePattern != null) "disable=${cfg.ttyAudit.disablePattern}" skel = config.security.pam.makeHomeDir.skelDirectory;
)) + inherit (config.security.pam.makeHomeDir) umask;
optionalString config.services.homed.enable '' }; }
session required ${config.systemd.package}/lib/security/pam_systemd_home.so { name = "lastlog"; enable = cfg.updateWtmp; control = "required"; modulePath = "${pkgs.pam}/lib/security/pam_lastlog.so"; settings = {
'' + silent = true;
optionalString cfg.makeHomeDir '' }; }
session required ${pkgs.pam}/lib/security/pam_mkhomedir.so silent skel=${config.security.pam.makeHomeDir.skelDirectory} umask=${config.security.pam.makeHomeDir.umask} { name = "ecryptfs"; enable = config.security.pam.enableEcryptfs; control = "optional"; modulePath = "${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"; }
'' + # Work around https://github.com/systemd/systemd/issues/8598
optionalString cfg.updateWtmp '' # Skips the pam_fscrypt module for systemd-user sessions which do not have a password
session required ${pkgs.pam}/lib/security/pam_lastlog.so silent # anyways.
'' + # See also https://github.com/google/fscrypt/issues/95
optionalString config.security.pam.enableEcryptfs '' { name = "fscrypt-skip-systemd"; enable = config.security.pam.enableFscrypt; control = "[success=1 default=ignore]"; modulePath = "pam_succeed_if.so"; args = [
session optional ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so "service" "=" "systemd-user"
'' + ]; }
optionalString config.security.pam.enableFscrypt '' { name = "fscrypt"; enable = config.security.pam.enableFscrypt; control = "optional"; modulePath = "${pkgs.fscrypt-experimental}/lib/security/pam_fscrypt.so"; }
# Work around https://github.com/systemd/systemd/issues/8598 { name = "zfs_key-skip-systemd"; enable = cfg.zfs; control = "[success=1 default=ignore]"; modulePath = "pam_succeed_if.so"; args = [
# Skips the pam_fscrypt module for systemd-user sessions which do not have a password "service" "=" "systemd-user"
# anyways. ]; }
# See also https://github.com/google/fscrypt/issues/95 { name = "zfs_key"; enable = cfg.zfs; control = "optional"; modulePath = "${config.boot.zfs.package}/lib/security/pam_zfs_key.so"; settings = {
session [success=1 default=ignore] pam_succeed_if.so service = systemd-user inherit (config.security.pam.zfs) homes;
session optional ${pkgs.fscrypt-experimental}/lib/security/pam_fscrypt.so nounmount = config.security.pam.zfs.noUnmount;
'' + }; }
optionalString cfg.zfs '' { name = "mount"; enable = cfg.pamMount; control = "optional"; modulePath = "${pkgs.pam_mount}/lib/security/pam_mount.so"; settings = {
session [success=1 default=ignore] pam_succeed_if.so service = systemd-user disable_interactive = true;
session optional ${config.boot.zfs.package}/lib/security/pam_zfs_key.so homes=${config.security.pam.zfs.homes} ${optionalString config.security.pam.zfs.noUnmount "nounmount"} }; }
'' + { name = "ldap"; enable = use_ldap; control = "optional"; modulePath = "${pam_ldap}/lib/security/pam_ldap.so"; }
optionalString cfg.pamMount '' { name = "mysql"; enable = cfg.mysqlAuth; control = "optional"; modulePath = "${pkgs.pam_mysql}/lib/security/pam_mysql.so"; settings = {
session optional ${pkgs.pam_mount}/lib/security/pam_mount.so disable_interactive config_file = "/etc/security/pam_mysql.conf";
'' + }; }
optionalString use_ldap '' { name = "kanidm"; enable = config.services.kanidm.enablePam; control = "optional"; modulePath = "${pkgs.kanidm}/lib/pam_kanidm.so"; }
session optional ${pam_ldap}/lib/security/pam_ldap.so { name = "sss"; enable = config.services.sssd.enable; control = "optional"; modulePath = "${pkgs.sssd}/lib/security/pam_sss.so"; }
'' + { name = "krb5"; enable = config.security.pam.krb5.enable; control = "optional"; modulePath = "${pam_krb5}/lib/security/pam_krb5.so"; }
optionalString cfg.mysqlAuth '' { name = "otpw"; enable = cfg.otpwAuth; control = "optional"; modulePath = "${pkgs.otpw}/lib/security/pam_otpw.so"; }
session optional ${pkgs.pam_mysql}/lib/security/pam_mysql.so config_file=/etc/security/pam_mysql.conf { name = "systemd"; enable = cfg.startSession; control = "optional"; modulePath = "${config.systemd.package}/lib/security/pam_systemd.so"; }
'' + { name = "xauth"; enable = cfg.forwardXAuth; control = "optional"; modulePath = "pam_xauth.so"; settings = {
optionalString config.services.kanidm.enablePam '' xauthpath = "${pkgs.xorg.xauth}/bin/xauth";
session optional ${pkgs.kanidm}/lib/pam_kanidm.so systemuser = 99;
'' + }; }
optionalString config.services.sssd.enable '' { name = "limits"; enable = cfg.limits != []; control = "required"; modulePath = "${pkgs.pam}/lib/security/pam_limits.so"; settings = {
session optional ${pkgs.sssd}/lib/security/pam_sss.so conf = "${makeLimitsConf cfg.limits}";
'' + }; }
optionalString config.security.pam.krb5.enable '' { name = "motd"; enable = cfg.showMotd && (config.users.motd != null || config.users.motdFile != null); control = "optional"; modulePath = "${pkgs.pam}/lib/security/pam_motd.so"; settings = {
session optional ${pam_krb5}/lib/security/pam_krb5.so inherit motd;
'' + }; }
optionalString cfg.otpwAuth '' { name = "apparmor"; enable = cfg.enableAppArmor && config.security.apparmor.enable; control = "optional"; modulePath = "${pkgs.apparmor-pam}/lib/security/pam_apparmor.so"; settings = {
session optional ${pkgs.otpw}/lib/security/pam_otpw.so order = "user,group,default";
'' + debug = true;
optionalString cfg.startSession '' }; }
session optional ${config.systemd.package}/lib/security/pam_systemd.so { name = "kwallet5"; enable = cfg.enableKwallet; control = "optional"; modulePath = "${pkgs.plasma5Packages.kwallet-pam}/lib/security/pam_kwallet5.so"; settings = {
'' + kwalletd = "${pkgs.plasma5Packages.kwallet.bin}/bin/kwalletd5";
optionalString cfg.forwardXAuth '' }; }
session optional pam_xauth.so xauthpath=${pkgs.xorg.xauth}/bin/xauth systemuser=99 { name = "gnome_keyring"; enable = cfg.enableGnomeKeyring; control = "optional"; modulePath = "${pkgs.gnome.gnome-keyring}/lib/security/pam_gnome_keyring.so"; settings = {
'' + auto_start = true;
optionalString (cfg.limits != []) '' }; }
session required ${pkgs.pam}/lib/security/pam_limits.so conf=${makeLimitsConf cfg.limits} { name = "gnupg"; enable = cfg.gnupg.enable; control = "optional"; modulePath = "${pkgs.pam_gnupg}/lib/security/pam_gnupg.so"; settings = {
'' + no-autostart = cfg.gnupg.noAutostart;
optionalString (cfg.showMotd && (config.users.motd != null || config.users.motdFile != null)) '' }; }
session optional ${pkgs.pam}/lib/security/pam_motd.so motd=${motd} { name = "cgfs"; enable = config.virtualisation.lxc.lxcfs.enable; control = "optional"; modulePath = "${pkgs.lxc}/lib/security/pam_cgfs.so"; args = [
'' + "-c" "all"
optionalString (cfg.enableAppArmor && config.security.apparmor.enable) '' ]; }
session optional ${pkgs.apparmor-pam}/lib/security/pam_apparmor.so order=user,group,default debug ];
'' + };
optionalString (cfg.enableKwallet) ''
session optional ${pkgs.plasma5Packages.kwallet-pam}/lib/security/pam_kwallet5.so kwalletd=${pkgs.plasma5Packages.kwallet.bin}/bin/kwalletd5
'' +
optionalString (cfg.enableGnomeKeyring) ''
session optional ${pkgs.gnome.gnome-keyring}/lib/security/pam_gnome_keyring.so auto_start
'' +
optionalString cfg.gnupg.enable ''
session optional ${pkgs.pam_gnupg}/lib/security/pam_gnupg.so ${optionalString cfg.gnupg.noAutostart " no-autostart"}
'' +
optionalString (config.virtualisation.lxc.lxcfs.enable) ''
session optional ${pkgs.lxc}/lib/security/pam_cgfs.so -c all
''
);
}; };
}; };
@ -841,6 +947,8 @@ in
{ {
meta.maintainers = [ maintainers.majiir ];
imports = [ imports = [
(mkRenamedOptionModule [ "security" "pam" "enableU2F" ] [ "security" "pam" "u2f" "enable" ]) (mkRenamedOptionModule [ "security" "pam" "enableU2F" ] [ "security" "pam" "u2f" "enable" ])
]; ];
@ -1402,9 +1510,7 @@ in
fscrypt = {}; fscrypt = {};
}; };
security.apparmor.includes."abstractions/pam" = let security.apparmor.includes."abstractions/pam" =
isEnabled = test: fold or false (map test (attrValues config.security.pam.services));
in
lib.concatMapStrings lib.concatMapStrings
(name: "r ${config.environment.etc."pam.d/${name}".source},\n") (name: "r ${config.environment.etc."pam.d/${name}".source},\n")
(attrNames config.security.pam.services) + (attrNames config.security.pam.services) +
@ -1413,88 +1519,18 @@ in
mr ${getLib pkgs.pam}/lib/security/pam_*.so, mr ${getLib pkgs.pam}/lib/security/pam_*.so,
r ${getLib pkgs.pam}/lib/security/, r ${getLib pkgs.pam}/lib/security/,
'' + '' +
optionalString use_ldap '' (with lib; pipe config.security.pam.services [
mr ${pam_ldap}/lib/security/pam_ldap.so, attrValues
'' + (catAttrs "rules")
optionalString config.services.kanidm.enablePam '' (concatMap attrValues)
mr ${pkgs.kanidm}/lib/pam_kanidm.so, (concatMap attrValues)
'' + (filter (rule: rule.enable))
optionalString config.services.sssd.enable '' (catAttrs "modulePath")
mr ${pkgs.sssd}/lib/security/pam_sss.so, (filter (hasPrefix "/"))
'' + unique
optionalString config.security.pam.krb5.enable '' (map (module: "mr ${module},"))
mr ${pam_krb5}/lib/security/pam_krb5.so, concatLines
mr ${pam_ccreds}/lib/security/pam_ccreds.so, ]);
'' +
optionalString (isEnabled (cfg: cfg.googleOsLoginAccountVerification)) ''
mr ${pkgs.google-guest-oslogin}/lib/security/pam_oslogin_login.so,
mr ${pkgs.google-guest-oslogin}/lib/security/pam_oslogin_admin.so,
'' +
optionalString (isEnabled (cfg: cfg.googleOsLoginAuthentication)) ''
mr ${pkgs.google-guest-oslogin}/lib/security/pam_oslogin_login.so,
'' +
optionalString (config.security.pam.enableSSHAgentAuth
&& isEnabled (cfg: cfg.sshAgentAuth)) ''
mr ${pkgs.pam_ssh_agent_auth}/libexec/pam_ssh_agent_auth.so,
'' +
optionalString (isEnabled (cfg: cfg.fprintAuth)) ''
mr ${pkgs.fprintd}/lib/security/pam_fprintd.so,
'' +
optionalString (isEnabled (cfg: cfg.u2fAuth)) ''
mr ${pkgs.pam_u2f}/lib/security/pam_u2f.so,
'' +
optionalString (isEnabled (cfg: cfg.usbAuth)) ''
mr ${pkgs.pam_usb}/lib/security/pam_usb.so,
'' +
optionalString (isEnabled (cfg: cfg.usshAuth)) ''
mr ${pkgs.pam_ussh}/lib/security/pam_ussh.so,
'' +
optionalString (isEnabled (cfg: cfg.oathAuth)) ''
"mr ${pkgs.oath-toolkit}/lib/security/pam_oath.so,
'' +
optionalString (isEnabled (cfg: cfg.mysqlAuth)) ''
mr ${pkgs.pam_mysql}/lib/security/pam_mysql.so,
'' +
optionalString (isEnabled (cfg: cfg.yubicoAuth)) ''
mr ${pkgs.yubico-pam}/lib/security/pam_yubico.so,
'' +
optionalString (isEnabled (cfg: cfg.duoSecurity.enable)) ''
mr ${pkgs.duo-unix}/lib/security/pam_duo.so,
'' +
optionalString (isEnabled (cfg: cfg.otpwAuth)) ''
mr ${pkgs.otpw}/lib/security/pam_otpw.so,
'' +
optionalString config.security.pam.enableEcryptfs ''
mr ${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so,
'' +
optionalString config.security.pam.enableFscrypt ''
mr ${pkgs.fscrypt-experimental}/lib/security/pam_fscrypt.so,
'' +
optionalString (isEnabled (cfg: cfg.pamMount)) ''
mr ${pkgs.pam_mount}/lib/security/pam_mount.so,
'' +
optionalString (isEnabled (cfg: cfg.enableGnomeKeyring)) ''
mr ${pkgs.gnome.gnome-keyring}/lib/security/pam_gnome_keyring.so,
'' +
optionalString (isEnabled (cfg: cfg.startSession)) ''
mr ${config.systemd.package}/lib/security/pam_systemd.so,
'' +
optionalString (isEnabled (cfg: cfg.enableAppArmor)
&& config.security.apparmor.enable) ''
mr ${pkgs.apparmor-pam}/lib/security/pam_apparmor.so,
'' +
optionalString (isEnabled (cfg: cfg.enableKwallet)) ''
mr ${pkgs.plasma5Packages.kwallet-pam}/lib/security/pam_kwallet5.so,
'' +
optionalString config.virtualisation.lxc.lxcfs.enable ''
mr ${pkgs.lxc}/lib/security/pam_cgfs.so,
'' +
optionalString (isEnabled (cfg: cfg.zfs)) ''
mr ${config.boot.zfs.package}/lib/security/pam_zfs_key.so,
'' +
optionalString config.services.homed.enable ''
mr ${config.systemd.package}/lib/security/pam_systemd_home.so
'';
}; };
} }

View file

@ -26,8 +26,6 @@
// aborts when false, printing the failed expression // aborts when false, printing the failed expression
#define ASSERT(expr) ((expr) ? (void) 0 : assert_failure(#expr)) #define ASSERT(expr) ((expr) ? (void) 0 : assert_failure(#expr))
// aborts when returns non-zero, printing the failed expression and errno
#define MUSTSUCCEED(expr) ((expr) ? print_errno_and_die(#expr) : (void) 0)
extern char **environ; extern char **environ;
@ -48,12 +46,6 @@ static noreturn void assert_failure(const char *assertion) {
abort(); abort();
} }
static noreturn void print_errno_and_die(const char *assertion) {
fprintf(stderr, "Call `%s` in NixOS's wrapper.c failed: %s\n", assertion, strerror(errno));
fflush(stderr);
abort();
}
int get_last_cap(unsigned *last_cap) { int get_last_cap(unsigned *last_cap) {
FILE* file = fopen("/proc/sys/kernel/cap_last_cap", "r"); FILE* file = fopen("/proc/sys/kernel/cap_last_cap", "r");
if (file == NULL) { if (file == NULL) {

View file

@ -5,7 +5,6 @@ stdenv.mkDerivation {
name = "security-wrapper"; name = "security-wrapper";
buildInputs = [ linuxHeaders ]; buildInputs = [ linuxHeaders ];
dontUnpack = true; dontUnpack = true;
hardeningEnable = [ "pie" ];
CFLAGS = [ CFLAGS = [
''-DSOURCE_PROG="${sourceProg}"'' ''-DSOURCE_PROG="${sourceProg}"''
] ++ (if debug then [ ] ++ (if debug then [

View file

@ -6,32 +6,50 @@ let
cfg = config.services.borgmatic; cfg = config.services.borgmatic;
settingsFormat = pkgs.formats.yaml { }; settingsFormat = pkgs.formats.yaml { };
repository = with types; submodule {
options = {
path = mkOption {
type = str;
description = mdDoc ''
Path to the repository
'';
};
label = mkOption {
type = str;
description = mdDoc ''
Label to the repository
'';
};
};
};
cfgType = with types; submodule { cfgType = with types; submodule {
freeformType = settingsFormat.type; freeformType = settingsFormat.type;
options.location = { options = {
source_directories = mkOption { source_directories = mkOption {
type = listOf str; type = nullOr (listOf str);
default = null;
description = mdDoc '' description = mdDoc ''
List of source directories to backup (required). Globs and List of source directories and files to backup. Globs and tildes are
tildes are expanded. expanded. Do not backslash spaces in path names.
''; '';
example = [ "/home" "/etc" "/var/log/syslog*" ]; example = [ "/home" "/etc" "/var/log/syslog*" "/home/user/path with spaces" ];
}; };
repositories = mkOption { repositories = mkOption {
type = listOf str; type = nullOr (listOf repository);
default = null;
description = mdDoc '' description = mdDoc ''
Paths to local or remote repositories (required). Tildes are A required list of local or remote repositories with paths and
expanded. Multiple repositories are backed up to in optional labels (which can be used with the --repository flag to
sequence. Borg placeholders can be used. See the output of select a repository). Tildes are expanded. Multiple repositories are
"borg help placeholders" for details. See ssh_command for backed up to in sequence. Borg placeholders can be used. See the
SSH options like identity file or port. If systemd service output of "borg help placeholders" for details. See ssh_command for
is used, then add local repository paths in the systemd SSH options like identity file or port. If systemd service is used,
service file to the ReadWritePaths list. then add local repository paths in the systemd service file to the
ReadWritePaths list.
''; '';
example = [ example = [
"ssh://user@backupserver/./sourcehostname.borg" { path="ssh://user@backupserver/./sourcehostname.borg"; label="backupserver"; }
"ssh://user@backupserver/./{fqdn}" { path="/mnt/backup"; label="local"; }
"/var/local/backups/local.borg"
]; ];
}; };
}; };
@ -62,6 +80,13 @@ in
config = mkIf cfg.enable { config = mkIf cfg.enable {
warnings = []
++ optional (cfg.settings != null && cfg.settings.location != null)
"`services.borgmatic.settings.location` is deprecated, please move your options out of sections to the global scope"
++ optional (catAttrs "location" (attrValues cfg.configurations) != [])
"`services.borgmatic.configurations.<name>.location` is deprecated, please move your options out of sections to the global scope"
;
environment.systemPackages = [ pkgs.borgmatic ]; environment.systemPackages = [ pkgs.borgmatic ];
environment.etc = (optionalAttrs (cfg.settings != null) { "borgmatic/config.yaml".source = cfgfile; }) // environment.etc = (optionalAttrs (cfg.settings != null) { "borgmatic/config.yaml".source = cfgfile; }) //

View file

@ -0,0 +1,79 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.ferretdb;
in
{
meta.maintainers = with lib.maintainers; [ julienmalka camillemndn ];
options = {
services.ferretdb = {
enable = mkEnableOption "FerretDB, an Open Source MongoDB alternative.";
package = mkOption {
type = types.package;
example = literalExpression "pkgs.ferretdb";
default = pkgs.ferretdb;
defaultText = "pkgs.ferretdb";
description = "FerretDB package to use.";
};
settings = lib.mkOption {
type =
lib.types.submodule { freeformType = with lib.types; attrsOf str; };
example = {
FERRETDB_LOG_LEVEL = "warn";
FERRETDB_MODE = "normal";
};
description = ''
Additional configuration for FerretDB, see
<https://docs.ferretdb.io/flags/>
for supported values.
'';
};
};
};
config = mkIf cfg.enable
{
services.ferretdb.settings = {
FERRETDB_HANDLER = lib.mkDefault "sqlite";
FERRETDB_SQLITE_URL = lib.mkDefault "file:/var/lib/ferretdb/";
};
systemd.services.ferretdb = {
description = "FerretDB";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
environment = cfg.settings;
serviceConfig = {
Type = "simple";
StateDirectory = "ferretdb";
WorkingDirectory = "/var/lib/ferretdb";
ExecStart = "${cfg.package}/bin/ferretdb";
Restart = "on-failure";
ProtectHome = true;
ProtectSystem = "strict";
PrivateTmp = true;
PrivateDevices = true;
ProtectHostname = true;
ProtectClock = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
NoNewPrivileges = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
RemoveIPC = true;
PrivateMounts = true;
DynamicUser = true;
};
};
};
}

View file

@ -314,7 +314,7 @@ in {
queue_dir = "$var_dir/queue"; queue_dir = "$var_dir/queue";
template_dir = "$var_dir/templates"; template_dir = "$var_dir/templates";
log_dir = "/var/log/mailman"; log_dir = "/var/log/mailman";
lock_dir = "$var_dir/lock"; lock_dir = "/run/mailman/lock";
etc_dir = "/etc"; etc_dir = "/etc";
pid_file = "/run/mailman/master.pid"; pid_file = "/run/mailman/master.pid";
}; };
@ -644,7 +644,7 @@ in {
}; };
meta = { meta = {
maintainers = with lib.maintainers; [ lheckemann qyliss ma27 ]; maintainers = with lib.maintainers; [ lheckemann qyliss ];
doc = ./mailman.md; doc = ./mailman.md;
}; };

View file

@ -12,7 +12,9 @@ let
usePostgresql = cfg.settings.database.name == "psycopg2"; usePostgresql = cfg.settings.database.name == "psycopg2";
hasLocalPostgresDB = let args = cfg.settings.database.args; in hasLocalPostgresDB = let args = cfg.settings.database.args; in
usePostgresql && (!(args ? host) || (elem args.host [ "localhost" "127.0.0.1" "::1" ])); usePostgresql
&& (!(args ? host) || (elem args.host [ "localhost" "127.0.0.1" "::1" ]))
&& config.services.postgresql.enable;
hasWorkers = cfg.workers != { }; hasWorkers = cfg.workers != { };
listenerSupportsResource = resource: listener: listenerSupportsResource = resource: listener:
@ -70,13 +72,12 @@ let
inherit (cfg) plugins; inherit (cfg) plugins;
}; };
logConfig = logName: { defaultCommonLogConfig = {
version = 1; version = 1;
formatters.journal_fmt.format = "%(name)s: [%(request)s] %(message)s"; formatters.journal_fmt.format = "%(name)s: [%(request)s] %(message)s";
handlers.journal = { handlers.journal = {
class = "systemd.journal.JournalHandler"; class = "systemd.journal.JournalHandler";
formatter = "journal_fmt"; formatter = "journal_fmt";
SYSLOG_IDENTIFIER = logName;
}; };
root = { root = {
level = "INFO"; level = "INFO";
@ -84,33 +85,27 @@ let
}; };
disable_existing_loggers = false; disable_existing_loggers = false;
}; };
defaultCommonLogConfigText = generators.toPretty { } defaultCommonLogConfig;
logConfigText = logName: logConfigText = logName:
let
expr = ''
{
version = 1;
formatters.journal_fmt.format = "%(name)s: [%(request)s] %(message)s";
handlers.journal = {
class = "systemd.journal.JournalHandler";
formatter = "journal_fmt";
SYSLOG_IDENTIFIER = "${logName}";
};
root = {
level = "INFO";
handlers = [ "journal" ];
};
disable_existing_loggers = false;
};
'';
in
lib.literalMD '' lib.literalMD ''
Path to a yaml file generated from this Nix expression: Path to a yaml file generated from this Nix expression:
``` ```
${expr} ${generators.toPretty { } (
recursiveUpdate defaultCommonLogConfig { handlers.journal.SYSLOG_IDENTIFIER = logName; }
)}
``` ```
''; '';
genLogConfigFile = logName: format.generate "synapse-log-${logName}.yaml" (logConfig logName);
genLogConfigFile = logName: format.generate
"synapse-log-${logName}.yaml"
(cfg.log // optionalAttrs (cfg.log?handlers.journal) {
handlers.journal = cfg.log.handlers.journal // {
SYSLOG_IDENTIFIER = logName;
};
});
in { in {
imports = [ imports = [
@ -394,6 +389,49 @@ in {
''; '';
}; };
log = mkOption {
type = types.attrsOf format.type;
defaultText = literalExpression defaultCommonLogConfigText;
description = mdDoc ''
Default configuration for the loggers used by `matrix-synapse` and its workers.
The defaults are added with the default priority which means that
these will be merged with additional declarations. These additional
declarations also take precedence over the defaults when declared
with at least normal priority. For instance
the log-level for synapse and its workers can be changed like this:
```nix
{ lib, ... }: {
services.matrix-synapse.log.root.level = "WARNING";
}
```
And another field can be added like this:
```nix
{
services.matrix-synapse.log = {
loggers."synapse.http.matrixfederationclient".level = "DEBUG";
};
}
```
Additionally, the field `handlers.journal.SYSLOG_IDENTIFIER` will be added to
each log config, i.e.
* `synapse` for `matrix-synapse.service`
* `synapse-<worker name>` for `matrix-synapse-worker-<worker name>.service`
This is only done if this option has a `handlers.journal` field declared.
To discard all settings declared by this option for each worker and synapse,
`lib.mkForce` can be used.
To discard all settings declared by this option for a single worker or synapse only,
[](#opt-services.matrix-synapse.workers._name_.worker_log_config) or
[](#opt-services.matrix-synapse.settings.log_config) can be used.
'';
};
settings = mkOption { settings = mkOption {
default = { }; default = { };
description = mdDoc '' description = mdDoc ''
@ -944,23 +982,6 @@ in {
by synapse in `services.matrix-synapse.settings.listeners` or in one of the workers! by synapse in `services.matrix-synapse.settings.listeners` or in one of the workers!
''; '';
} }
{
assertion = hasLocalPostgresDB -> config.services.postgresql.enable;
message = ''
Cannot deploy matrix-synapse with a configuration for a local postgresql database
and a missing postgresql service. Since 20.03 it's mandatory to manually configure the
database (please read the thread in https://github.com/NixOS/nixpkgs/pull/80447 for
further reference).
If you
- try to deploy a fresh synapse, you need to configure the database yourself. An example
for this can be found in <nixpkgs/nixos/tests/matrix/synapse.nix>
- update your existing matrix-synapse instance, you simply need to add `services.postgresql.enable = true`
to your configuration.
For further information about this update, please read the release-notes of 20.03 carefully.
'';
}
{ {
assertion = hasWorkers -> cfg.settings.redis.enabled; assertion = hasWorkers -> cfg.settings.redis.enabled;
message = '' message = ''
@ -1008,6 +1029,8 @@ in {
# default them, so they are additive # default them, so they are additive
services.matrix-synapse.extras = defaultExtras; services.matrix-synapse.extras = defaultExtras;
services.matrix-synapse.log = mapAttrsRecursive (const mkDefault) defaultCommonLogConfig;
users.users.matrix-synapse = { users.users.matrix-synapse = {
group = "matrix-synapse"; group = "matrix-synapse";
home = cfg.dataDir; home = cfg.dataDir;
@ -1034,9 +1057,11 @@ in {
partOf = [ "matrix-synapse.target" ]; partOf = [ "matrix-synapse.target" ];
wantedBy = [ "matrix-synapse.target" ]; wantedBy = [ "matrix-synapse.target" ];
unitConfig.ReloadPropagatedFrom = "matrix-synapse.target"; unitConfig.ReloadPropagatedFrom = "matrix-synapse.target";
requires = optional hasLocalPostgresDB "postgresql.service";
} }
else { else {
after = [ "network-online.target" ] ++ optional hasLocalPostgresDB "postgresql.service"; after = [ "network-online.target" ] ++ optional hasLocalPostgresDB "postgresql.service";
requires = optional hasLocalPostgresDB "postgresql.service";
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
}; };
baseServiceConfig = { baseServiceConfig = {
@ -1070,7 +1095,7 @@ in {
ProtectKernelTunables = true; ProtectKernelTunables = true;
ProtectProc = "invisible"; ProtectProc = "invisible";
ProtectSystem = "strict"; ProtectSystem = "strict";
ReadWritePaths = [ cfg.dataDir ]; ReadWritePaths = [ cfg.dataDir cfg.settings.media_store_path ];
RemoveIPC = true; RemoveIPC = true;
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ]; RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
RestrictNamespaces = true; RestrictNamespaces = true;

View file

@ -36,18 +36,7 @@ let
# Secure the services # Secure the services
defaultServiceConfig = { defaultServiceConfig = {
TemporaryFileSystem = "/:ro"; ReadWritePaths = [
BindReadOnlyPaths = [
"/nix/store"
"-/etc/resolv.conf"
"-/etc/nsswitch.conf"
"-/etc/hosts"
"-/etc/localtime"
"-/etc/ssl/certs"
"-/etc/static/ssl/certs"
"-/run/postgresql"
] ++ (optional enableRedis redisServer.unixSocket);
BindPaths = [
cfg.consumptionDir cfg.consumptionDir
cfg.dataDir cfg.dataDir
cfg.mediaDir cfg.mediaDir
@ -66,11 +55,9 @@ let
PrivateUsers = true; PrivateUsers = true;
ProtectClock = true; ProtectClock = true;
# Breaks if the home dir of the user is in /home # Breaks if the home dir of the user is in /home
# Also does not add much value in combination with the TemporaryFileSystem.
# ProtectHome = true; # ProtectHome = true;
ProtectHostname = true; ProtectHostname = true;
# Would re-mount paths ignored by temporary root ProtectSystem = "strict";
#ProtectSystem = "strict";
ProtectControlGroups = true; ProtectControlGroups = true;
ProtectKernelLogs = true; ProtectKernelLogs = true;
ProtectKernelModules = true; ProtectKernelModules = true;
@ -319,17 +306,6 @@ in
Type = "oneshot"; Type = "oneshot";
# Enable internet access # Enable internet access
PrivateNetwork = false; PrivateNetwork = false;
# Restrict write access
BindPaths = [];
BindReadOnlyPaths = [
"/nix/store"
"-/etc/resolv.conf"
"-/etc/nsswitch.conf"
"-/etc/ssl/certs"
"-/etc/static/ssl/certs"
"-/etc/hosts"
"-/etc/localtime"
];
ExecStart = let pythonWithNltk = pkg.python.withPackages (ps: [ ps.nltk ]); in '' ExecStart = let pythonWithNltk = pkg.python.withPackages (ps: [ ps.nltk ]); in ''
${pythonWithNltk}/bin/python -m nltk.downloader -d '${nltkDir}' punkt snowball_data stopwords ${pythonWithNltk}/bin/python -m nltk.downloader -d '${nltkDir}' punkt snowball_data stopwords
''; '';

View file

@ -0,0 +1,624 @@
{ config, lib, pkgs, ... }:
let
cfg = config.services.librenms;
settingsFormat = pkgs.formats.json {};
configJson = settingsFormat.generate "librenms-config.json" cfg.settings;
package = pkgs.librenms.override {
logDir = cfg.logDir;
dataDir = cfg.dataDir;
};
phpOptions = ''
log_errors = on
post_max_size = 100M
upload_max_filesize = 100M
date.timezone = "${config.time.timeZone}"
'';
phpIni = pkgs.runCommand "php.ini" {
inherit (package) phpPackage;
inherit phpOptions;
preferLocalBuild = true;
passAsFile = [ "phpOptions" ];
} ''
cat $phpPackage/etc/php.ini $phpOptionsPath > $out
'';
artisanWrapper = pkgs.writeShellScriptBin "librenms-artisan" ''
cd ${package}
sudo=exec
if [[ "$USER" != ${cfg.user} ]]; then
sudo='exec /run/wrappers/bin/sudo -u ${cfg.user}'
fi
$sudo ${package}/artisan $*
'';
lnmsWrapper = pkgs.writeShellScriptBin "lnms" ''
cd ${package}
exec ${package}/lnms $*
'';
configFile = pkgs.writeText "config.php" ''
<?php
$new_config = json_decode(file_get_contents("${cfg.dataDir}/config.json"), true);
$config = ($config == null) ? $new_config : array_merge($config, $new_config);
${lib.optionalString (cfg.extraConfig != null) cfg.extraConfig}
'';
in {
options.services.librenms = with lib; {
enable = mkEnableOption "LibreNMS network monitoring system";
user = mkOption {
type = types.str;
default = "librenms";
description = ''
Name of the LibreNMS user.
'';
};
group = mkOption {
type = types.str;
default = "librenms";
description = ''
Name of the LibreNMS group.
'';
};
hostname = mkOption {
type = types.str;
default = config.networking.fqdnOrHostName;
defaultText = literalExpression "config.networking.fqdnOrHostName";
description = ''
The hostname to serve LibreNMS on.
'';
};
pollerThreads = mkOption {
type = types.int;
default = 16;
description = ''
Amount of threads of the cron-poller.
'';
};
enableOneMinutePolling = mkOption {
type = types.bool;
default = false;
description = ''
Enables the [1-Minute Polling](https://docs.librenms.org/Support/1-Minute-Polling/).
Changing this option will automatically convert your existing rrd files.
'';
};
useDistributedPollers = mkOption {
type = types.bool;
default = false;
description = ''
Enables (distributed pollers)[https://docs.librenms.org/Extensions/Distributed-Poller/]
for this LibreNMS instance. This will enable a local `rrdcached` and `memcached` server.
To use this feature, make sure to configure your firewall that the distributed pollers
can reach the local `mysql`, `rrdcached` and `memcached` ports.
'';
};
distributedPoller = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Configure this LibreNMS instance as a (distributed poller)[https://docs.librenms.org/Extensions/Distributed-Poller/].
This will disable all web features and just configure the poller features.
Use the `mysql` database of your main LibreNMS instance in the database settings.
'';
};
name = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Custom name of this poller.
'';
};
group = mkOption {
type = types.str;
default = "0";
example = "1,2";
description = ''
Group(s) of this poller.
'';
};
distributedBilling = mkOption {
type = types.bool;
default = false;
description = ''
Enable distributed billing on this poller.
'';
};
memcachedHost = mkOption {
type = types.str;
description = ''
Hostname or IP of the `memcached` server.
'';
};
memcachedPort = mkOption {
type = types.port;
default = 11211;
description = ''
Port of the `memcached` server.
'';
};
rrdcachedHost = mkOption {
type = types.str;
description = ''
Hostname or IP of the `rrdcached` server.
'';
};
rrdcachedPort = mkOption {
type = types.port;
default = 42217;
description = ''
Port of the `memcached` server.
'';
};
};
poolConfig = mkOption {
type = with types; attrsOf (oneOf [ str int bool ]);
default = {
"pm" = "dynamic";
"pm.max_children" = 32;
"pm.start_servers" = 2;
"pm.min_spare_servers" = 2;
"pm.max_spare_servers" = 4;
"pm.max_requests" = 500;
};
description = ''
Options for the LibreNMS PHP pool. See the documentation on `php-fpm.conf`
for details on configuration directives.
'';
};
nginx = mkOption {
type = types.submodule (
recursiveUpdate
(import ../web-servers/nginx/vhost-options.nix { inherit config lib; }) {}
);
default = { };
example = literalExpression ''
{
serverAliases = [
"librenms.''${config.networking.domain}"
];
# To enable encryption and let let's encrypt take care of certificate
forceSSL = true;
enableACME = true;
# To set the LibreNMS virtualHost as the default virtualHost;
default = true;
}
'';
description = ''
With this option, you can customize the nginx virtualHost settings.
'';
};
dataDir = mkOption {
type = types.path;
default = "/var/lib/librenms";
description = ''
Path of the LibreNMS state directory.
'';
};
logDir = mkOption {
type = types.path;
default = "/var/log/librenms";
description = ''
Path of the LibreNMS logging directory.
'';
};
database = {
createLocally = mkOption {
type = types.bool;
default = false;
description = ''
Whether to create a local database automatically.
'';
};
host = mkOption {
default = "localhost";
description = ''
Hostname or IP of the MySQL/MariaDB server.
'';
};
port = mkOption {
type = types.port;
default = 3306;
description = ''
Port of the MySQL/MariaDB server.
'';
};
database = mkOption {
type = types.str;
default = "librenms";
description = ''
Name of the database on the MySQL/MariaDB server.
'';
};
username = mkOption {
type = types.str;
default = "librenms";
description = ''
Name of the user on the MySQL/MariaDB server.
'';
};
passwordFile = mkOption {
type = types.path;
example = "/run/secrets/mysql.pass";
description = ''
A file containing the password for the user of the MySQL/MariaDB server.
Must be readable for the LibreNMS user.
'';
};
};
environmentFile = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
File containing env-vars to be substituted into the final config. Useful for secrets.
Does not apply to settings defined in `extraConfig`.
'';
};
settings = mkOption {
type = types.submodule {
freeformType = settingsFormat.type;
options = {};
};
description = ''
Attrset of the LibreNMS configuration.
See https://docs.librenms.org/Support/Configuration/ for reference.
All possible options are listed [here](https://github.com/librenms/librenms/blob/master/misc/config_definitions.json).
See https://docs.librenms.org/Extensions/Authentication/ for setting other authentication methods.
'';
default = { };
example = {
base_url = "/librenms/";
top_devices = true;
top_ports = false;
};
};
extraConfig = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Additional config for LibreNMS that will be appended to the `config.php`. See
https://github.com/librenms/librenms/blob/master/misc/config_definitions.json
for possible options. Useful if you want to use PHP-Functions in your config.
'';
};
};
config = lib.mkIf cfg.enable {
assertions = [
{
assertion = config.time.timeZone != null;
message = "You must set `time.timeZone` to use the LibreNMS module.";
}
{
assertion = cfg.database.createLocally -> cfg.database.host == "localhost";
message = "The database host must be \"localhost\" if services.librenms.database.createLocally is set to true.";
}
{
assertion = !(cfg.useDistributedPollers && cfg.distributedPoller.enable);
message = "The LibreNMS instance can't be a distributed poller and a full instance at the same time.";
}
];
users.users.${cfg.user} = {
group = "${cfg.group}";
isSystemUser = true;
};
users.groups.${cfg.group} = { };
services.librenms.settings = {
# basic configs
"user" = cfg.user;
"own_hostname" = cfg.hostname;
"base_url" = lib.mkDefault "/";
"auth_mechanism" = lib.mkDefault "mysql";
# disable auto update function (won't work with NixOS)
"update" = false;
# enable fast ping by default
"ping_rrd_step" = 60;
# one minute polling
"rrd.step" = if cfg.enableOneMinutePolling then 60 else 300;
"rrd.heartbeat" = if cfg.enableOneMinutePolling then 120 else 600;
} // (lib.optionalAttrs cfg.distributedPoller.enable {
"distributed_poller" = true;
"distributed_poller_name" = lib.mkIf (cfg.distributedPoller.name != null) cfg.distributedPoller.name;
"distributed_poller_group" = cfg.distributedPoller.group;
"distributed_billing" = cfg.distributedPoller.distributedBilling;
"distributed_poller_memcached_host" = cfg.distributedPoller.memcachedHost;
"distributed_poller_memcached_port" = cfg.distributedPoller.memcachedPort;
"rrdcached" = "${cfg.distributedPoller.rrdcachedHost}:${toString cfg.distributedPoller.rrdcachedPort}";
}) // (lib.optionalAttrs cfg.useDistributedPollers {
"distributed_poller" = true;
# still enable a local poller with distributed polling
"distributed_poller_group" = lib.mkDefault "0";
"distributed_billing" = lib.mkDefault true;
"distributed_poller_memcached_host" = "localhost";
"distributed_poller_memcached_port" = 11211;
"rrdcached" = "localhost:42217";
});
services.memcached = lib.mkIf cfg.useDistributedPollers {
enable = true;
listen = "0.0.0.0";
};
systemd.services.rrdcached = lib.mkIf cfg.useDistributedPollers {
description = "rrdcached";
after = [ "librenms-setup.service" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "forking";
User = cfg.user;
Group = cfg.group;
LimitNOFILE = 16384;
RuntimeDirectory = "rrdcached";
PidFile = "/run/rrdcached/rrdcached.pid";
# rrdcached params from https://docs.librenms.org/Extensions/Distributed-Poller/#config-sample
ExecStart = "${pkgs.rrdtool}/bin/rrdcached -l 0:42217 -R -j ${cfg.dataDir}/rrdcached-journal/ -F -b ${cfg.dataDir}/rrd -B -w 1800 -z 900 -p /run/rrdcached/rrdcached.pid";
};
};
services.mysql = lib.mkIf cfg.database.createLocally {
enable = true;
package = lib.mkDefault pkgs.mariadb;
settings.mysqld = {
innodb_file_per_table = 1;
lower_case_table_names = 0;
} // (lib.optionalAttrs cfg.useDistributedPollers {
bind-address = "0.0.0.0";
});
ensureDatabases = [ cfg.database.database ];
ensureUsers = [
{
name = cfg.database.username;
ensurePermissions = {
"${cfg.database.database}.*" = "ALL PRIVILEGES";
};
}
];
initialScript = lib.mkIf cfg.useDistributedPollers (pkgs.writeText "mysql-librenms-init" ''
CREATE USER IF NOT EXISTS '${cfg.database.username}'@'%';
GRANT ALL PRIVILEGES ON ${cfg.database.database}.* TO '${cfg.database.username}'@'%';
'');
};
services.nginx = lib.mkIf (!cfg.distributedPoller.enable) {
enable = true;
virtualHosts."${cfg.hostname}" = lib.mkMerge [
cfg.nginx
{
root = lib.mkForce "${package}/html";
locations."/" = {
index = "index.php";
tryFiles = "$uri $uri/ /index.php?$query_string";
};
locations."~ .php$".extraConfig = ''
fastcgi_pass unix:${config.services.phpfpm.pools."librenms".socket};
fastcgi_split_path_info ^(.+\.php)(/.+)$;
'';
}
];
};
services.phpfpm.pools.librenms = lib.mkIf (!cfg.distributedPoller.enable) {
user = cfg.user;
group = cfg.group;
inherit (package) phpPackage;
inherit phpOptions;
settings = {
"listen.mode" = "0660";
"listen.owner" = config.services.nginx.user;
"listen.group" = config.services.nginx.group;
} // cfg.poolConfig;
};
systemd.services.librenms-scheduler = {
description = "LibreNMS Scheduler";
path = [ pkgs.unixtools.whereis ];
serviceConfig = {
Type = "oneshot";
WorkingDirectory = package;
User = cfg.user;
Group = cfg.group;
ExecStart = "${artisanWrapper}/bin/librenms-artisan schedule:run";
};
};
systemd.timers.librenms-scheduler = {
description = "LibreNMS Scheduler";
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = "minutely";
AccuracySec = "1second";
};
};
systemd.services.librenms-setup = {
description = "Preparation tasks for LibreNMS";
before = [ "phpfpm-librenms.service" ];
after = [ "systemd-tmpfiles-setup.service" ]
++ (lib.optional (cfg.database.host == "localhost") "mysql.service");
wantedBy = [ "multi-user.target" ];
restartTriggers = [ package configFile ];
path = [ pkgs.mariadb pkgs.unixtools.whereis pkgs.gnused ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
EnvironmentFile = lib.mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
User = cfg.user;
Group = cfg.group;
ExecStartPre = lib.mkIf cfg.database.createLocally [ "!${pkgs.writeShellScript "librenms-db-init" ''
DB_PASSWORD=$(cat ${cfg.database.passwordFile} | tr -d '\n')
echo "ALTER USER '${cfg.database.username}'@'localhost' IDENTIFIED BY '$DB_PASSWORD';" | ${pkgs.mariadb}/bin/mysql
${lib.optionalString cfg.useDistributedPollers ''
echo "ALTER USER '${cfg.database.username}'@'%' IDENTIFIED BY '$DB_PASSWORD';" | ${pkgs.mariadb}/bin/mysql
''}
''}"];
};
script = ''
set -euo pipefail
# config setup
ln -sf ${configFile} ${cfg.dataDir}/config.php
${pkgs.envsubst}/bin/envsubst -i ${configJson} -o ${cfg.dataDir}/config.json
export PHPRC=${phpIni}
if [[ ! -s ${cfg.dataDir}/.env ]]; then
# init .env file
echo "APP_KEY=" > ${cfg.dataDir}/.env
${artisanWrapper}/bin/librenms-artisan key:generate --ansi
${artisanWrapper}/bin/librenms-artisan webpush:vapid
echo "" >> ${cfg.dataDir}/.env
echo -n "NODE_ID=" >> ${cfg.dataDir}/.env
${package.phpPackage}/bin/php -r "echo uniqid();" >> ${cfg.dataDir}/.env
echo "" >> ${cfg.dataDir}/.env
else
# .env file already exists --> only update database and cache config
${pkgs.gnused}/bin/sed -i /^DB_/d ${cfg.dataDir}/.env
${pkgs.gnused}/bin/sed -i /^CACHE_DRIVER/d ${cfg.dataDir}/.env
fi
${lib.optionalString (cfg.useDistributedPollers || cfg.distributedPoller.enable) ''
echo "CACHE_DRIVER=memcached" >> ${cfg.dataDir}/.env
''}
echo "DB_HOST=${cfg.database.host}" >> ${cfg.dataDir}/.env
echo "DB_PORT=${toString cfg.database.port}" >> ${cfg.dataDir}/.env
echo "DB_DATABASE=${cfg.database.database}" >> ${cfg.dataDir}/.env
echo "DB_USERNAME=${cfg.database.username}" >> ${cfg.dataDir}/.env
echo -n "DB_PASSWORD=" >> ${cfg.dataDir}/.env
cat ${cfg.database.passwordFile} >> ${cfg.dataDir}/.env
# clear cache after update
OLD_VERSION=$(cat ${cfg.dataDir}/version)
if [[ $OLD_VERSION != "${package.version}" ]]; then
rm -r ${cfg.dataDir}/cache/*
echo "${package.version}" > ${cfg.dataDir}/version
fi
# convert rrd files when the oneMinutePolling option is changed
OLD_ENABLED=$(cat ${cfg.dataDir}/one_minute_enabled)
if [[ $OLD_ENABLED != "${lib.boolToString cfg.enableOneMinutePolling}" ]]; then
${package}/scripts/rrdstep.php -h all
echo "${lib.boolToString cfg.enableOneMinutePolling}" > ${cfg.dataDir}/one_minute_enabled
fi
# migrate db
${artisanWrapper}/bin/librenms-artisan migrate --force --no-interaction
'';
};
programs.mtr.enable = true;
services.logrotate = {
enable = true;
settings."${cfg.logDir}/librenms.log" = {
su = "${cfg.user} ${cfg.group}";
create = "0640 ${cfg.user} ${cfg.group}";
rotate = 6;
frequency = "weekly";
compress = true;
delaycompress = true;
missingok = true;
notifempty = true;
};
};
services.cron = {
enable = true;
systemCronJobs = let
env = "PHPRC=${phpIni}";
in [
# based on crontab provided by LibreNMS
"33 */6 * * * ${cfg.user} ${env} ${package}/cronic ${package}/discovery-wrapper.py 1"
"*/5 * * * * ${cfg.user} ${env} ${package}/discovery.php -h new >> /dev/null 2>&1"
"${if cfg.enableOneMinutePolling then "*" else "*/5"} * * * * ${cfg.user} ${env} ${package}/cronic ${package}/poller-wrapper.py ${toString cfg.pollerThreads}"
"* * * * * ${cfg.user} ${env} ${package}/alerts.php >> /dev/null 2>&1"
"*/5 * * * * ${cfg.user} ${env} ${package}/poll-billing.php >> /dev/null 2>&1"
"01 * * * * ${cfg.user} ${env} ${package}/billing-calculate.php >> /dev/null 2>&1"
"*/5 * * * * ${cfg.user} ${env} ${package}/check-services.php >> /dev/null 2>&1"
# extra: fast ping
"* * * * * ${cfg.user} ${env} ${package}/ping.php >> /dev/null 2>&1"
# daily.sh tasks are split to exclude update
"19 0 * * * ${cfg.user} ${env} ${package}/daily.sh cleanup >> /dev/null 2>&1"
"19 0 * * * ${cfg.user} ${env} ${package}/daily.sh notifications >> /dev/null 2>&1"
"19 0 * * * ${cfg.user} ${env} ${package}/daily.sh peeringdb >> /dev/null 2>&1"
"19 0 * * * ${cfg.user} ${env} ${package}/daily.sh mac_oui >> /dev/null 2>&1"
];
};
security.wrappers = {
fping = {
setuid = true;
owner = "root";
group = "root";
source = "${pkgs.fping}/bin/fping";
};
};
environment.systemPackages = [ artisanWrapper lnmsWrapper ];
systemd.tmpfiles.rules = [
"d ${cfg.logDir} 0750 ${cfg.user} ${cfg.group} - -"
"f ${cfg.logDir}/librenms.log 0640 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir} 0750 ${cfg.user} ${cfg.group} - -"
"f ${cfg.dataDir}/.env 0600 ${cfg.user} ${cfg.group} - -"
"f ${cfg.dataDir}/version 0600 ${cfg.user} ${cfg.group} - -"
"f ${cfg.dataDir}/one_minute_enabled 0600 ${cfg.user} ${cfg.group} - -"
"f ${cfg.dataDir}/config.json 0600 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/app 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/debugbar 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/framework 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/framework/cache 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/framework/sessions 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/framework/views 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/storage/logs 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/rrd 0700 ${cfg.user} ${cfg.group} - -"
"d ${cfg.dataDir}/cache 0700 ${cfg.user} ${cfg.group} - -"
] ++ lib.optionals cfg.useDistributedPollers [
"d ${cfg.dataDir}/rrdcached-journal 0700 ${cfg.user} ${cfg.group} - -"
];
};
meta.maintainers = lib.teams.wdz.members;
}

View file

@ -8,7 +8,7 @@ let
checkedConfig = file: checkedConfig = file:
if cfg.checkConfig then if cfg.checkConfig then
pkgs.runCommand "checked-config" { buildInputs = [ cfg.package ]; } '' pkgs.runCommand "checked-config" { nativeBuildInputs = [ cfg.package ]; } ''
ln -s ${file} $out ln -s ${file} $out
amtool check-config $out amtool check-config $out
'' else file; '' else file;

View file

@ -31,7 +31,7 @@ let
if checkConfigEnabled then if checkConfigEnabled then
pkgs.runCommandLocal pkgs.runCommandLocal
"${name}-${replaceStrings [" "] [""] what}-checked" "${name}-${replaceStrings [" "] [""] what}-checked"
{ buildInputs = [ cfg.package.cli ]; } '' { nativeBuildInputs = [ cfg.package.cli ]; } ''
ln -s ${file} $out ln -s ${file} $out
promtool ${what} $out promtool ${what} $out
'' else file; '' else file;

View file

@ -37,6 +37,7 @@ let
"fritzbox" "fritzbox"
"graphite" "graphite"
"idrac" "idrac"
"imap-mailstat"
"influxdb" "influxdb"
"ipmi" "ipmi"
"json" "json"

View file

@ -25,7 +25,7 @@ let
checkConfig = file: checkConfig = file:
pkgs.runCommand "checked-blackbox-exporter.conf" { pkgs.runCommand "checked-blackbox-exporter.conf" {
preferLocalBuild = true; preferLocalBuild = true;
buildInputs = [ pkgs.buildPackages.prometheus-blackbox-exporter ]; nativeBuildInputs = [ pkgs.buildPackages.prometheus-blackbox-exporter ];
} '' } ''
ln -s ${coerceConfigFile file} $out ln -s ${coerceConfigFile file} $out
blackbox_exporter --config.check --config.file $out blackbox_exporter --config.check --config.file $out

View file

@ -0,0 +1,71 @@
{ config, lib, pkgs, options }:
with lib;
let
cfg = config.services.prometheus.exporters.imap-mailstat;
valueToString = value:
if (builtins.typeOf value == "string") then "\"${value}\""
else (
if (builtins.typeOf value == "int") then "${toString value}"
else (
if (builtins.typeOf value == "bool") then (if value then "true" else "false")
else "XXX ${toString value}"
)
);
  # Render the accounts attrset into the exporter's TOML config: one
  # [[Accounts]] table per attrset entry, using the attribute name as `name`.
  createConfigFile = accounts:
    # unfortunately there is no builtins.toTOML yet, so the TOML is assembled by hand
    # https://github.com/NixOS/nix/issues/3929
    pkgs.writeText "imap-mailstat-exporter.conf" ''
      ${concatStrings (attrValues (mapAttrs (name: config: "[[Accounts]]\nname = \"${name}\"\n${concatStrings (attrValues (mapAttrs (k: v: "${k} = ${valueToString v}\n") config))}") accounts))}
    '';
mkOpt = type: description: mkOption {
type = types.nullOr type;
default = null;
description = lib.mdDoc description;
};
accountOptions.options = {
mailaddress = mkOpt types.str "Your email address (at the moment used as login name)";
username = mkOpt types.str "If empty string mailaddress value is used";
password = mkOpt types.str "";
serveraddress = mkOpt types.str "mailserver name or address";
serverport = mkOpt types.int "imap port number (at the moment only tls connection is supported)";
starttls = mkOpt types.bool "set to true for using STARTTLS to start a TLS connection";
};
in
{
  # Default listen port of the exporter's metrics endpoint.
  port = 8081;
  extraOpts = {
    oldestUnseenDate = mkOption {
      type = types.bool;
      default = false;
      description = lib.mdDoc ''
        Enable metric with timestamp of oldest unseen mail
      '';
    };
    accounts = mkOption {
      type = types.attrsOf (types.submodule accountOptions);
      default = {};
      description = lib.mdDoc ''
        Accounts to monitor
      '';
    };
    # NOTE(review): this option is declared but never referenced below —
    # ExecStart always uses the config generated from `accounts`. Confirm
    # whether it should be wired into `-config` or removed.
    configurationFile = mkOption {
      type = types.path;
      example = "/path/to/config-file";
      description = lib.mdDoc ''
        File containing the configuration
      '';
    };
  };
  serviceOpts = {
    serviceConfig = {
      ExecStart = ''
        ${pkgs.prometheus-imap-mailstat-exporter}/bin/imap-mailstat-exporter \
          -config ${createConfigFile cfg.accounts} \
          ${optionalString cfg.oldestUnseenDate "-oldestunseendate"} \
          ${concatStringsSep " \\\n " cfg.extraFlags}
      '';
    };
  };
}

View file

@ -203,10 +203,8 @@ in
default = [ default = [
"/ip4/0.0.0.0/tcp/4001" "/ip4/0.0.0.0/tcp/4001"
"/ip6/::/tcp/4001" "/ip6/::/tcp/4001"
"/ip4/0.0.0.0/udp/4001/quic"
"/ip4/0.0.0.0/udp/4001/quic-v1" "/ip4/0.0.0.0/udp/4001/quic-v1"
"/ip4/0.0.0.0/udp/4001/quic-v1/webtransport" "/ip4/0.0.0.0/udp/4001/quic-v1/webtransport"
"/ip6/::/udp/4001/quic"
"/ip6/::/udp/4001/quic-v1" "/ip6/::/udp/4001/quic-v1"
"/ip6/::/udp/4001/quic-v1/webtransport" "/ip6/::/udp/4001/quic-v1/webtransport"
]; ];

View file

@ -39,7 +39,7 @@ let
daemonService = appName: args: daemonService = appName: args:
{ description = "Samba Service Daemon ${appName}"; { description = "Samba Service Daemon ${appName}";
after = [ (mkIf (cfg.enableNmbd && "${appName}" == "smbd") "samba-nmbd.service") ]; after = [ (mkIf (cfg.enableNmbd && "${appName}" == "smbd") "samba-nmbd.service") "network.target" ];
requiredBy = [ "samba.target" ]; requiredBy = [ "samba.target" ];
partOf = [ "samba.target" ]; partOf = [ "samba.target" ];

View file

@ -1,55 +1,59 @@
{ config, lib, pkgs, ... }: { config, lib, pkgs, ... }:
with pkgs;
with lib;
let let
cfg = config.services.connman; cfg = config.services.connman;
configFile = pkgs.writeText "connman.conf" '' configFile = pkgs.writeText "connman.conf" ''
[General] [General]
NetworkInterfaceBlacklist=${concatStringsSep "," cfg.networkInterfaceBlacklist} NetworkInterfaceBlacklist=${lib.concatStringsSep "," cfg.networkInterfaceBlacklist}
${cfg.extraConfig} ${cfg.extraConfig}
''; '';
enableIwd = cfg.wifi.backend == "iwd"; enableIwd = cfg.wifi.backend == "iwd";
in { in {
meta.maintainers = with lib.maintainers; [ AndersonTorres ];
imports = [ imports = [
(mkRenamedOptionModule [ "networking" "connman" ] [ "services" "connman" ]) (lib.mkRenamedOptionModule [ "networking" "connman" ] [ "services" "connman" ])
]; ];
###### interface ###### interface
options = { options = {
services.connman = { services.connman = {
enable = lib.mkOption {
enable = mkOption { type = lib.types.bool;
type = types.bool;
default = false; default = false;
description = lib.mdDoc '' description = lib.mdDoc ''
Whether to use ConnMan for managing your network connections. Whether to use ConnMan for managing your network connections.
''; '';
}; };
enableVPN = mkOption { package = lib.mkOption {
type = types.bool; type = lib.types.package;
description = lib.mdDoc "The connman package / build flavor";
default = pkgs.connman;
defaultText = lib.literalExpression "pkgs.connman";
example = lib.literalExpression "pkgs.connmanFull";
};
enableVPN = lib.mkOption {
type = lib.types.bool;
default = true; default = true;
description = lib.mdDoc '' description = lib.mdDoc ''
Whether to enable ConnMan VPN service. Whether to enable ConnMan VPN service.
''; '';
}; };
extraConfig = mkOption { extraConfig = lib.mkOption {
type = types.lines; type = lib.types.lines;
default = ""; default = "";
description = lib.mdDoc '' description = lib.mdDoc ''
Configuration lines appended to the generated connman configuration file. Configuration lines appended to the generated connman configuration file.
''; '';
}; };
networkInterfaceBlacklist = mkOption { networkInterfaceBlacklist = lib.mkOption {
type = with types; listOf str; type = with lib.types; listOf str;
default = [ "vmnet" "vboxnet" "virbr" "ifb" "ve" ]; default = [ "vmnet" "vboxnet" "virbr" "ifb" "ve" ];
description = lib.mdDoc '' description = lib.mdDoc ''
Default blacklisted interfaces, this includes NixOS containers interfaces (ve). Default blacklisted interfaces, this includes NixOS containers interfaces (ve).
@ -57,8 +61,8 @@ in {
}; };
wifi = { wifi = {
backend = mkOption { backend = lib.mkOption {
type = types.enum [ "wpa_supplicant" "iwd" ]; type = lib.types.enum [ "wpa_supplicant" "iwd" ];
default = "wpa_supplicant"; default = "wpa_supplicant";
description = lib.mdDoc '' description = lib.mdDoc ''
Specify the Wi-Fi backend used. Specify the Wi-Fi backend used.
@ -67,31 +71,20 @@ in {
}; };
}; };
extraFlags = mkOption { extraFlags = lib.mkOption {
type = with types; listOf str; type = with lib.types; listOf str;
default = [ ]; default = [ ];
example = [ "--nodnsproxy" ]; example = [ "--nodnsproxy" ];
description = lib.mdDoc '' description = lib.mdDoc ''
Extra flags to pass to connmand Extra flags to pass to connmand
''; '';
}; };
package = mkOption {
type = types.package;
description = lib.mdDoc "The connman package / build flavor";
default = connman;
defaultText = literalExpression "pkgs.connman";
example = literalExpression "pkgs.connmanFull";
};
}; };
}; };
###### implementation ###### implementation
config = mkIf cfg.enable { config = lib.mkIf cfg.enable {
assertions = [{ assertions = [{
assertion = !config.networking.useDHCP; assertion = !config.networking.useDHCP;
message = "You can not use services.connman with networking.useDHCP"; message = "You can not use services.connman with networking.useDHCP";
@ -107,8 +100,8 @@ in {
systemd.services.connman = { systemd.services.connman = {
description = "Connection service"; description = "Connection service";
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
after = [ "syslog.target" ] ++ optional enableIwd "iwd.service"; after = [ "syslog.target" ] ++ lib.optional enableIwd "iwd.service";
requires = optional enableIwd "iwd.service"; requires = lib.optional enableIwd "iwd.service";
serviceConfig = { serviceConfig = {
Type = "dbus"; Type = "dbus";
BusName = "net.connman"; BusName = "net.connman";
@ -117,13 +110,13 @@ in {
"${cfg.package}/sbin/connmand" "${cfg.package}/sbin/connmand"
"--config=${configFile}" "--config=${configFile}"
"--nodaemon" "--nodaemon"
] ++ optional enableIwd "--wifi=iwd_agent" ] ++ lib.optional enableIwd "--wifi=iwd_agent"
++ cfg.extraFlags); ++ cfg.extraFlags);
StandardOutput = "null"; StandardOutput = "null";
}; };
}; };
systemd.services.connman-vpn = mkIf cfg.enableVPN { systemd.services.connman-vpn = lib.mkIf cfg.enableVPN {
description = "ConnMan VPN service"; description = "ConnMan VPN service";
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
after = [ "syslog.target" ]; after = [ "syslog.target" ];
@ -136,7 +129,7 @@ in {
}; };
}; };
systemd.services.net-connman-vpn = mkIf cfg.enableVPN { systemd.services.net-connman-vpn = lib.mkIf cfg.enableVPN {
description = "D-BUS Service"; description = "D-BUS Service";
serviceConfig = { serviceConfig = {
Name = "net.connman.vpn"; Name = "net.connman.vpn";
@ -150,9 +143,9 @@ in {
networking = { networking = {
useDHCP = false; useDHCP = false;
wireless = { wireless = {
enable = mkIf (!enableIwd) true; enable = lib.mkIf (!enableIwd) true;
dbusControlled = true; dbusControlled = true;
iwd = mkIf enableIwd { iwd = lib.mkIf enableIwd {
enable = true; enable = true;
}; };
}; };

View file

@ -0,0 +1,125 @@
{ config, lib, pkgs, ... }:
let
cfg = config.services.deconz;
name = "deconz";
stateDir = "/var/lib/${name}";
# ref. upstream deconz.service
capabilities =
lib.optionals (cfg.httpPort < 1024 || cfg.wsPort < 1024) [ "CAP_NET_BIND_SERVICE" ]
++ lib.optionals (cfg.allowRebootSystem) [ "CAP_SYS_BOOT" ]
++ lib.optionals (cfg.allowRestartService) [ "CAP_KILL" ]
++ lib.optionals (cfg.allowSetSystemTime) [ "CAP_SYS_TIME" ];
in
{
options.services.deconz = {
enable = lib.mkEnableOption "deCONZ, a Zigbee gateway for use with ConBee hardware (https://phoscon.de/en/conbee2)";
package = lib.mkOption {
type = lib.types.package;
default = pkgs.deconz;
defaultText = lib.literalExpression "pkgs.deconz";
description = "Which deCONZ package to use.";
};
device = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = ''
Force deCONZ to use a specific USB device (e.g. /dev/ttyACM0). By
default it does a search.
'';
};
listenAddress = lib.mkOption {
type = lib.types.str;
default = "127.0.0.1";
description = ''
Pin deCONZ to the network interface specified through the provided IP
address. This applies for the webserver as well as the websocket
notifications.
'';
};
httpPort = lib.mkOption {
type = lib.types.port;
default = 80;
description = "TCP port for the web server.";
};
wsPort = lib.mkOption {
type = lib.types.port;
default = 443;
description = "TCP port for the WebSocket.";
};
openFirewall = lib.mkEnableOption "open up the service ports in the firewall";
allowRebootSystem = lib.mkEnableOption "allow rebooting the system";
allowRestartService = lib.mkEnableOption "allow killing/restarting processes";
allowSetSystemTime = lib.mkEnableOption "allow setting the system time";
extraArgs = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ ];
example = [
"--dbg-info=1"
"--dbg-err=2"
];
description = ''
Extra command line arguments for deCONZ, see
https://github.com/dresden-elektronik/deconz-rest-plugin/wiki/deCONZ-command-line-parameters.
'';
};
};
config = lib.mkIf cfg.enable {
networking.firewall.allowedTCPPorts = lib.mkIf cfg.openFirewall [
cfg.httpPort
cfg.wsPort
];
services.udev.packages = [ cfg.package ];
systemd.services.deconz = {
description = "deCONZ Zigbee gateway";
wantedBy = [ "multi-user.target" ];
preStart = ''
# The service puts a nix store path reference in here, and that path can
# be garbage collected. Ensure the file gets "refreshed" on every start.
rm -f ${stateDir}/.local/share/dresden-elektronik/deCONZ/zcldb.txt
'';
environment = {
HOME = stateDir;
XDG_RUNTIME_DIR = "/run/${name}";
};
serviceConfig = {
ExecStart =
"${lib.getExe cfg.package}"
+ " -platform minimal"
+ " --http-listen=${cfg.listenAddress}"
+ " --http-port=${toString cfg.httpPort}"
+ " --ws-port=${toString cfg.wsPort}"
+ " --auto-connect=1"
+ (lib.optionalString (cfg.device != null) " --dev=${cfg.device}")
+ " " + (lib.escapeShellArgs cfg.extraArgs);
Restart = "on-failure";
AmbientCapabilities = capabilities;
CapabilityBoundingSet = capabilities;
UMask = "0027";
DynamicUser = true;
RuntimeDirectory = name;
RuntimeDirectoryMode = "0700";
StateDirectory = name;
WorkingDirectory = stateDir;
# For access to /dev/ttyACM0 (ConBee).
SupplementaryGroups = [ "dialout" ];
ProtectHome = true;
};
};
};
}

View file

@ -45,7 +45,7 @@ this instance, and `url`, which holds the URL under which the sync server can be
accessed. The `url` can be configured automatically when using nginx. accessed. The `url` can be configured automatically when using nginx.
Options that affect the surroundings of the sync server are `enableNginx`, Options that affect the surroundings of the sync server are `enableNginx`,
`enableTLS` and `hostnam`. If `enableNginx` is set the sync server module will `enableTLS` and `hostname`. If `enableNginx` is set the sync server module will
automatically add an nginx virtual host to the system using `hostname` as the automatically add an nginx virtual host to the system using `hostname` as the
domain and set `url` accordingly. If `enableTLS` is set the module will also domain and set `url` accordingly. If `enableTLS` is set the module will also
enable ACME certificates on the new virtual host and force all connections to enable ACME certificates on the new virtual host and force all connections to

View file

@ -224,10 +224,12 @@ in
Settings for the sync server. These take priority over values computed Settings for the sync server. These take priority over values computed
from NixOS options. from NixOS options.
See the doc comments on the `Settings` structs in See the example config in
<https://github.com/mozilla-services/syncstorage-rs/blob/master/syncstorage/src/settings.rs> <https://github.com/mozilla-services/syncstorage-rs/blob/master/config/local.example.toml>
and the doc comments on the `Settings` structs in
<https://github.com/mozilla-services/syncstorage-rs/blob/master/syncstorage-settings/src/lib.rs>
and and
<https://github.com/mozilla-services/syncstorage-rs/blob/master/syncstorage/src/tokenserver/settings.rs> <https://github.com/mozilla-services/syncstorage-rs/blob/master/tokenserver-settings/src/lib.rs>
for available options. for available options.
''; '';
}; };

View file

@ -43,12 +43,8 @@ in
[ "services" "searx" "settingsFile" ]) [ "services" "searx" "settingsFile" ])
]; ];
###### interface
options = { options = {
services.searx = { services.searx = {
enable = mkOption { enable = mkOption {
type = types.bool; type = types.bool;
default = false; default = false;
@ -149,8 +145,8 @@ in
package = mkOption { package = mkOption {
type = types.package; type = types.package;
default = pkgs.searx; default = pkgs.searxng;
defaultText = literalExpression "pkgs.searx"; defaultText = literalExpression "pkgs.searxng";
description = lib.mdDoc "searx package to use."; description = lib.mdDoc "searx package to use.";
}; };
@ -190,21 +186,7 @@ in
}; };
###### implementation
config = mkIf cfg.enable { config = mkIf cfg.enable {
assertions = [
{
assertion = (cfg.limiterSettings != { }) -> cfg.package.pname == "searxng";
message = "services.searx.limiterSettings requires services.searx.package to be searxng.";
}
{
assertion = cfg.redisCreateLocally -> cfg.package.pname == "searxng";
message = "services.searx.redisCreateLocally requires services.searx.package to be searxng.";
}
];
environment.systemPackages = [ cfg.package ]; environment.systemPackages = [ cfg.package ];
users.users.searx = users.users.searx =
@ -245,10 +227,10 @@ in
}; };
}; };
systemd.services.uwsgi = mkIf (cfg.runInUwsgi) systemd.services.uwsgi = mkIf cfg.runInUwsgi {
{ requires = [ "searx-init.service" ]; requires = [ "searx-init.service" ];
after = [ "searx-init.service" ]; after = [ "searx-init.service" ];
}; };
services.searx.settings = { services.searx.settings = {
# merge NixOS settings with defaults settings.yml # merge NixOS settings with defaults settings.yml
@ -256,7 +238,7 @@ in
redis.url = lib.mkIf cfg.redisCreateLocally "unix://${config.services.redis.servers.searx.unixSocket}"; redis.url = lib.mkIf cfg.redisCreateLocally "unix://${config.services.redis.servers.searx.unixSocket}";
}; };
services.uwsgi = mkIf (cfg.runInUwsgi) { services.uwsgi = mkIf cfg.runInUwsgi {
enable = true; enable = true;
plugins = [ "python3" ]; plugins = [ "python3" ];
@ -270,6 +252,7 @@ in
enable-threads = true; enable-threads = true;
module = "searx.webapp"; module = "searx.webapp";
env = [ env = [
# TODO: drop this as it is only required for searx
"SEARX_SETTINGS_PATH=${cfg.settingsFile}" "SEARX_SETTINGS_PATH=${cfg.settingsFile}"
# searxng compatibility https://github.com/searxng/searxng/issues/1519 # searxng compatibility https://github.com/searxng/searxng/issues/1519
"SEARXNG_SETTINGS_PATH=${cfg.settingsFile}" "SEARXNG_SETTINGS_PATH=${cfg.settingsFile}"

View file

@ -74,6 +74,19 @@ let
}; };
}; };
options.openssh.authorizedPrincipals = mkOption {
type = with types; listOf types.singleLineStr;
default = [];
description = mdDoc ''
A list of verbatim principal names that should be added to the user's
authorized principals.
'';
example = [
"example@host"
"foo@bar"
];
};
}; };
authKeysFiles = let authKeysFiles = let
@ -89,6 +102,16 @@ let
)); ));
in listToAttrs (map mkAuthKeyFile usersWithKeys); in listToAttrs (map mkAuthKeyFile usersWithKeys);
authPrincipalsFiles = let
mkAuthPrincipalsFile = u: nameValuePair "ssh/authorized_principals.d/${u.name}" {
mode = "0444";
text = concatStringsSep "\n" u.openssh.authorizedPrincipals;
};
usersWithPrincipals = attrValues (flip filterAttrs config.users.users (n: u:
length u.openssh.authorizedPrincipals != 0
));
in listToAttrs (map mkAuthPrincipalsFile usersWithPrincipals);
in in
{ {
@ -285,6 +308,14 @@ in
type = types.submodule ({name, ...}: { type = types.submodule ({name, ...}: {
freeformType = settingsFormat.type; freeformType = settingsFormat.type;
options = { options = {
AuthorizedPrincipalsFile = mkOption {
type = types.str;
default = "none"; # upstream default
description = lib.mdDoc ''
Specifies a file that lists principal names that are accepted for certificate authentication. The default
is `"none"`, i.e. not to use a principals file.
'';
};
LogLevel = mkOption { LogLevel = mkOption {
type = types.enum [ "QUIET" "FATAL" "ERROR" "INFO" "VERBOSE" "DEBUG" "DEBUG1" "DEBUG2" "DEBUG3" ]; type = types.enum [ "QUIET" "FATAL" "ERROR" "INFO" "VERBOSE" "DEBUG" "DEBUG1" "DEBUG2" "DEBUG3" ];
default = "INFO"; # upstream default default = "INFO"; # upstream default
@ -444,7 +475,7 @@ in
services.openssh.moduliFile = mkDefault "${cfgc.package}/etc/ssh/moduli"; services.openssh.moduliFile = mkDefault "${cfgc.package}/etc/ssh/moduli";
services.openssh.sftpServerExecutable = mkDefault "${cfgc.package}/libexec/sftp-server"; services.openssh.sftpServerExecutable = mkDefault "${cfgc.package}/libexec/sftp-server";
environment.etc = authKeysFiles // environment.etc = authKeysFiles // authPrincipalsFiles //
{ "ssh/moduli".source = cfg.moduliFile; { "ssh/moduli".source = cfg.moduliFile;
"ssh/sshd_config".source = sshconf; "ssh/sshd_config".source = sshconf;
}; };
@ -541,6 +572,8 @@ in
services.openssh.authorizedKeysFiles = services.openssh.authorizedKeysFiles =
[ "%h/.ssh/authorized_keys" "/etc/ssh/authorized_keys.d/%u" ]; [ "%h/.ssh/authorized_keys" "/etc/ssh/authorized_keys.d/%u" ];
services.openssh.settings.AuthorizedPrincipalsFile = mkIf (authPrincipalsFiles != {}) "/etc/ssh/authorized_principals.d/%u";
services.openssh.extraConfig = mkOrder 0 services.openssh.extraConfig = mkOrder 0
'' ''
UsePAM yes UsePAM yes

View file

@ -36,17 +36,15 @@ let
# be careful not to leak secrets in the filesystem or in process listings # be careful not to leak secrets in the filesystem or in process listings
umask 0077 umask 0077
# get the api key by parsing the config.xml
while
! ${pkgs.libxml2}/bin/xmllint \
--xpath 'string(configuration/gui/apikey)' \
${cfg.configDir}/config.xml \
>"$RUNTIME_DIRECTORY/api_key"
do sleep 1; done
(printf "X-API-Key: "; cat "$RUNTIME_DIRECTORY/api_key") >"$RUNTIME_DIRECTORY/headers"
curl() { curl() {
# get the api key by parsing the config.xml
while
! ${pkgs.libxml2}/bin/xmllint \
--xpath 'string(configuration/gui/apikey)' \
${cfg.configDir}/config.xml \
>"$RUNTIME_DIRECTORY/api_key"
do sleep 1; done
(printf "X-API-Key: "; cat "$RUNTIME_DIRECTORY/api_key") >"$RUNTIME_DIRECTORY/headers"
${pkgs.curl}/bin/curl -sSLk -H "@$RUNTIME_DIRECTORY/headers" \ ${pkgs.curl}/bin/curl -sSLk -H "@$RUNTIME_DIRECTORY/headers" \
--retry 1000 --retry-delay 1 --retry-all-errors \ --retry 1000 --retry-delay 1 --retry-all-errors \
"$@" "$@"

View file

@ -0,0 +1,103 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.tinyproxy;
mkValueStringTinyproxy = with lib; v:
if true == v then "yes"
else if false == v then "no"
else generators.mkValueStringDefault {} v;
mkKeyValueTinyproxy = {
mkValueString ? mkValueStringDefault {}
}: sep: k: v:
if null == v then ""
else "${lib.strings.escape [sep] k}${sep}${mkValueString v}";
settingsFormat = (pkgs.formats.keyValue {
mkKeyValue = mkKeyValueTinyproxy {
mkValueString = mkValueStringTinyproxy;
} " ";
listsAsDuplicateKeys= true;
});
configFile = settingsFormat.generate "tinyproxy.conf" cfg.settings;
in
{
options = {
services.tinyproxy = {
enable = mkEnableOption (lib.mdDoc "Tinyproxy daemon");
package = mkPackageOptionMD pkgs "tinyproxy" {};
settings = mkOption {
description = lib.mdDoc "Configuration for [tinyproxy](https://tinyproxy.github.io/).";
default = { };
example = literalExpression ''{
Port 8888;
Listen 127.0.0.1;
Timeout 600;
Allow 127.0.0.1;
Anonymous = ['"Host"' '"Authorization"'];
ReversePath = '"/example/" "http://www.example.com/"';
}'';
type = types.submodule ({name, ...}: {
freeformType = settingsFormat.type;
options = {
Listen = mkOption {
type = types.str;
default = "127.0.0.1";
description = lib.mdDoc ''
Specify which address to listen to.
'';
};
Port = mkOption {
type = types.int;
default = 8888;
description = lib.mdDoc ''
Specify which port to listen to.
'';
};
Anonymous = mkOption {
type = types.listOf types.str;
default = [];
description = lib.mdDoc ''
If an `Anonymous` keyword is present, then anonymous proxying is enabled. The headers listed with `Anonymous` are allowed through, while all others are denied. If no Anonymous keyword is present, then all headers are allowed through. You must include quotes around the headers.
'';
};
Filter = mkOption {
type = types.nullOr types.path;
default = null;
description = lib.mdDoc ''
Tinyproxy supports filtering of web sites based on URLs or domains. This option specifies the location of the file containing the filter rules, one rule per line.
'';
};
};
});
};
};
};
config = mkIf cfg.enable {
systemd.services.tinyproxy = {
description = "TinyProxy daemon";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
User = "tinyproxy";
Group = "tinyproxy";
Type = "simple";
ExecStart = "${getExe pkgs.tinyproxy} -d -c ${configFile}";
ExecReload = "${pkgs.coreutils}/bin/kill -SIGHUP $MAINPID";
KillSignal = "SIGINT";
TimeoutStopSec = "30s";
Restart = "on-failure";
};
};
users.users.tinyproxy = {
group = "tinyproxy";
isSystemUser = true;
};
users.groups.tinyproxy = {};
};
meta.maintainers = with maintainers; [ tcheronneau ];
}

View file

@ -393,7 +393,7 @@ in
) )
) // { ) // {
# Miscellaneous options # Miscellaneous options
inherit (cfg) banaction maxretry; inherit (cfg) banaction maxretry bantime;
ignoreip = ''127.0.0.1/8 ${optionalString config.networking.enableIPv6 "::1"} ${concatStringsSep " " cfg.ignoreIP}''; ignoreip = ''127.0.0.1/8 ${optionalString config.networking.enableIPv6 "::1"} ${concatStringsSep " " cfg.ignoreIP}'';
backend = "systemd"; backend = "systemd";
# Actions # Actions

View file

@ -172,7 +172,7 @@ in {
ln -sf '${file}' "${local}" ln -sf '${file}' "${local}"
'') rules} '') rules}
if [ ! -f /etc/opensnitch-system-fw.json ]; then if [ ! -f /etc/opensnitchd/system-fw.json ]; then
cp "${pkgs.opensnitch}/etc/opensnitchd/system-fw.json" "/etc/opensnitchd/system-fw.json" cp "${pkgs.opensnitch}/etc/opensnitchd/system-fw.json" "/etc/opensnitchd/system-fw.json"
fi fi
''); '');

View file

@ -0,0 +1,95 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.tang;
in
{
options.services.tang = {
enable = mkEnableOption "tang";
package = mkOption {
type = types.package;
default = pkgs.tang;
defaultText = literalExpression "pkgs.tang";
description = mdDoc "The tang package to use.";
};
listenStream = mkOption {
type = with types; listOf str;
default = [ "7654" ];
example = [ "198.168.100.1:7654" "[2001:db8::1]:7654" "7654" ];
description = mdDoc ''
Addresses and/or ports on which tang should listen.
For detailed syntax see ListenStream in {manpage}`systemd.socket(5)`.
'';
};
ipAddressAllow = mkOption {
example = [ "192.168.1.0/24" ];
type = types.listOf types.str;
description = ''
Whitelist a list of address prefixes.
Preferably, internal addresses should be used.
'';
};
};
config = mkIf cfg.enable {
environment.systemPackages = [ cfg.package ];
systemd.services."tangd@" = {
description = "Tang server";
path = [ cfg.package ];
serviceConfig = {
StandardInput = "socket";
StandardOutput = "socket";
StandardError = "journal";
DynamicUser = true;
StateDirectory = "tang";
RuntimeDirectory = "tang";
StateDirectoryMode = "700";
UMask = "0077";
CapabilityBoundingSet = [ "" ];
ExecStart = "${cfg.package}/libexec/tangd %S/tang";
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
DeviceAllow = [ "/dev/stdin" ];
RestrictAddressFamilies = [ "AF_UNIX" ];
DevicePolicy = "strict";
PrivateDevices = true;
PrivateTmp = true;
PrivateUsers = true;
ProcSubset = "pid";
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectProc = "invisible";
ProtectSystem = "strict";
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
IPAddressDeny = "any";
IPAddressAllow = cfg.ipAddressAllow;
};
};
systemd.sockets.tangd = {
description = "Tang server";
wantedBy = [ "sockets.target" ];
socketConfig = {
ListenStream = cfg.listenStream;
Accept = "yes";
IPAddressDeny = "any";
IPAddressAllow = cfg.ipAddressAllow;
};
};
};
meta.maintainers = with lib.maintainers; [ jfroche julienmalka ];
}

View file

@ -119,13 +119,7 @@ Auto updates for Nextcloud apps can be enabled using
- **Server-side encryption.** - **Server-side encryption.**
Nextcloud supports [server-side encryption (SSE)](https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/encryption_configuration.html). Nextcloud supports [server-side encryption (SSE)](https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/encryption_configuration.html).
This is not an end-to-end encryption, but can be used to encrypt files that will be persisted This is not an end-to-end encryption, but can be used to encrypt files that will be persisted
to external storage such as S3. Please note that this won't work anymore when using OpenSSL 3 to external storage such as S3.
for PHP's openssl extension and **Nextcloud 25 or older** because this is implemented using the
legacy cipher RC4. For Nextcloud26 this isn't relevant anymore, because Nextcloud has an RC4 implementation
written in native PHP and thus doesn't need `ext-openssl` for that anymore.
If [](#opt-system.stateVersion) is *above* `22.05`,
this is disabled by default. To turn it on again and for further information please refer to
[](#opt-services.nextcloud.enableBrokenCiphersForSSE).
## Using an alternative webserver as reverse-proxy (e.g. `httpd`) {#module-services-nextcloud-httpd} ## Using an alternative webserver as reverse-proxy (e.g. `httpd`) {#module-services-nextcloud-httpd}

View file

@ -27,13 +27,7 @@ let
phpPackage = cfg.phpPackage.buildEnv { phpPackage = cfg.phpPackage.buildEnv {
extensions = { enabled, all }: extensions = { enabled, all }:
(with all; (with all; enabled
# disable default openssl extension
(lib.filter (e: e.pname != "php-openssl") enabled)
# use OpenSSL 1.1 for RC4 Nextcloud encryption if user
# has acknowledged the brokenness of the ciphers (RC4).
# TODO: remove when https://github.com/nextcloud/server/issues/32003 is fixed.
++ (if cfg.enableBrokenCiphersForSSE then [ cfg.phpPackage.extensions.openssl-legacy ] else [ cfg.phpPackage.extensions.openssl ])
++ optional cfg.enableImagemagick imagick ++ optional cfg.enableImagemagick imagick
# Optionally enabled depending on caching settings # Optionally enabled depending on caching settings
++ optional cfg.caching.apcu apcu ++ optional cfg.caching.apcu apcu
@ -66,6 +60,9 @@ let
mysqlLocal = cfg.database.createLocally && cfg.config.dbtype == "mysql"; mysqlLocal = cfg.database.createLocally && cfg.config.dbtype == "mysql";
pgsqlLocal = cfg.database.createLocally && cfg.config.dbtype == "pgsql"; pgsqlLocal = cfg.database.createLocally && cfg.config.dbtype == "pgsql";
# https://github.com/nextcloud/documentation/pull/11179
ocmProviderIsNotAStaticDirAnymore = versionAtLeast cfg.package.version "27.1.2";
in { in {
imports = [ imports = [
@ -87,6 +84,10 @@ in {
Further details about this can be found in the `Nextcloud`-section of the NixOS-manual Further details about this can be found in the `Nextcloud`-section of the NixOS-manual
(which can be opened e.g. by running `nixos-help`). (which can be opened e.g. by running `nixos-help`).
'') '')
(mkRemovedOptionModule [ "services" "nextcloud" "enableBrokenCiphersForSSE" ] ''
This option has no effect since there's no supported Nextcloud version packaged here
using OpenSSL for RC4 SSE.
'')
(mkRemovedOptionModule [ "services" "nextcloud" "disableImagemagick" ] '' (mkRemovedOptionModule [ "services" "nextcloud" "disableImagemagick" ] ''
Use services.nextcloud.enableImagemagick instead. Use services.nextcloud.enableImagemagick instead.
'') '')
@ -95,39 +96,6 @@ in {
options.services.nextcloud = { options.services.nextcloud = {
enable = mkEnableOption (lib.mdDoc "nextcloud"); enable = mkEnableOption (lib.mdDoc "nextcloud");
enableBrokenCiphersForSSE = mkOption {
type = types.bool;
default = versionOlder stateVersion "22.11";
defaultText = literalExpression "versionOlder system.stateVersion \"22.11\"";
description = lib.mdDoc ''
This option enables using the OpenSSL PHP extension linked against OpenSSL 1.1
rather than latest OpenSSL ( 3), this is not recommended unless you need
it for server-side encryption (SSE). SSE uses the legacy RC4 cipher which is
considered broken for several years now. See also [RFC7465](https://datatracker.ietf.org/doc/html/rfc7465).
This cipher has been disabled in OpenSSL 3 and requires
a specific legacy profile to re-enable it.
If you deploy Nextcloud using OpenSSL  3 for PHP and have
server-side encryption configured, you will not be able to access
your files anymore. Enabling this option can restore access to your files.
Upon testing we didn't encounter any data corruption when turning
this on and off again, but this cannot be guaranteed for
each Nextcloud installation.
It is `true` by default for systems with a [](#opt-system.stateVersion) below
`22.11` to make sure that existing installations won't break on update. On newer
NixOS systems you have to explicitly enable it on your own.
Please note that this only provides additional value when using
external storage such as S3 since it's not an end-to-end encryption.
If this is not the case,
it is advised to [disable server-side encryption](https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/encryption_configuration.html#disabling-encryption) and set this to `false`.
In the future, Nextcloud may move to AES-256-GCM, by then,
this option will be removed.
'';
};
hostName = mkOption { hostName = mkOption {
type = types.str; type = types.str;
description = lib.mdDoc "FQDN for the nextcloud instance."; description = lib.mdDoc "FQDN for the nextcloud instance.";
@ -225,7 +193,7 @@ in {
package = mkOption { package = mkOption {
type = types.package; type = types.package;
description = lib.mdDoc "Which package to use for the Nextcloud instance."; description = lib.mdDoc "Which package to use for the Nextcloud instance.";
relatedPackages = [ "nextcloud25" "nextcloud26" "nextcloud27" ]; relatedPackages = [ "nextcloud26" "nextcloud27" ];
}; };
phpPackage = mkOption { phpPackage = mkOption {
type = types.package; type = types.package;
@ -740,28 +708,7 @@ in {
'') '')
++ (optional (versionOlder cfg.package.version "25") (upgradeWarning 24 "22.11")) ++ (optional (versionOlder cfg.package.version "25") (upgradeWarning 24 "22.11"))
++ (optional (versionOlder cfg.package.version "26") (upgradeWarning 25 "23.05")) ++ (optional (versionOlder cfg.package.version "26") (upgradeWarning 25 "23.05"))
++ (optional (versionOlder cfg.package.version "27") (upgradeWarning 26 "23.11")) ++ (optional (versionOlder cfg.package.version "27") (upgradeWarning 26 "23.11"));
++ (optional cfg.enableBrokenCiphersForSSE ''
You're using PHP's openssl extension built against OpenSSL 1.1 for Nextcloud.
This is only necessary if you're using Nextcloud's server-side encryption.
Please keep in mind that it's using the broken RC4 cipher.
If you don't use that feature, you can switch to OpenSSL 3 and get
rid of this warning by declaring
services.nextcloud.enableBrokenCiphersForSSE = false;
If you need to use server-side encryption you can ignore this warning.
Otherwise you'd have to disable server-side encryption first in order
to be able to safely disable this option and get rid of this warning.
See <https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/encryption_configuration.html#disabling-encryption> on how to achieve this.
For more context, here is the implementing pull request: https://github.com/NixOS/nixpkgs/pull/198470
'')
++ (optional (cfg.enableBrokenCiphersForSSE && versionAtLeast cfg.package.version "26") ''
Nextcloud26 supports RC4 without requiring legacy OpenSSL, so
`services.nextcloud.enableBrokenCiphersForSSE` can be set to `false`.
'');
services.nextcloud.package = with pkgs; services.nextcloud.package = with pkgs;
mkDefault ( mkDefault (
@ -1136,10 +1083,6 @@ in {
} }
''; '';
}; };
"/" = {
priority = 900;
extraConfig = "rewrite ^ /index.php;";
};
"~ ^/store-apps" = { "~ ^/store-apps" = {
priority = 201; priority = 201;
extraConfig = "root ${cfg.home};"; extraConfig = "root ${cfg.home};";
@ -1164,15 +1107,23 @@ in {
try_files $uri $uri/ =404; try_files $uri $uri/ =404;
''; '';
}; };
"~ ^/(?:build|tests|config|lib|3rdparty|templates|data)(?:$|/)".extraConfig = '' "~ ^/(?:build|tests|config|lib|3rdparty|templates|data)(?:$|/)" = {
return 404; priority = 450;
''; extraConfig = ''
"~ ^/(?:\\.(?!well-known)|autotest|occ|issue|indie|db_|console)".extraConfig = '' return 404;
return 404; '';
''; };
"~ ^\\/(?:index|remote|public|cron|core\\/ajax\\/update|status|ocs\\/v[12]|updater\\/.+|oc[ms]-provider\\/.+|.+\\/richdocumentscode\\/proxy)\\.php(?:$|\\/)" = { "~ ^/(?:\\.|autotest|occ|issue|indie|db_|console)" = {
priority = 450;
extraConfig = ''
return 404;
'';
};
"~ \\.php(?:$|/)" = {
priority = 500; priority = 500;
extraConfig = '' extraConfig = ''
# legacy support (i.e. static files and directories in cfg.package)
rewrite ^/(?!index|remote|public|cron|core\/ajax\/update|status|ocs\/v[12]|updater\/.+|oc[s${optionalString (!ocmProviderIsNotAStaticDirAnymore) "m"}]-provider\/.+|.+\/richdocumentscode\/proxy) /index.php$request_uri;
include ${config.services.nginx.package}/conf/fastcgi.conf; include ${config.services.nginx.package}/conf/fastcgi.conf;
fastcgi_split_path_info ^(.+?\.php)(\\/.*)$; fastcgi_split_path_info ^(.+?\.php)(\\/.*)$;
set $path_info $fastcgi_path_info; set $path_info $fastcgi_path_info;
@ -1188,19 +1139,30 @@ in {
fastcgi_read_timeout ${builtins.toString cfg.fastcgiTimeout}s; fastcgi_read_timeout ${builtins.toString cfg.fastcgiTimeout}s;
''; '';
}; };
"~ \\.(?:css|js|woff2?|svg|gif|map)$".extraConfig = '' "~ \\.(?:css|js|mjs|svg|gif|png|jpg|jpeg|ico|wasm|tflite|map|html|ttf|bcmap|mp4|webm)$".extraConfig = ''
try_files $uri /index.php$request_uri; try_files $uri /index.php$request_uri;
expires 6M; expires 6M;
access_log off; access_log off;
location ~ \.wasm$ {
default_type application/wasm;
}
''; '';
"~ ^\\/(?:updater|ocs-provider|ocm-provider)(?:$|\\/)".extraConfig = '' "~ ^\\/(?:updater|ocs-provider${optionalString (!ocmProviderIsNotAStaticDirAnymore) "|ocm-provider"})(?:$|\\/)".extraConfig = ''
try_files $uri/ =404; try_files $uri/ =404;
index index.php; index index.php;
''; '';
"~ \\.(?:png|html|ttf|ico|jpg|jpeg|bcmap|mp4|webm)$".extraConfig = '' "/remote" = {
try_files $uri /index.php$request_uri; priority = 1500;
access_log off; extraConfig = ''
''; return 301 /remote.php$request_uri;
'';
};
"/" = {
priority = 1600;
extraConfig = ''
try_files $uri $uri/ /index.php$request_uri;
'';
};
}; };
extraConfig = '' extraConfig = ''
index index.php index.html /index.php$request_uri; index index.php index.html /index.php$request_uri;

View file

@ -35,7 +35,15 @@ in {
Enable Peering Manager. Enable Peering Manager.
This module requires a reverse proxy that serves `/static` separately. This module requires a reverse proxy that serves `/static` separately.
See this [example](https://github.com/peering-manager-community/peering-manager/blob/develop/contrib/nginx.conf/) on how to configure this. See this [example](https://github.com/peering-manager/contrib/blob/main/nginx.conf on how to configure this.
'';
};
enableScheduledTasks = mkOption {
type = types.bool;
default = true;
description = ''
Set up [scheduled tasks](https://peering-manager.readthedocs.io/en/stable/setup/8-scheduled-tasks/)
''; '';
}; };
@ -194,32 +202,30 @@ in {
}; };
systemd.services = let systemd.services = let
defaultServiceConfig = { defaults = {
WorkingDirectory = "/var/lib/peering-manager";
User = "peering-manager";
Group = "peering-manager";
StateDirectory = "peering-manager";
StateDirectoryMode = "0750";
Restart = "on-failure";
};
in {
peering-manager-migration = {
description = "Peering Manager migrations";
wantedBy = [ "peering-manager.target" ];
environment = { environment = {
PYTHONPATH = pkg.pythonPath; PYTHONPATH = pkg.pythonPath;
}; };
serviceConfig = {
serviceConfig = defaultServiceConfig // { WorkingDirectory = "/var/lib/peering-manager";
User = "peering-manager";
Group = "peering-manager";
StateDirectory = "peering-manager";
StateDirectoryMode = "0750";
Restart = "on-failure";
};
};
in {
peering-manager-migration = lib.recursiveUpdate defaults {
description = "Peering Manager migrations";
wantedBy = [ "peering-manager.target" ];
serviceConfig = {
Type = "oneshot"; Type = "oneshot";
ExecStart = '' ExecStart = "${pkg}/bin/peering-manager migrate";
${pkg}/bin/peering-manager migrate
'';
}; };
}; };
peering-manager = { peering-manager = lib.recursiveUpdate defaults {
description = "Peering Manager WSGI Service"; description = "Peering Manager WSGI Service";
wantedBy = [ "peering-manager.target" ]; wantedBy = [ "peering-manager.target" ];
after = [ "peering-manager-migration.service" ]; after = [ "peering-manager-migration.service" ];
@ -228,11 +234,7 @@ in {
${pkg}/bin/peering-manager remove_stale_contenttypes --no-input ${pkg}/bin/peering-manager remove_stale_contenttypes --no-input
''; '';
environment = { serviceConfig = {
PYTHONPATH = pkg.pythonPath;
};
serviceConfig = defaultServiceConfig // {
ExecStart = '' ExecStart = ''
${pkg.python.pkgs.gunicorn}/bin/gunicorn peering_manager.wsgi \ ${pkg.python.pkgs.gunicorn}/bin/gunicorn peering_manager.wsgi \
--bind ${cfg.listenAddress}:${toString cfg.port} \ --bind ${cfg.listenAddress}:${toString cfg.port} \
@ -241,45 +243,92 @@ in {
}; };
}; };
peering-manager-rq = { peering-manager-rq = lib.recursiveUpdate defaults {
description = "Peering Manager Request Queue Worker"; description = "Peering Manager Request Queue Worker";
wantedBy = [ "peering-manager.target" ]; wantedBy = [ "peering-manager.target" ];
after = [ "peering-manager.service" ]; after = [ "peering-manager.service" ];
serviceConfig.ExecStart = "${pkg}/bin/peering-manager rqworker high default low";
};
environment = { peering-manager-housekeeping = lib.recursiveUpdate defaults {
PYTHONPATH = pkg.pythonPath; description = "Peering Manager housekeeping job";
}; after = [ "peering-manager.service" ];
serviceConfig = {
serviceConfig = defaultServiceConfig // { Type = "oneshot";
ExecStart = '' ExecStart = "${pkg}/bin/peering-manager housekeeping";
${pkg}/bin/peering-manager rqworker high default low
'';
}; };
}; };
peering-manager-housekeeping = { peering-manager-peeringdb-sync = lib.recursiveUpdate defaults {
description = "Peering Manager housekeeping job"; description = "PeeringDB sync";
after = [ "peering-manager.service" ]; after = [ "peering-manager.service" ];
serviceConfig = {
environment = {
PYTHONPATH = pkg.pythonPath;
};
serviceConfig = defaultServiceConfig // {
Type = "oneshot"; Type = "oneshot";
ExecStart = '' ExecStart = "${pkg}/bin/peering-manager peeringdb_sync";
${pkg}/bin/peering-manager housekeeping };
''; };
peering-manager-prefix-fetch = lib.recursiveUpdate defaults {
description = "Fetch IRR AS-SET prefixes";
after = [ "peering-manager.service" ];
serviceConfig = {
Type = "oneshot";
ExecStart = "${pkg}/bin/peering-manager grab_prefixes";
};
};
peering-manager-configuration-deployment = lib.recursiveUpdate defaults {
description = "Push configuration to routers";
after = [ "peering-manager.service" ];
serviceConfig = {
Type = "oneshot";
ExecStart = "${pkg}/bin/peering-manager configure_routers";
};
};
peering-manager-session-poll = lib.recursiveUpdate defaults {
description = "Poll peering sessions from routers";
after = [ "peering-manager.service" ];
serviceConfig = {
Type = "oneshot";
ExecStart = "${pkg}/bin/peering-manager poll_bgp_sessions --all";
}; };
}; };
}; };
systemd.timers.peering-manager-housekeeping = { systemd.timers = {
description = "Run Peering Manager housekeeping job"; peering-manager-housekeeping = {
wantedBy = [ "timers.target" ]; description = "Run Peering Manager housekeeping job";
wantedBy = [ "timers.target" ];
timerConfig.OnCalendar = "daily";
};
timerConfig = { peering-manager-peeringdb-sync = {
OnCalendar = "daily"; enable = lib.mkDefault cfg.enableScheduledTasks;
description = "Sync PeeringDB at 2:30";
wantedBy = [ "timers.target" ];
timerConfig.OnCalendar = "02:30:00";
};
peering-manager-prefix-fetch = {
enable = lib.mkDefault cfg.enableScheduledTasks;
description = "Fetch IRR AS-SET prefixes at 4:30";
wantedBy = [ "timers.target" ];
timerConfig.OnCalendar = "04:30:00";
};
peering-manager-configuration-deployment = {
enable = lib.mkDefault cfg.enableScheduledTasks;
description = "Push router configuration every hour 5 minutes before full hour";
wantedBy = [ "timers.target" ];
timerConfig.OnCalendar = "*:55:00";
};
peering-manager-session-poll = {
enable = lib.mkDefault cfg.enableScheduledTasks;
description = "Poll peering sessions from routers every hour";
wantedBy = [ "timers.target" ];
timerConfig.OnCalendar = "*:00:00";
}; };
}; };

View file

@ -296,6 +296,6 @@ in {
]; ];
}; };
meta.maintainers = with maintainers; [ ma27 ]; meta.maintainers = with maintainers; [ ];
meta.doc = ./plausible.md; meta.doc = ./plausible.md;
} }

View file

@ -120,7 +120,7 @@ let
withConfigFile '' withConfigFile ''
query () { query () {
local result=$(${sqlite}/bin/sqlite3 \ local result=$(${sqlite}/bin/sqlite3 \
'${cfg.stateDir}/${settings.database.filename}' '${cfg.stateDir}/${settings.database.filename}' \
"$1" \ "$1" \
) )

View file

@ -4,7 +4,7 @@ with lib;
let let
cfg = config.services.garage; cfg = config.services.garage;
toml = pkgs.formats.toml {}; toml = pkgs.formats.toml { };
configFile = toml.generate "garage.toml" cfg.settings; configFile = toml.generate "garage.toml" cfg.settings;
in in
{ {
@ -19,8 +19,8 @@ in
extraEnvironment = mkOption { extraEnvironment = mkOption {
type = types.attrsOf types.str; type = types.attrsOf types.str;
description = lib.mdDoc "Extra environment variables to pass to the Garage server."; description = lib.mdDoc "Extra environment variables to pass to the Garage server.";
default = {}; default = { };
example = { RUST_BACKTRACE="yes"; }; example = { RUST_BACKTRACE = "yes"; };
}; };
environmentFile = mkOption { environmentFile = mkOption {
@ -30,7 +30,7 @@ in
}; };
logLevel = mkOption { logLevel = mkOption {
type = types.enum (["info" "debug" "trace"]); type = types.enum ([ "info" "debug" "trace" ]);
default = "info"; default = "info";
example = "debug"; example = "debug";
description = lib.mdDoc "Garage log level, see <https://garagehq.deuxfleurs.fr/documentation/quick-start/#launching-the-garage-server> for examples."; description = lib.mdDoc "Garage log level, see <https://garagehq.deuxfleurs.fr/documentation/quick-start/#launching-the-garage-server> for examples.";
@ -65,12 +65,8 @@ in
}; };
package = mkOption { package = mkOption {
# TODO: when 23.05 is released and if Garage 0.9 is the default, put a stateVersion check.
default = if versionAtLeast config.system.stateVersion "23.05" then pkgs.garage_0_8
else pkgs.garage_0_7;
defaultText = literalExpression "pkgs.garage_0_7";
type = types.package; type = types.package;
description = lib.mdDoc "Garage package to use, if you are upgrading from a major version, please read NixOS and Garage release notes for upgrade instructions."; description = lib.mdDoc "Garage package to use, needs to be set explicitly. If you are upgrading from a major version, please read NixOS and Garage release notes for upgrade instructions.";
}; };
}; };

View file

@ -221,7 +221,7 @@ in
# Default Fonts # Default Fonts
fonts.packages = with pkgs; [ fonts.packages = with pkgs; [
source-code-pro # Default monospace font in 3.32 dejavu_fonts # Default monospace font in LMDE 6+
ubuntu_font_family # required for default theme ubuntu_font_family # required for default theme
]; ];
}) })

View file

@ -90,7 +90,7 @@ in
}; };
}; };
environment.etc."X11/xkb".source = xcfg.xkbDir; environment.etc."X11/xkb".source = xcfg.xkb.dir;
fonts.packages = [ pkgs.dejavu_fonts pkgs.ubuntu_font_family ]; fonts.packages = [ pkgs.dejavu_fonts pkgs.ubuntu_font_family ];

View file

@ -309,7 +309,7 @@ in
"/share" "/share"
]; ];
environment.etc."X11/xkb".source = xcfg.xkbDir; environment.etc."X11/xkb".source = xcfg.xkb.dir;
environment.sessionVariables = { environment.sessionVariables = {
PLASMA_USE_QT_SCALING = mkIf cfg.useQtScaling "1"; PLASMA_USE_QT_SCALING = mkIf cfg.useQtScaling "1";

View file

@ -204,10 +204,10 @@ in
left-handed = xcfg.libinput.mouse.leftHanded; left-handed = xcfg.libinput.mouse.leftHanded;
}; };
keyboard = { keyboard = {
keymap_model = xcfg.xkbModel; keymap_model = xcfg.xkb.model;
keymap_layout = xcfg.layout; keymap_layout = xcfg.xkb.layout;
keymap_variant = xcfg.xkbVariant; keymap_variant = xcfg.xkb.variant;
keymap_options = xcfg.xkbOptions; keymap_options = xcfg.xkb.options;
}; };
}; in "${pkgs.weston}/bin/weston --shell=fullscreen-shell.so -c ${westonIni}"; }; in "${pkgs.weston}/bin/weston --shell=fullscreen-shell.so -c ${westonIni}";
description = lib.mdDoc "Command used to start the selected compositor"; description = lib.mdDoc "Command used to start the selected compositor";

View file

@ -121,11 +121,11 @@ in
environment.sessionVariables = { environment.sessionVariables = {
# runtime override supported by multiple libraries e. g. libxkbcommon # runtime override supported by multiple libraries e. g. libxkbcommon
# https://xkbcommon.org/doc/current/group__include-path.html # https://xkbcommon.org/doc/current/group__include-path.html
XKB_CONFIG_ROOT = config.services.xserver.xkbDir; XKB_CONFIG_ROOT = config.services.xserver.xkb.dir;
}; };
services.xserver = { services.xserver = {
xkbDir = "${xkb_patched}/etc/X11/xkb"; xkb.dir = "${xkb_patched}/etc/X11/xkb";
exportConfiguration = config.services.xserver.displayManager.startx.enable exportConfiguration = config.services.xserver.displayManager.startx.enable
|| config.services.xserver.displayManager.sx.enable; || config.services.xserver.displayManager.sx.enable;
}; };

View file

@ -175,6 +175,31 @@ in
"Use services.xserver.fontPath instead of useXFS") "Use services.xserver.fontPath instead of useXFS")
(mkRemovedOptionModule [ "services" "xserver" "useGlamor" ] (mkRemovedOptionModule [ "services" "xserver" "useGlamor" ]
"Option services.xserver.useGlamor was removed because it is unnecessary. Drivers that uses Glamor will use it automatically.") "Option services.xserver.useGlamor was removed because it is unnecessary. Drivers that uses Glamor will use it automatically.")
(lib.mkRenamedOptionModuleWith {
sinceRelease = 2311;
from = [ "services" "xserver" "layout" ];
to = [ "services" "xserver" "xkb" "layout" ];
})
(lib.mkRenamedOptionModuleWith {
sinceRelease = 2311;
from = [ "services" "xserver" "xkbModel" ];
to = [ "services" "xserver" "xkb" "model" ];
})
(lib.mkRenamedOptionModuleWith {
sinceRelease = 2311;
from = [ "services" "xserver" "xkbOptions" ];
to = [ "services" "xserver" "xkb" "options" ];
})
(lib.mkRenamedOptionModuleWith {
sinceRelease = 2311;
from = [ "services" "xserver" "xkbVariant" ];
to = [ "services" "xserver" "xkb" "variant" ];
})
(lib.mkRenamedOptionModuleWith {
sinceRelease = 2311;
from = [ "services" "xserver" "xkbDir" ];
to = [ "services" "xserver" "xkb" "dir" ];
})
]; ];
@ -339,48 +364,50 @@ in
''; '';
}; };
layout = mkOption { xkb = {
type = types.str; layout = mkOption {
default = "us"; type = types.str;
description = lib.mdDoc '' default = "us";
Keyboard layout, or multiple keyboard layouts separated by commas. description = lib.mdDoc ''
''; X keyboard layout, or multiple keyboard layouts separated by commas.
}; '';
};
xkbModel = mkOption { model = mkOption {
type = types.str; type = types.str;
default = "pc104"; default = "pc104";
example = "presario"; example = "presario";
description = lib.mdDoc '' description = lib.mdDoc ''
Keyboard model. X keyboard model.
''; '';
}; };
xkbOptions = mkOption { options = mkOption {
type = types.commas; type = types.commas;
default = "terminate:ctrl_alt_bksp"; default = "terminate:ctrl_alt_bksp";
example = "grp:caps_toggle,grp_led:scroll"; example = "grp:caps_toggle,grp_led:scroll";
description = lib.mdDoc '' description = lib.mdDoc ''
X keyboard options; layout switching goes here. X keyboard options; layout switching goes here.
''; '';
}; };
xkbVariant = mkOption { variant = mkOption {
type = types.str; type = types.str;
default = ""; default = "";
example = "colemak"; example = "colemak";
description = lib.mdDoc '' description = lib.mdDoc ''
X keyboard variant. X keyboard variant.
''; '';
}; };
xkbDir = mkOption { dir = mkOption {
type = types.path; type = types.path;
default = "${pkgs.xkeyboard_config}/etc/X11/xkb"; default = "${pkgs.xkeyboard_config}/etc/X11/xkb";
defaultText = literalExpression ''"''${pkgs.xkeyboard_config}/etc/X11/xkb"''; defaultText = literalExpression ''"''${pkgs.xkeyboard_config}/etc/X11/xkb"'';
description = lib.mdDoc '' description = lib.mdDoc ''
Path used for -xkbdir xserver parameter. Path used for -xkbdir xserver parameter.
''; '';
};
}; };
config = mkOption { config = mkOption {
@ -667,7 +694,7 @@ in
{ {
"X11/xorg.conf".source = "${configFile}"; "X11/xorg.conf".source = "${configFile}";
# -xkbdir command line option does not seems to be passed to xkbcomp. # -xkbdir command line option does not seems to be passed to xkbcomp.
"X11/xkb".source = "${cfg.xkbDir}"; "X11/xkb".source = "${cfg.xkb.dir}";
}) })
# localectl looks into 00-keyboard.conf # localectl looks into 00-keyboard.conf
//{ //{
@ -675,10 +702,10 @@ in
Section "InputClass" Section "InputClass"
Identifier "Keyboard catchall" Identifier "Keyboard catchall"
MatchIsKeyboard "on" MatchIsKeyboard "on"
Option "XkbModel" "${cfg.xkbModel}" Option "XkbModel" "${cfg.xkb.model}"
Option "XkbLayout" "${cfg.layout}" Option "XkbLayout" "${cfg.xkb.layout}"
Option "XkbOptions" "${cfg.xkbOptions}" Option "XkbOptions" "${cfg.xkb.options}"
Option "XkbVariant" "${cfg.xkbVariant}" Option "XkbVariant" "${cfg.xkb.variant}"
EndSection EndSection
''; '';
} }
@ -759,7 +786,7 @@ in
services.xserver.displayManager.xserverArgs = services.xserver.displayManager.xserverArgs =
[ "-config ${configFile}" [ "-config ${configFile}"
"-xkbdir" "${cfg.xkbDir}" "-xkbdir" "${cfg.xkb.dir}"
] ++ optional (cfg.display != null) ":${toString cfg.display}" ] ++ optional (cfg.display != null) ":${toString cfg.display}"
++ optional (cfg.tty != null) "vt${toString cfg.tty}" ++ optional (cfg.tty != null) "vt${toString cfg.tty}"
++ optional (cfg.dpi != null) "-dpi ${toString cfg.dpi}" ++ optional (cfg.dpi != null) "-dpi ${toString cfg.dpi}"
@ -777,14 +804,14 @@ in
]; ];
system.checks = singleton (pkgs.runCommand "xkb-validated" { system.checks = singleton (pkgs.runCommand "xkb-validated" {
inherit (cfg) xkbModel layout xkbVariant xkbOptions; inherit (cfg.xkb) model layout variant options;
nativeBuildInputs = with pkgs.buildPackages; [ xkbvalidate ]; nativeBuildInputs = with pkgs.buildPackages; [ xkbvalidate ];
preferLocalBuild = true; preferLocalBuild = true;
} '' } ''
${optionalString (config.environment.sessionVariables ? XKB_CONFIG_ROOT) ${optionalString (config.environment.sessionVariables ? XKB_CONFIG_ROOT)
"export XKB_CONFIG_ROOT=${config.environment.sessionVariables.XKB_CONFIG_ROOT}" "export XKB_CONFIG_ROOT=${config.environment.sessionVariables.XKB_CONFIG_ROOT}"
} }
xkbvalidate "$xkbModel" "$layout" "$xkbVariant" "$xkbOptions" xkbvalidate "$model" "$layout" "$variant" "$options"
touch "$out" touch "$out"
''); '');

View file

@ -1,27 +1,25 @@
#! @python3@/bin/python3 -B #! @python3@/bin/python3 -B
import argparse import argparse
import shutil
import os
import sys
import errno
import subprocess
import glob
import tempfile
import errno
import warnings
import ctypes import ctypes
libc = ctypes.CDLL("libc.so.6")
import re
import datetime import datetime
import errno
import glob import glob
import os
import os.path import os.path
from typing import NamedTuple, List, Optional import re
from packaging import version import shutil
import subprocess
import sys
import warnings
from typing import NamedTuple
libc = ctypes.CDLL("libc.so.6")
class SystemIdentifier(NamedTuple): class SystemIdentifier(NamedTuple):
profile: Optional[str] profile: str | None
generation: int generation: int
specialisation: Optional[str] specialisation: str | None
def copy_if_not_exists(source: str, dest: str) -> None: def copy_if_not_exists(source: str, dest: str) -> None:
@ -29,13 +27,13 @@ def copy_if_not_exists(source: str, dest: str) -> None:
shutil.copyfile(source, dest) shutil.copyfile(source, dest)
def generation_dir(profile: Optional[str], generation: int) -> str: def generation_dir(profile: str | None, generation: int) -> str:
if profile: if profile:
return "/nix/var/nix/profiles/system-profiles/%s-%d-link" % (profile, generation) return "/nix/var/nix/profiles/system-profiles/%s-%d-link" % (profile, generation)
else: else:
return "/nix/var/nix/profiles/system-%d-link" % (generation) return "/nix/var/nix/profiles/system-%d-link" % (generation)
def system_dir(profile: Optional[str], generation: int, specialisation: Optional[str]) -> str: def system_dir(profile: str | None, generation: int, specialisation: str | None) -> str:
d = generation_dir(profile, generation) d = generation_dir(profile, generation)
if specialisation: if specialisation:
return os.path.join(d, "specialisation", specialisation) return os.path.join(d, "specialisation", specialisation)
@ -49,7 +47,7 @@ initrd {initrd}
options {kernel_params} options {kernel_params}
""" """
def generation_conf_filename(profile: Optional[str], generation: int, specialisation: Optional[str]) -> str: def generation_conf_filename(profile: str | None, generation: int, specialisation: str | None) -> str:
pieces = [ pieces = [
"nixos", "nixos",
profile or None, profile or None,
@ -60,22 +58,24 @@ def generation_conf_filename(profile: Optional[str], generation: int, specialisa
return "-".join(p for p in pieces if p) + ".conf" return "-".join(p for p in pieces if p) + ".conf"
def write_loader_conf(profile: Optional[str], generation: int, specialisation: Optional[str]) -> None: def write_loader_conf(profile: str | None, generation: int, specialisation: str | None) -> None:
with open("@efiSysMountPoint@/loader/loader.conf.tmp", 'w') as f: with open("@efiSysMountPoint@/loader/loader.conf.tmp", 'w') as f:
if "@timeout@" != "": if "@timeout@" != "":
f.write("timeout @timeout@\n") f.write("timeout @timeout@\n")
f.write("default %s\n" % generation_conf_filename(profile, generation, specialisation)) f.write("default %s\n" % generation_conf_filename(profile, generation, specialisation))
if not @editor@: if not @editor@:
f.write("editor 0\n"); f.write("editor 0\n")
f.write("console-mode @consoleMode@\n"); f.write("console-mode @consoleMode@\n")
f.flush()
os.fsync(f.fileno())
os.rename("@efiSysMountPoint@/loader/loader.conf.tmp", "@efiSysMountPoint@/loader/loader.conf") os.rename("@efiSysMountPoint@/loader/loader.conf.tmp", "@efiSysMountPoint@/loader/loader.conf")
def profile_path(profile: Optional[str], generation: int, specialisation: Optional[str], name: str) -> str: def profile_path(profile: str | None, generation: int, specialisation: str | None, name: str) -> str:
return os.path.realpath("%s/%s" % (system_dir(profile, generation, specialisation), name)) return os.path.realpath("%s/%s" % (system_dir(profile, generation, specialisation), name))
def copy_from_profile(profile: Optional[str], generation: int, specialisation: Optional[str], name: str, dry_run: bool = False) -> str: def copy_from_profile(profile: str | None, generation: int, specialisation: str | None, name: str, dry_run: bool = False) -> str:
store_file_path = profile_path(profile, generation, specialisation, name) store_file_path = profile_path(profile, generation, specialisation, name)
suffix = os.path.basename(store_file_path) suffix = os.path.basename(store_file_path)
store_dir = os.path.basename(os.path.dirname(store_file_path)) store_dir = os.path.basename(os.path.dirname(store_file_path))
@ -85,7 +85,7 @@ def copy_from_profile(profile: Optional[str], generation: int, specialisation: O
return efi_file_path return efi_file_path
def describe_generation(profile: Optional[str], generation: int, specialisation: Optional[str]) -> str: def describe_generation(profile: str | None, generation: int, specialisation: str | None) -> str:
try: try:
with open(profile_path(profile, generation, specialisation, "nixos-version")) as f: with open(profile_path(profile, generation, specialisation, "nixos-version")) as f:
nixos_version = f.read() nixos_version = f.read()
@ -106,7 +106,7 @@ def describe_generation(profile: Optional[str], generation: int, specialisation:
return description return description
def write_entry(profile: Optional[str], generation: int, specialisation: Optional[str], def write_entry(profile: str | None, generation: int, specialisation: str | None,
machine_id: str, current: bool) -> None: machine_id: str, current: bool) -> None:
kernel = copy_from_profile(profile, generation, specialisation, "kernel") kernel = copy_from_profile(profile, generation, specialisation, "kernel")
initrd = copy_from_profile(profile, generation, specialisation, "initrd") initrd = copy_from_profile(profile, generation, specialisation, "initrd")
@ -145,18 +145,12 @@ def write_entry(profile: Optional[str], generation: int, specialisation: Optiona
description=describe_generation(profile, generation, specialisation))) description=describe_generation(profile, generation, specialisation)))
if machine_id is not None: if machine_id is not None:
f.write("machine-id %s\n" % machine_id) f.write("machine-id %s\n" % machine_id)
f.flush()
os.fsync(f.fileno())
os.rename(tmp_path, entry_file) os.rename(tmp_path, entry_file)
def mkdir_p(path: str) -> None: def get_generations(profile: str | None = None) -> list[SystemIdentifier]:
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise
def get_generations(profile: Optional[str] = None) -> List[SystemIdentifier]:
gen_list = subprocess.check_output([ gen_list = subprocess.check_output([
"@nix@/bin/nix-env", "@nix@/bin/nix-env",
"--list-generations", "--list-generations",
@ -179,7 +173,7 @@ def get_generations(profile: Optional[str] = None) -> List[SystemIdentifier]:
return configurations[-configurationLimit:] return configurations[-configurationLimit:]
def get_specialisations(profile: Optional[str], generation: int, _: Optional[str]) -> List[SystemIdentifier]: def get_specialisations(profile: str | None, generation: int, _: str | None) -> list[SystemIdentifier]:
specialisations_dir = os.path.join( specialisations_dir = os.path.join(
system_dir(profile, generation, None), "specialisation") system_dir(profile, generation, None), "specialisation")
if not os.path.exists(specialisations_dir): if not os.path.exists(specialisations_dir):
@ -187,9 +181,9 @@ def get_specialisations(profile: Optional[str], generation: int, _: Optional[str
return [SystemIdentifier(profile, generation, spec) for spec in os.listdir(specialisations_dir)] return [SystemIdentifier(profile, generation, spec) for spec in os.listdir(specialisations_dir)]
def remove_old_entries(gens: List[SystemIdentifier]) -> None: def remove_old_entries(gens: list[SystemIdentifier]) -> None:
rex_profile = re.compile("^@efiSysMountPoint@/loader/entries/nixos-(.*)-generation-.*\.conf$") rex_profile = re.compile(r"^@efiSysMountPoint@/loader/entries/nixos-(.*)-generation-.*\.conf$")
rex_generation = re.compile("^@efiSysMountPoint@/loader/entries/nixos.*-generation-([0-9]+)(-specialisation-.*)?\.conf$") rex_generation = re.compile(r"^@efiSysMountPoint@/loader/entries/nixos.*-generation-([0-9]+)(-specialisation-.*)?\.conf$")
known_paths = [] known_paths = []
for gen in gens: for gen in gens:
known_paths.append(copy_from_profile(*gen, "kernel", True)) known_paths.append(copy_from_profile(*gen, "kernel", True))
@ -210,7 +204,7 @@ def remove_old_entries(gens: List[SystemIdentifier]) -> None:
os.unlink(path) os.unlink(path)
def get_profiles() -> List[str]: def get_profiles() -> list[str]:
if os.path.isdir("/nix/var/nix/profiles/system-profiles/"): if os.path.isdir("/nix/var/nix/profiles/system-profiles/"):
return [x return [x
for x in os.listdir("/nix/var/nix/profiles/system-profiles/") for x in os.listdir("/nix/var/nix/profiles/system-profiles/")
@ -218,11 +212,7 @@ def get_profiles() -> List[str]:
else: else:
return [] return []
def main() -> None: def install_bootloader(args: argparse.Namespace) -> None:
parser = argparse.ArgumentParser(description='Update @distroName@-related systemd-boot files')
parser.add_argument('default_config', metavar='DEFAULT-CONFIG', help='The default @distroName@ config to boot')
args = parser.parse_args()
try: try:
with open("/etc/machine-id") as machine_file: with open("/etc/machine-id") as machine_file:
machine_id = machine_file.readlines()[0] machine_id = machine_file.readlines()[0]
@ -273,21 +263,15 @@ def main() -> None:
if available_match is None: if available_match is None:
raise Exception("could not determine systemd-boot version") raise Exception("could not determine systemd-boot version")
installed_version = version.parse(installed_match.group(1)) installed_version = installed_match.group(1)
available_version = version.parse(available_match.group(1)) available_version = available_match.group(1)
# systemd 252 has a regression that leaves some machines unbootable, so we skip that update.
# The fix is in 252.2
# See https://github.com/systemd/systemd/issues/25363 and https://github.com/NixOS/nixpkgs/pull/201558#issuecomment-1348603263
if installed_version < available_version: if installed_version < available_version:
if version.parse('252') <= available_version < version.parse('252.2'): print("updating systemd-boot from %s to %s" % (installed_version, available_version))
print("skipping systemd-boot update to %s because of known regression" % available_version) subprocess.check_call(["@systemd@/bin/bootctl", "--esp-path=@efiSysMountPoint@", "update"])
else:
print("updating systemd-boot from %s to %s" % (installed_version, available_version))
subprocess.check_call(["@systemd@/bin/bootctl", "--esp-path=@efiSysMountPoint@"] + bootctl_flags + ["update"])
mkdir_p("@efiSysMountPoint@/efi/nixos") os.makedirs("@efiSysMountPoint@/efi/nixos", exist_ok=True)
mkdir_p("@efiSysMountPoint@/loader/entries") os.makedirs("@efiSysMountPoint@/loader/entries", exist_ok=True)
gens = get_generations() gens = get_generations()
for profile in get_profiles(): for profile in get_profiles():
@ -324,17 +308,26 @@ def main() -> None:
os.rmdir(actual_root) os.rmdir(actual_root)
os.rmdir(root) os.rmdir(root)
mkdir_p("@efiSysMountPoint@/efi/nixos/.extra-files") os.makedirs("@efiSysMountPoint@/efi/nixos/.extra-files", exist_ok=True)
subprocess.check_call("@copyExtraFiles@") subprocess.check_call("@copyExtraFiles@")
# Since fat32 provides little recovery facilities after a crash,
# it can leave the system in an unbootable state, when a crash/outage def main() -> None:
# happens shortly after an update. To decrease the likelihood of this parser = argparse.ArgumentParser(description='Update @distroName@-related systemd-boot files')
# event sync the efi filesystem after each update. parser.add_argument('default_config', metavar='DEFAULT-CONFIG', help='The default @distroName@ config to boot')
rc = libc.syncfs(os.open("@efiSysMountPoint@", os.O_RDONLY)) args = parser.parse_args()
if rc != 0:
print("could not sync @efiSysMountPoint@: {}".format(os.strerror(rc)), file=sys.stderr) try:
install_bootloader(args)
finally:
# Since fat32 provides little recovery facilities after a crash,
# it can leave the system in an unbootable state, when a crash/outage
# happens shortly after an update. To decrease the likelihood of this
# event sync the efi filesystem after each update.
rc = libc.syncfs(os.open("@efiSysMountPoint@", os.O_RDONLY))
if rc != 0:
print("could not sync @efiSysMountPoint@: {}".format(os.strerror(rc)), file=sys.stderr)
if __name__ == '__main__': if __name__ == '__main__':

View file

@ -7,14 +7,12 @@ let
efi = config.boot.loader.efi; efi = config.boot.loader.efi;
python3 = pkgs.python3.withPackages (ps: [ ps.packaging ]);
systemdBootBuilder = pkgs.substituteAll { systemdBootBuilder = pkgs.substituteAll {
src = ./systemd-boot-builder.py; src = ./systemd-boot-builder.py;
isExecutable = true; isExecutable = true;
inherit python3; inherit (pkgs) python3;
systemd = config.systemd.package; systemd = config.systemd.package;
@ -52,7 +50,7 @@ let
}; };
checkedSystemdBootBuilder = pkgs.runCommand "systemd-boot" { checkedSystemdBootBuilder = pkgs.runCommand "systemd-boot" {
nativeBuildInputs = [ pkgs.mypy python3 ]; nativeBuildInputs = [ pkgs.mypy ];
} '' } ''
install -m755 ${systemdBootBuilder} $out install -m755 ${systemdBootBuilder} $out
mypy \ mypy \

View file

@ -61,8 +61,6 @@ let
MACAddress = i.macAddress; MACAddress = i.macAddress;
} // optionalAttrs (i.mtu != null) { } // optionalAttrs (i.mtu != null) {
MTUBytes = toString i.mtu; MTUBytes = toString i.mtu;
} // optionalAttrs (i.wakeOnLan.enable == true) {
WakeOnLan = concatStringsSep " " i.wakeOnLan.policy;
}; };
}; };
in listToAttrs (map createNetworkLink interfaces); in listToAttrs (map createNetworkLink interfaces);

View file

@ -28,18 +28,20 @@ let
# TODO: warn the user that any address configured on those interfaces will be useless # TODO: warn the user that any address configured on those interfaces will be useless
++ concatMap (i: attrNames (filterAttrs (_: config: config.type != "internal") i.interfaces)) (attrValues cfg.vswitches); ++ concatMap (i: attrNames (filterAttrs (_: config: config.type != "internal") i.interfaces)) (attrValues cfg.vswitches);
genericNetwork = override: defaultGateways = mkMerge (forEach [ cfg.defaultGateway cfg.defaultGateway6 ] (gateway:
let gateway = optional (cfg.defaultGateway != null && (cfg.defaultGateway.address or "") != "") cfg.defaultGateway.address optionalAttrs (gateway != null && gateway.interface != null) {
++ optional (cfg.defaultGateway6 != null && (cfg.defaultGateway6.address or "") != "") cfg.defaultGateway6.address; networks."40-${gateway.interface}" = {
makeGateway = gateway: { matchConfig.Name = gateway.interface;
routes = [{
routeConfig = { routeConfig = {
Gateway = gateway; Gateway = gateway.address;
GatewayOnLink = false; } // optionalAttrs (gateway.metric != null) {
Metric = gateway.metric;
}; };
}; }];
in optionalAttrs (gateway != [ ]) { };
routes = override (map makeGateway gateway); }
}; ));
genericDhcpNetworks = initrd: mkIf cfg.useDHCP { genericDhcpNetworks = initrd: mkIf cfg.useDHCP {
networks."99-ethernet-default-dhcp" = { networks."99-ethernet-default-dhcp" = {
@ -86,10 +88,10 @@ let
}; };
}; };
}); });
networks."40-${i.name}" = mkMerge [ (genericNetwork id) { networks."40-${i.name}" = {
name = mkDefault i.name; name = mkDefault i.name;
DHCP = mkForce (dhcpStr DHCP = mkForce (dhcpStr
(if i.useDHCP != null then i.useDHCP else false)); (if i.useDHCP != null then i.useDHCP else (config.networking.useDHCP && i.ipv4.addresses == [ ])));
address = forEach (interfaceIps i) address = forEach (interfaceIps i)
(ip: "${ip.address}/${toString ip.prefixLength}"); (ip: "${ip.address}/${toString ip.prefixLength}");
routes = forEach (interfaceRoutes i) routes = forEach (interfaceRoutes i)
@ -158,7 +160,7 @@ let
} // optionalAttrs (i.mtu != null) { } // optionalAttrs (i.mtu != null) {
MTUBytes = toString i.mtu; MTUBytes = toString i.mtu;
}; };
}]; };
})); }));
bridgeNetworks = mkMerge (flip mapAttrsToList cfg.bridges (name: bridge: { bridgeNetworks = mkMerge (flip mapAttrsToList cfg.bridges (name: bridge: {
@ -169,10 +171,10 @@ let
}; };
}; };
networks = listToAttrs (forEach bridge.interfaces (bi: networks = listToAttrs (forEach bridge.interfaces (bi:
nameValuePair "40-${bi}" (mkMerge [ (genericNetwork (mkOverride 999)) { nameValuePair "40-${bi}" {
DHCP = mkOverride 0 (dhcpStr false); DHCP = mkOverride 0 (dhcpStr false);
networkConfig.Bridge = name; networkConfig.Bridge = name;
} ]))); }));
})); }));
vlanNetworks = mkMerge (flip mapAttrsToList cfg.vlans (name: vlan: { vlanNetworks = mkMerge (flip mapAttrsToList cfg.vlans (name: vlan: {
@ -183,9 +185,9 @@ let
}; };
vlanConfig.Id = vlan.id; vlanConfig.Id = vlan.id;
}; };
networks."40-${vlan.interface}" = (mkMerge [ (genericNetwork (mkOverride 999)) { networks."40-${vlan.interface}" = {
vlan = [ name ]; vlan = [ name ];
} ]); };
})); }));
in in
@ -198,6 +200,7 @@ in
# initrd.systemd.network.enable. By setting the latter and not the # initrd.systemd.network.enable. By setting the latter and not the
# former, the user retains full control over the configuration. # former, the user retains full control over the configuration.
boot.initrd.systemd.network = mkMerge [ boot.initrd.systemd.network = mkMerge [
defaultGateways
(genericDhcpNetworks true) (genericDhcpNetworks true)
interfaceNetworks interfaceNetworks
bridgeNetworks bridgeNetworks
@ -214,11 +217,11 @@ in
assertion = cfg.defaultGatewayWindowSize == null; assertion = cfg.defaultGatewayWindowSize == null;
message = "networking.defaultGatewayWindowSize is not supported by networkd."; message = "networking.defaultGatewayWindowSize is not supported by networkd.";
} { } {
assertion = cfg.defaultGateway == null || cfg.defaultGateway.interface == null; assertion = cfg.defaultGateway != null -> cfg.defaultGateway.interface != null;
message = "networking.defaultGateway.interface is not supported by networkd."; message = "networking.defaultGateway.interface is not optional when using networkd.";
} { } {
assertion = cfg.defaultGateway6 == null || cfg.defaultGateway6.interface == null; assertion = cfg.defaultGateway6 != null -> cfg.defaultGateway6.interface != null;
message = "networking.defaultGateway6.interface is not supported by networkd."; message = "networking.defaultGateway6.interface is not optional when using networkd.";
} ] ++ flip mapAttrsToList cfg.bridges (n: { rstp, ... }: { } ] ++ flip mapAttrsToList cfg.bridges (n: { rstp, ... }: {
assertion = !rstp; assertion = !rstp;
message = "networking.bridges.${n}.rstp is not supported by networkd."; message = "networking.bridges.${n}.rstp is not supported by networkd.";
@ -233,6 +236,7 @@ in
mkMerge [ { mkMerge [ {
enable = true; enable = true;
} }
defaultGateways
(genericDhcpNetworks false) (genericDhcpNetworks false)
interfaceNetworks interfaceNetworks
bridgeNetworks bridgeNetworks
@ -302,10 +306,10 @@ in
}; };
networks = listToAttrs (forEach bond.interfaces (bi: networks = listToAttrs (forEach bond.interfaces (bi:
nameValuePair "40-${bi}" (mkMerge [ (genericNetwork (mkOverride 999)) { nameValuePair "40-${bi}" {
DHCP = mkOverride 0 (dhcpStr false); DHCP = mkOverride 0 (dhcpStr false);
networkConfig.Bond = name; networkConfig.Bond = name;
} ]))); }));
}))) })))
(mkMerge (flip mapAttrsToList cfg.macvlans (name: macvlan: { (mkMerge (flip mapAttrsToList cfg.macvlans (name: macvlan: {
netdevs."40-${name}" = { netdevs."40-${name}" = {
@ -315,9 +319,9 @@ in
}; };
macvlanConfig = optionalAttrs (macvlan.mode != null) { Mode = macvlan.mode; }; macvlanConfig = optionalAttrs (macvlan.mode != null) { Mode = macvlan.mode; };
}; };
networks."40-${macvlan.interface}" = (mkMerge [ (genericNetwork (mkOverride 999)) { networks."40-${macvlan.interface}" = {
macvlan = [ name ]; macvlan = [ name ];
} ]); };
}))) })))
(mkMerge (flip mapAttrsToList cfg.fooOverUDP (name: fou: { (mkMerge (flip mapAttrsToList cfg.fooOverUDP (name: fou: {
netdevs."40-${name}" = { netdevs."40-${name}" = {
@ -362,9 +366,9 @@ in
}))); })));
}; };
networks = mkIf (sit.dev != null) { networks = mkIf (sit.dev != null) {
"40-${sit.dev}" = (mkMerge [ (genericNetwork (mkOverride 999)) { "40-${sit.dev}" = {
tunnel = [ name ]; tunnel = [ name ];
} ]); };
}; };
}))) })))
(mkMerge (flip mapAttrsToList cfg.greTunnels (name: gre: { (mkMerge (flip mapAttrsToList cfg.greTunnels (name: gre: {
@ -383,9 +387,9 @@ in
}); });
}; };
networks = mkIf (gre.dev != null) { networks = mkIf (gre.dev != null) {
"40-${gre.dev}" = (mkMerge [ (genericNetwork (mkOverride 999)) { "40-${gre.dev}" = {
tunnel = [ name ]; tunnel = [ name ];
} ]); };
}; };
}))) })))
vlanNetworks vlanNetworks

View file

@ -190,9 +190,11 @@ let
type = types.nullOr types.bool; type = types.nullOr types.bool;
default = null; default = null;
description = lib.mdDoc '' description = lib.mdDoc ''
Whether this interface should be configured with dhcp. Whether this interface should be configured with DHCP. Overrides the
Null implies the old behavior which depends on whether ip addresses default set by {option}`networking.useDHCP`. If `null` (the default),
are specified or not. DHCP is enabled if the interface has no IPv4 addresses configured
with {option}`networking.interfaces.<name>.ipv4.addresses`, and
disabled otherwise.
''; '';
}; };
@ -640,9 +642,7 @@ in
} ]; } ];
}; };
description = lib.mdDoc '' description = lib.mdDoc ''
The configuration for each network interface. If The configuration for each network interface.
{option}`networking.useDHCP` is true, then every
interface not listed here will be configured using DHCP.
Please note that {option}`systemd.network.netdevs` has more features Please note that {option}`systemd.network.netdevs` has more features
and is better maintained. When building new things, it is advised to and is better maintained. When building new things, it is advised to
@ -1304,8 +1304,8 @@ in
default = true; default = true;
description = lib.mdDoc '' description = lib.mdDoc ''
Whether to use DHCP to obtain an IP address and other Whether to use DHCP to obtain an IP address and other
configuration for all network interfaces that are not manually configuration for all network interfaces that do not have any manually
configured. configured IPv4 addresses.
''; '';
}; };
@ -1344,7 +1344,10 @@ in
config = { config = {
warnings = concatMap (i: i.warnings) interfaces; warnings = (concatMap (i: i.warnings) interfaces) ++ (lib.optional
(config.systemd.network.enable && cfg.useDHCP && !cfg.useNetworkd) ''
The combination of `systemd.network.enable = true`, `networking.useDHCP = true` and `networking.useNetworkd = false` can cause both networkd and dhcpcd to manage the same interfaces. This can lead to loss of networking. It is recommended you choose only one of networkd (by also enabling `networking.useNetworkd`) or scripting (by disabling `systemd.network.enable`)
'');
assertions = assertions =
(forEach interfaces (i: { (forEach interfaces (i: {
@ -1460,6 +1463,16 @@ in
] ]
++ bridgeStp; ++ bridgeStp;
# Wake-on-LAN configuration is shared by the scripted and networkd backends.
systemd.network.links = pipe interfaces [
(filter (i: i.wakeOnLan.enable))
(map (i: nameValuePair "40-${i.name}" {
matchConfig.OriginalName = i.name;
linkConfig.WakeOnLan = concatStringsSep " " i.wakeOnLan.policy;
}))
listToAttrs
];
# The network-interfaces target is kept for backwards compatibility. # The network-interfaces target is kept for backwards compatibility.
# New modules must NOT use it. # New modules must NOT use it.
systemd.targets.network-interfaces = systemd.targets.network-interfaces =

View file

@ -128,7 +128,7 @@ in
boot.consoleLogLevel = 7; boot.consoleLogLevel = 7;
# Prevent tests from accessing the Internet. # Prevent tests from accessing the Internet.
networking.defaultGateway = mkOverride 150 ""; networking.defaultGateway = mkOverride 150 null;
networking.nameservers = mkOverride 150 [ ]; networking.nameservers = mkOverride 150 [ ];
system.requiredKernelConfig = with config.lib.kernelConfig; [ system.requiredKernelConfig = with config.lib.kernelConfig; [

View file

@ -9,15 +9,16 @@ in {
options = { options = {
virtualisation.lxc = { virtualisation.lxc = {
privilegedContainer = lib.mkOption { nestedContainer = lib.mkEnableOption (lib.mdDoc ''
type = lib.types.bool; Whether this container is configured as a nested container. On LXD containers this is recommended
default = false; for all containers and is enabled with `security.nesting = true`.
description = lib.mdDoc '' '');
Whether this LXC container will be running as a privileged container or not. If set to `true` then
additional configuration will be applied to the `systemd` instance running within the container as privilegedContainer = lib.mkEnableOption (lib.mdDoc ''
recommended by [distrobuilder](https://linuxcontainers.org/distrobuilder/introduction/). Whether this LXC container will be running as a privileged container or not. If set to `true` then
''; additional configuration will be applied to the `systemd` instance running within the container as
}; recommended by [distrobuilder](https://linuxcontainers.org/distrobuilder/introduction/).
'');
}; };
}; };
@ -68,6 +69,8 @@ in {
ln -fs "$1/init" /sbin/init ln -fs "$1/init" /sbin/init
''; '';
systemd.additionalUpstreamSystemUnits = lib.mkIf cfg.nestedContainer ["systemd-udev-trigger.service"];
# Add the overrides from lxd distrobuilder # Add the overrides from lxd distrobuilder
# https://github.com/lxc/distrobuilder/blob/05978d0d5a72718154f1525c7d043e090ba7c3e0/distrobuilder/main.go#L630 # https://github.com/lxc/distrobuilder/blob/05978d0d5a72718154f1525c7d043e090ba7c3e0/distrobuilder/main.go#L630
systemd.packages = [ systemd.packages = [

View file

@ -145,9 +145,7 @@ in {
}; };
ui = { ui = {
enable = lib.mkEnableOption (lib.mdDoc '' enable = lib.mkEnableOption (lib.mdDoc "(experimental) LXD UI");
Enables the (experimental) LXD UI.
'');
package = lib.mkPackageOption pkgs.lxd-unwrapped "ui" { }; package = lib.mkPackageOption pkgs.lxd-unwrapped "ui" { };
}; };

View file

@ -649,6 +649,15 @@ in
''; '';
}; };
restartIfChanged = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Whether the container should be restarted during a NixOS
configuration switch if its definition has changed.
'';
};
timeoutStartSec = mkOption { timeoutStartSec = mkOption {
type = types.str; type = types.str;
default = "1min"; default = "1min";
@ -826,7 +835,7 @@ in
containerConfig.path containerConfig.path
config.environment.etc."${configurationDirectoryName}/${name}.conf".source config.environment.etc."${configurationDirectoryName}/${name}.conf".source
]; ];
restartIfChanged = true; restartIfChanged = containerConfig.restartIfChanged;
} }
) )
)) config.containers) )) config.containers)

View file

@ -67,8 +67,16 @@ in rec {
(onSystems ["x86_64-linux"] "nixos.tests.docker") (onSystems ["x86_64-linux"] "nixos.tests.docker")
(onFullSupported "nixos.tests.ecryptfs") (onFullSupported "nixos.tests.ecryptfs")
(onFullSupported "nixos.tests.env") (onFullSupported "nixos.tests.env")
(onFullSupported "nixos.tests.firefox-esr")
(onFullSupported "nixos.tests.firefox") # Way too many manual retries required on Hydra.
# Apparently it's hard to track down the cause.
# So let's depend just on the packages for now.
#(onFullSupported "nixos.tests.firefox-esr")
#(onFullSupported "nixos.tests.firefox")
# Note: only -unwrapped variants have a Hydra job.
(onFullSupported "nixpkgs.firefox-esr-unwrapped")
(onFullSupported "nixpkgs.firefox-unwrapped")
(onFullSupported "nixos.tests.firewall") (onFullSupported "nixos.tests.firewall")
(onFullSupported "nixos.tests.fontconfig-default-fonts") (onFullSupported "nixos.tests.fontconfig-default-fonts")
(onFullSupported "nixos.tests.gnome") (onFullSupported "nixos.tests.gnome")

View file

@ -216,6 +216,7 @@ in {
darling = handleTest ./darling.nix {}; darling = handleTest ./darling.nix {};
dae = handleTest ./dae.nix {}; dae = handleTest ./dae.nix {};
dconf = handleTest ./dconf.nix {}; dconf = handleTest ./dconf.nix {};
deconz = handleTest ./deconz.nix {};
deepin = handleTest ./deepin.nix {}; deepin = handleTest ./deepin.nix {};
deluge = handleTest ./deluge.nix {}; deluge = handleTest ./deluge.nix {};
dendrite = handleTest ./matrix/dendrite.nix {}; dendrite = handleTest ./matrix/dendrite.nix {};
@ -274,6 +275,7 @@ in {
fcitx5 = handleTest ./fcitx5 {}; fcitx5 = handleTest ./fcitx5 {};
fenics = handleTest ./fenics.nix {}; fenics = handleTest ./fenics.nix {};
ferm = handleTest ./ferm.nix {}; ferm = handleTest ./ferm.nix {};
ferretdb = handleTest ./ferretdb.nix {};
firefox = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox; }; firefox = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox; };
firefox-beta = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-beta; }; firefox-beta = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-beta; };
firefox-devedition = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-devedition; }; firefox-devedition = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-devedition; };
@ -424,7 +426,7 @@ in {
ksm = handleTest ./ksm.nix {}; ksm = handleTest ./ksm.nix {};
kthxbye = handleTest ./kthxbye.nix {}; kthxbye = handleTest ./kthxbye.nix {};
kubernetes = handleTestOn ["x86_64-linux"] ./kubernetes {}; kubernetes = handleTestOn ["x86_64-linux"] ./kubernetes {};
kubo = runTest ./kubo.nix; kubo = import ./kubo { inherit recurseIntoAttrs runTest; };
ladybird = handleTest ./ladybird.nix {}; ladybird = handleTest ./ladybird.nix {};
languagetool = handleTest ./languagetool.nix {}; languagetool = handleTest ./languagetool.nix {};
latestKernel.login = handleTest ./login.nix { latestKernel = true; }; latestKernel.login = handleTest ./login.nix { latestKernel = true; };
@ -432,6 +434,7 @@ in {
lemmy = handleTest ./lemmy.nix {}; lemmy = handleTest ./lemmy.nix {};
libinput = handleTest ./libinput.nix {}; libinput = handleTest ./libinput.nix {};
libreddit = handleTest ./libreddit.nix {}; libreddit = handleTest ./libreddit.nix {};
librenms = handleTest ./librenms.nix {};
libresprite = handleTest ./libresprite.nix {}; libresprite = handleTest ./libresprite.nix {};
libreswan = handleTest ./libreswan.nix {}; libreswan = handleTest ./libreswan.nix {};
librewolf = handleTest ./firefox.nix { firefoxPackage = pkgs.librewolf; }; librewolf = handleTest ./firefox.nix { firefoxPackage = pkgs.librewolf; };
@ -736,8 +739,8 @@ in {
spark = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./spark {}; spark = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./spark {};
sqlite3-to-mysql = handleTest ./sqlite3-to-mysql.nix {}; sqlite3-to-mysql = handleTest ./sqlite3-to-mysql.nix {};
sslh = handleTest ./sslh.nix {}; sslh = handleTest ./sslh.nix {};
sssd = handleTestOn ["x86_64-linux"] ./sssd.nix {}; sssd = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./sssd.nix {};
sssd-ldap = handleTestOn ["x86_64-linux"] ./sssd-ldap.nix {}; sssd-ldap = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./sssd-ldap.nix {};
stalwart-mail = handleTest ./stalwart-mail.nix {}; stalwart-mail = handleTest ./stalwart-mail.nix {};
stargazer = runTest ./web-servers/stargazer.nix; stargazer = runTest ./web-servers/stargazer.nix;
starship = handleTest ./starship.nix {}; starship = handleTest ./starship.nix {};
@ -757,6 +760,7 @@ in {
syncthing = handleTest ./syncthing.nix {}; syncthing = handleTest ./syncthing.nix {};
syncthing-no-settings = handleTest ./syncthing-no-settings.nix {}; syncthing-no-settings = handleTest ./syncthing-no-settings.nix {};
syncthing-init = handleTest ./syncthing-init.nix {}; syncthing-init = handleTest ./syncthing-init.nix {};
syncthing-many-devices = handleTest ./syncthing-many-devices.nix {};
syncthing-relay = handleTest ./syncthing-relay.nix {}; syncthing-relay = handleTest ./syncthing-relay.nix {};
systemd = handleTest ./systemd.nix {}; systemd = handleTest ./systemd.nix {};
systemd-analyze = handleTest ./systemd-analyze.nix {}; systemd-analyze = handleTest ./systemd-analyze.nix {};
@ -805,6 +809,7 @@ in {
systemd-userdbd = handleTest ./systemd-userdbd.nix {}; systemd-userdbd = handleTest ./systemd-userdbd.nix {};
systemd-homed = handleTest ./systemd-homed.nix {}; systemd-homed = handleTest ./systemd-homed.nix {};
tandoor-recipes = handleTest ./tandoor-recipes.nix {}; tandoor-recipes = handleTest ./tandoor-recipes.nix {};
tang = handleTest ./tang.nix {};
taskserver = handleTest ./taskserver.nix {}; taskserver = handleTest ./taskserver.nix {};
tayga = handleTest ./tayga.nix {}; tayga = handleTest ./tayga.nix {};
teeworlds = handleTest ./teeworlds.nix {}; teeworlds = handleTest ./teeworlds.nix {};
@ -819,6 +824,7 @@ in {
timezone = handleTest ./timezone.nix {}; timezone = handleTest ./timezone.nix {};
tinc = handleTest ./tinc {}; tinc = handleTest ./tinc {};
tinydns = handleTest ./tinydns.nix {}; tinydns = handleTest ./tinydns.nix {};
tinyproxy = handleTest ./tinyproxy.nix {};
tinywl = handleTest ./tinywl.nix {}; tinywl = handleTest ./tinywl.nix {};
tmate-ssh-server = handleTest ./tmate-ssh-server.nix { }; tmate-ssh-server = handleTest ./tmate-ssh-server.nix { };
tomcat = handleTest ./tomcat.nix {}; tomcat = handleTest ./tomcat.nix {};
@ -855,8 +861,7 @@ in {
uwsgi = handleTest ./uwsgi.nix {}; uwsgi = handleTest ./uwsgi.nix {};
v2ray = handleTest ./v2ray.nix {}; v2ray = handleTest ./v2ray.nix {};
varnish60 = handleTest ./varnish.nix { package = pkgs.varnish60; }; varnish60 = handleTest ./varnish.nix { package = pkgs.varnish60; };
varnish72 = handleTest ./varnish.nix { package = pkgs.varnish72; }; varnish74 = handleTest ./varnish.nix { package = pkgs.varnish74; };
varnish73 = handleTest ./varnish.nix { package = pkgs.varnish73; };
vault = handleTest ./vault.nix {}; vault = handleTest ./vault.nix {};
vault-agent = handleTest ./vault-agent.nix {}; vault-agent = handleTest ./vault-agent.nix {};
vault-dev = handleTest ./vault-dev.nix {}; vault-dev = handleTest ./vault-dev.nix {};

View file

@ -1,11 +1,6 @@
# Test ensures buildbot master comes up correctly and workers can connect # Test ensures buildbot master comes up correctly and workers can connect
{ system ? builtins.currentSystem, import ./make-test-python.nix ({ pkgs, ... }: {
config ? {},
pkgs ? import ../.. { inherit system config; }
}:
import ./make-test-python.nix {
name = "buildbot"; name = "buildbot";
nodes = { nodes = {
@ -110,4 +105,4 @@ import ./make-test-python.nix {
''; '';
meta.maintainers = with pkgs.lib.maintainers; [ ]; meta.maintainers = with pkgs.lib.maintainers; [ ];
} {} })

View file

@ -0,0 +1,28 @@
import ./make-test-python.nix ({ pkgs, lib, ... }:
let
httpPort = 800;
in
{
name = "deconz";
meta.maintainers = with lib.maintainers; [
bjornfor
];
nodes.machine = { config, pkgs, lib, ... }: {
nixpkgs.config.allowUnfree = true;
services.deconz = {
enable = true;
inherit httpPort;
extraArgs = [
"--dbg-err=2"
"--dbg-info=2"
];
};
};
testScript = ''
machine.wait_for_unit("deconz.service")
machine.succeed("curl -sfL http://localhost:${toString httpPort}")
'';
})

View file

@ -3,7 +3,7 @@
import ./make-test-python.nix ({ pkgs, ...} : { import ./make-test-python.nix ({ pkgs, ...} : {
name = "docker-registry"; name = "docker-registry";
meta = with pkgs.lib.maintainers; { meta = with pkgs.lib.maintainers; {
maintainers = [ globin ma27 ironpinguin ]; maintainers = [ globin ironpinguin ];
}; };
nodes = { nodes = {

View file

@ -1,7 +1,7 @@
import ./make-test-python.nix ({ pkgs, lib, ...} : { import ./make-test-python.nix ({ pkgs, lib, ...} : {
name = "documize"; name = "documize";
meta = with pkgs.lib.maintainers; { meta = with pkgs.lib.maintainers; {
maintainers = [ ma27 ]; maintainers = [ ];
}; };
nodes.machine = { pkgs, ... }: { nodes.machine = { pkgs, ... }: {

Some files were not shown because too many files have changed in this diff Show more