Project import generated by Copybara.

GitOrigin-RevId: 439b1605227b8adb1357b55ce8529d541abbe9eb
This commit is contained in:
Default email 2021-08-06 00:33:18 +03:00
parent 1238334f21
commit a3bb8d7922
1780 changed files with 68955 additions and 26788 deletions

View file

@ -46,7 +46,6 @@
/nixos/default.nix @nbp @infinisil /nixos/default.nix @nbp @infinisil
/nixos/lib/from-env.nix @nbp @infinisil /nixos/lib/from-env.nix @nbp @infinisil
/nixos/lib/eval-config.nix @nbp @infinisil /nixos/lib/eval-config.nix @nbp @infinisil
/nixos/doc @ryantm
/nixos/doc/manual/configuration/abstractions.xml @nbp /nixos/doc/manual/configuration/abstractions.xml @nbp
/nixos/doc/manual/configuration/config-file.xml @nbp /nixos/doc/manual/configuration/config-file.xml @nbp
/nixos/doc/manual/configuration/config-syntax.xml @nbp /nixos/doc/manual/configuration/config-syntax.xml @nbp

View file

@ -70,6 +70,7 @@
"6.topic: nixos": "6.topic: nixos":
- nixos/**/* - nixos/**/*
- pkgs/os-specific/linux/nixos-rebuild/**/*
"6.topic: ocaml": "6.topic: ocaml":
- doc/languages-frameworks/ocaml.section.md - doc/languages-frameworks/ocaml.section.md

View file

@ -15,13 +15,13 @@ jobs:
ref: ${{ github.event.pull_request.head.sha }} ref: ${{ github.event.pull_request.head.sha }}
- name: Create backport PRs - name: Create backport PRs
# should be kept in sync with `version` # should be kept in sync with `version`
uses: zeebe-io/backport-action@2b994724142df0774855690db56bc6308fb99ffa uses: zeebe-io/backport-action@v0.0.5
with: with:
# Config README: https://github.com/zeebe-io/backport-action#backport-action # Config README: https://github.com/zeebe-io/backport-action#backport-action
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}
github_workspace: ${{ github.workspace }} github_workspace: ${{ github.workspace }}
# should be kept in sync with `uses` # should be kept in sync with `uses`
version: 2b994724142df0774855690db56bc6308fb99ffa version: v0.0.5
pull_description: |- pull_description: |-
Bot-based backport to `${target_branch}`, triggered by a label in #${pull_number}. Bot-based backport to `${target_branch}`, triggered by a label in #${pull_number}.

View file

@ -22,7 +22,7 @@ jobs:
with: with:
# explicitly enable sandbox # explicitly enable sandbox
extra_nix_config: sandbox = true extra_nix_config: sandbox = true
- uses: cachix/cachix-action@v9 - uses: cachix/cachix-action@v10
with: with:
# This cache is for the nixos/nixpkgs manual builds and should not be trusted or used elsewhere. # This cache is for the nixos/nixpkgs manual builds and should not be trusted or used elsewhere.
name: nixpkgs-ci name: nixpkgs-ci

View file

@ -22,7 +22,7 @@ jobs:
with: with:
# explicitly enable sandbox # explicitly enable sandbox
extra_nix_config: sandbox = true extra_nix_config: sandbox = true
- uses: cachix/cachix-action@v9 - uses: cachix/cachix-action@v10
with: with:
# This cache is for the nixos/nixpkgs manual builds and should not be trusted or used elsewhere. # This cache is for the nixos/nixpkgs manual builds and should not be trusted or used elsewhere.
name: nixpkgs-ci name: nixpkgs-ci

View file

@ -97,7 +97,8 @@ Foundation](https://nixos.org/nixos/foundation.html). To ensure the
continuity and expansion of the NixOS infrastructure, we are looking continuity and expansion of the NixOS infrastructure, we are looking
for donations to our organization. for donations to our organization.
You can donate to the NixOS foundation by using Open Collective: You can donate to the NixOS foundation through [SEPA bank
transfers](https://nixos.org/donate.html) or by using Open Collective:
<a href="https://opencollective.com/nixos#support"><img src="https://opencollective.com/nixos/tiers/supporter.svg?width=890" /></a> <a href="https://opencollective.com/nixos#support"><img src="https://opencollective.com/nixos/tiers/supporter.svg?width=890" /></a>

View file

@ -110,7 +110,7 @@ overrides = self: super: rec {
haskell-mode = self.melpaPackages.haskell-mode; haskell-mode = self.melpaPackages.haskell-mode;
... ...
}; };
((emacsPackagesFor emacs).overrideScope' overrides).emacs.pkgs.withPackages ((emacsPackagesFor emacs).overrideScope' overrides).withPackages
(p: with p; [ (p: with p; [
# here both these package will use haskell-mode of our own choice # here both these package will use haskell-mode of our own choice
ghc-mod ghc-mod

View file

@ -520,7 +520,7 @@ If you do need to do create this sort of patch file, one way to do so is with gi
4. Use git to create a diff, and pipe the output to a patch file: 4. Use git to create a diff, and pipe the output to a patch file:
```ShellSession ```ShellSession
$ git diff > nixpkgs/pkgs/the/package/0001-changes.patch $ git diff -a > nixpkgs/pkgs/the/package/0001-changes.patch
``` ```
If a patch is available online but does not cleanly apply, it can be modified in some fixed ways by using additional optional arguments for `fetchpatch`: If a patch is available online but does not cleanly apply, it can be modified in some fixed ways by using additional optional arguments for `fetchpatch`:
@ -537,7 +537,13 @@ Note that because the checksum is computed after applying these effects, using o
Tests are important to ensure quality and make reviews and automatic updates easy. Tests are important to ensure quality and make reviews and automatic updates easy.
Nix package tests are a lightweight alternative to [NixOS module tests](https://nixos.org/manual/nixos/stable/#sec-nixos-tests). They can be used to create simple integration tests for packages while the module tests are used to test services or programs with a graphical user interface on a NixOS VM. Unittests that are included in the source code of a package should be executed in the `checkPhase`. The following types of tests exists:
* [NixOS **module tests**](https://nixos.org/manual/nixos/stable/#sec-nixos-tests), which spawn one or more NixOS VMs. They exercise both NixOS modules and the packaged programs used within them. For example, a NixOS module test can start a web server VM running the `nginx` module, and a client VM running `curl` or a graphical `firefox`, and test that they can talk to each other and display the correct content.
* Nix **package tests** are a lightweight alternative to NixOS module tests. They should be used to create simple integration tests for packages, but cannot test NixOS services, and some programs with graphical user interfaces may also be difficult to test with them.
* The **`checkPhase` of a package**, which should execute the unit tests that are included in the source code of a package.
Here in the nixpkgs manual we describe mostly _package tests_; for _module tests_ head over to the corresponding [section in the NixOS manual](https://nixos.org/manual/nixos/stable/#sec-nixos-tests).
### Writing package tests {#ssec-package-tests-writing} ### Writing package tests {#ssec-package-tests-writing}
@ -568,7 +574,7 @@ let
inherit (phoronix-test-suite) pname version; inherit (phoronix-test-suite) pname version;
in in
runCommand "${pname}-tests" { meta.timeout = 3; } runCommand "${pname}-tests" { meta.timeout = 60; }
'' ''
# automatic initial setup to prevent interactive questions # automatic initial setup to prevent interactive questions
${phoronix-test-suite}/bin/phoronix-test-suite enterprise-setup >/dev/null ${phoronix-test-suite}/bin/phoronix-test-suite enterprise-setup >/dev/null
@ -602,3 +608,23 @@ Here are examples of package tests:
- [Spacy annotation test](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/python-modules/spacy/annotation-test/default.nix) - [Spacy annotation test](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/python-modules/spacy/annotation-test/default.nix)
- [Libtorch test](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/libraries/science/math/libtorch/test/default.nix) - [Libtorch test](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/libraries/science/math/libtorch/test/default.nix)
- [Multiple tests for nanopb](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/libraries/nanopb/default.nix) - [Multiple tests for nanopb](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/libraries/nanopb/default.nix)
### Linking NixOS module tests to a package {#ssec-nixos-tests-linking}
Like [package tests](#ssec-package-tests-writing) as shown above, [NixOS module tests](https://nixos.org/manual/nixos/stable/#sec-nixos-tests) can also be linked to a package, so that the tests can be easily run when changing the related package.
For example, assuming we're packaging `nginx`, we can link its module test via `passthru.tests`:
```nix
{ stdenv, lib, nixosTests }:
stdenv.mkDerivation {
...
passthru.tests = {
nginx = nixosTests.nginx;
};
...
}
```

View file

@ -39,7 +39,7 @@ To add a package from NPM to nixpkgs:
1. Modify `pkgs/development/node-packages/node-packages.json` to add, update 1. Modify `pkgs/development/node-packages/node-packages.json` to add, update
or remove package entries to have it included in `nodePackages` and or remove package entries to have it included in `nodePackages` and
`nodePackages_latest`. `nodePackages_latest`.
2. Run the script: `(cd pkgs/development/node-packages && ./generate.sh)`. 2. Run the script: `cd pkgs/development/node-packages && ./generate.sh`.
3. Build your new package to test your changes: 3. Build your new package to test your changes:
`cd /path/to/nixpkgs && nix-build -A nodePackages.<new-or-updated-package>`. `cd /path/to/nixpkgs && nix-build -A nodePackages.<new-or-updated-package>`.
To build against the latest stable Current Node.js version (e.g. 14.x): To build against the latest stable Current Node.js version (e.g. 14.x):

View file

@ -129,7 +129,15 @@ rustPlatform.buildRustPackage rec {
``` ```
This will retrieve the dependencies using fixed-output derivations from This will retrieve the dependencies using fixed-output derivations from
the specified lockfile. the specified lockfile. Note that setting `cargoLock.lockFile` doesn't
add a `Cargo.lock` to your `src`, and a `Cargo.lock` is still required
to build a rust package. A simple fix is to use:
```nix
postPatch = ''
cp ${./Cargo.lock} Cargo.lock
'';
```
The output hash of each dependency that uses a git source must be The output hash of each dependency that uses a git source must be
specified in the `outputHashes` attribute. For example: specified in the `outputHashes` attribute. For example:
@ -144,7 +152,7 @@ rustPlatform.buildRustPackage rec {
outputHashes = { outputHashes = {
"finalfusion-0.14.0" = "17f4bsdzpcshwh74w5z119xjy2if6l2wgyjy56v621skr2r8y904"; "finalfusion-0.14.0" = "17f4bsdzpcshwh74w5z119xjy2if6l2wgyjy56v621skr2r8y904";
}; };
} };
# ... # ...
} }

View file

@ -5,7 +5,7 @@ let
inherit (builtins) head tail length; inherit (builtins) head tail length;
inherit (lib.trivial) and; inherit (lib.trivial) and;
inherit (lib.strings) concatStringsSep sanitizeDerivationName; inherit (lib.strings) concatStringsSep sanitizeDerivationName;
inherit (lib.lists) fold concatMap concatLists; inherit (lib.lists) fold foldr concatMap concatLists;
in in
rec { rec {
@ -152,8 +152,8 @@ rec {
=> { a = [ 2 3 ]; } => { a = [ 2 3 ]; }
*/ */
foldAttrs = op: nul: list_of_attrs: foldAttrs = op: nul: list_of_attrs:
fold (n: a: foldr (n: a:
fold (name: o: foldr (name: o:
o // { ${name} = op n.${name} (a.${name} or nul); } o // { ${name} = op n.${name} (a.${name} or nul); }
) a (attrNames n) ) a (attrNames n)
) {} list_of_attrs; ) {} list_of_attrs;
@ -455,7 +455,7 @@ rec {
=> true => true
*/ */
matchAttrs = pattern: attrs: assert isAttrs pattern; matchAttrs = pattern: attrs: assert isAttrs pattern;
fold and true (attrValues (zipAttrsWithNames (attrNames pattern) (n: values: foldr and true (attrValues (zipAttrsWithNames (attrNames pattern) (n: values:
let pat = head values; val = head (tail values); in let pat = head values; val = head (tail values); in
if length values == 1 then false if length values == 1 then false
else if isAttrs pat then isAttrs val && matchAttrs pat val else if isAttrs pat then isAttrs val && matchAttrs pat val

View file

@ -77,11 +77,11 @@ rec {
# Output : are reqs satisfied? It's asserted. # Output : are reqs satisfied? It's asserted.
checkReqs = attrSet: argList: condList: checkReqs = attrSet: argList: condList:
( (
fold lib.and true foldr lib.and true
(map (x: let name = (head x); in (map (x: let name = (head x); in
((checkFlag attrSet name) -> ((checkFlag attrSet name) ->
(fold lib.and true (foldr lib.and true
(map (y: let val=(getValue attrSet argList y); in (map (y: let val=(getValue attrSet argList y); in
(val!=null) && (val!=false)) (val!=null) && (val!=false))
(tail x))))) condList)); (tail x))))) condList));
@ -177,7 +177,7 @@ rec {
# merge attributes with custom function handling the case that the attribute # merge attributes with custom function handling the case that the attribute
# exists in both sets # exists in both sets
mergeAttrsWithFunc = f: set1: set2: mergeAttrsWithFunc = f: set1: set2:
fold (n: set: if set ? ${n} foldr (n: set: if set ? ${n}
then setAttr set n (f set.${n} set2.${n}) then setAttr set n (f set.${n} set2.${n})
else set ) else set )
(set2 // set1) (attrNames set2); (set2 // set1) (attrNames set2);
@ -196,7 +196,7 @@ rec {
mergeAttrsNoOverride = { mergeLists ? ["buildInputs" "propagatedBuildInputs"], mergeAttrsNoOverride = { mergeLists ? ["buildInputs" "propagatedBuildInputs"],
overrideSnd ? [ "buildPhase" ] overrideSnd ? [ "buildPhase" ]
}: attrs1: attrs2: }: attrs1: attrs2:
fold (n: set: foldr (n: set:
setAttr set n ( if set ? ${n} setAttr set n ( if set ? ${n}
then # merge then # merge
if elem n mergeLists # attribute contains list, merge them by concatenating if elem n mergeLists # attribute contains list, merge them by concatenating
@ -224,7 +224,7 @@ rec {
mergeAttrBy2 = { mergeAttrBy = lib.mergeAttrs; } mergeAttrBy2 = { mergeAttrBy = lib.mergeAttrs; }
// (maybeAttr "mergeAttrBy" {} x) // (maybeAttr "mergeAttrBy" {} x)
// (maybeAttr "mergeAttrBy" {} y); in // (maybeAttr "mergeAttrBy" {} y); in
fold lib.mergeAttrs {} [ foldr lib.mergeAttrs {} [
x y x y
(mapAttrs ( a: v: # merge special names using given functions (mapAttrs ( a: v: # merge special names using given functions
if x ? ${a} if x ? ${a}

View file

@ -26,21 +26,22 @@ let
# Linux # Linux
"aarch64-linux" "armv5tel-linux" "armv6l-linux" "armv7a-linux" "aarch64-linux" "armv5tel-linux" "armv6l-linux" "armv7a-linux"
"armv7l-linux" "i686-linux" "mipsel-linux" "powerpc64-linux" "armv7l-linux" "i686-linux" "m68k-linux" "mipsel-linux"
"powerpc64le-linux" "riscv32-linux" "riscv64-linux" "x86_64-linux" "powerpc64-linux" "powerpc64le-linux" "riscv32-linux"
"riscv64-linux" "s390-linux" "x86_64-linux"
# MMIXware # MMIXware
"mmix-mmixware" "mmix-mmixware"
# NetBSD # NetBSD
"aarch64-netbsd" "armv6l-netbsd" "armv7a-netbsd" "armv7l-netbsd" "aarch64-netbsd" "armv6l-netbsd" "armv7a-netbsd" "armv7l-netbsd"
"i686-netbsd" "mipsel-netbsd" "powerpc-netbsd" "riscv32-netbsd" "i686-netbsd" "m68k-netbsd" "mipsel-netbsd" "powerpc-netbsd"
"riscv64-netbsd" "x86_64-netbsd" "riscv32-netbsd" "riscv64-netbsd" "x86_64-netbsd"
# none # none
"aarch64-none" "arm-none" "armv6l-none" "avr-none" "i686-none" "msp430-none" "aarch64-none" "arm-none" "armv6l-none" "avr-none" "i686-none"
"or1k-none" "powerpc-none" "riscv32-none" "riscv64-none" "vc4-none" "msp430-none" "or1k-none" "m68k-none" "powerpc-none"
"x86_64-none" "riscv32-none" "riscv64-none" "s390-none" "vc4-none" "x86_64-none"
# OpenBSD # OpenBSD
"i686-openbsd" "x86_64-openbsd" "i686-openbsd" "x86_64-openbsd"
@ -74,6 +75,8 @@ in {
riscv = filterDoubles predicates.isRiscV; riscv = filterDoubles predicates.isRiscV;
vc4 = filterDoubles predicates.isVc4; vc4 = filterDoubles predicates.isVc4;
or1k = filterDoubles predicates.isOr1k; or1k = filterDoubles predicates.isOr1k;
m68k = filterDoubles predicates.isM68k;
s390 = filterDoubles predicates.isS390;
js = filterDoubles predicates.isJavaScript; js = filterDoubles predicates.isJavaScript;
bigEndian = filterDoubles predicates.isBigEndian; bigEndian = filterDoubles predicates.isBigEndian;

View file

@ -144,6 +144,14 @@ rec {
libc = "newlib"; libc = "newlib";
}; };
m68k = {
config = "m68k-unknown-linux-gnu";
};
s390 = {
config = "s390-unknown-linux-gnu";
};
arm-embedded = { arm-embedded = {
config = "arm-none-eabi"; config = "arm-none-eabi";
libc = "newlib"; libc = "newlib";

View file

@ -26,6 +26,8 @@ rec {
isAvr = { cpu = { family = "avr"; }; }; isAvr = { cpu = { family = "avr"; }; };
isAlpha = { cpu = { family = "alpha"; }; }; isAlpha = { cpu = { family = "alpha"; }; };
isOr1k = { cpu = { family = "or1k"; }; }; isOr1k = { cpu = { family = "or1k"; }; };
isM68k = { cpu = { family = "m68k"; }; };
isS390 = { cpu = { family = "s390"; }; };
isJavaScript = { cpu = cpuTypes.js; }; isJavaScript = { cpu = cpuTypes.js; };
is32bit = { cpu = { bits = 32; }; }; is32bit = { cpu = { bits = 32; }; };

View file

@ -95,6 +95,8 @@ rec {
mmix = { bits = 64; significantByte = bigEndian; family = "mmix"; }; mmix = { bits = 64; significantByte = bigEndian; family = "mmix"; };
m68k = { bits = 32; significantByte = bigEndian; family = "m68k"; };
powerpc = { bits = 32; significantByte = bigEndian; family = "power"; }; powerpc = { bits = 32; significantByte = bigEndian; family = "power"; };
powerpc64 = { bits = 64; significantByte = bigEndian; family = "power"; }; powerpc64 = { bits = 64; significantByte = bigEndian; family = "power"; };
powerpc64le = { bits = 64; significantByte = littleEndian; family = "power"; }; powerpc64le = { bits = 64; significantByte = littleEndian; family = "power"; };
@ -103,6 +105,8 @@ rec {
riscv32 = { bits = 32; significantByte = littleEndian; family = "riscv"; }; riscv32 = { bits = 32; significantByte = littleEndian; family = "riscv"; };
riscv64 = { bits = 64; significantByte = littleEndian; family = "riscv"; }; riscv64 = { bits = 64; significantByte = littleEndian; family = "riscv"; };
s390 = { bits = 32; significantByte = bigEndian; family = "s390"; };
sparc = { bits = 32; significantByte = bigEndian; family = "sparc"; }; sparc = { bits = 32; significantByte = bigEndian; family = "sparc"; };
sparc64 = { bits = 64; significantByte = bigEndian; family = "sparc"; }; sparc64 = { bits = 64; significantByte = bigEndian; family = "sparc"; };
@ -123,9 +127,10 @@ rec {
# GNU build systems assume that older NetBSD architectures are using a.out. # GNU build systems assume that older NetBSD architectures are using a.out.
gnuNetBSDDefaultExecFormat = cpu: gnuNetBSDDefaultExecFormat = cpu:
if (cpu.family == "x86" && cpu.bits == 32) || if (cpu.family == "arm" && cpu.bits == 32) ||
(cpu.family == "arm" && cpu.bits == 32) || (cpu.family == "sparc" && cpu.bits == 32) ||
(cpu.family == "sparc" && cpu.bits == 32) (cpu.family == "m68k" && cpu.bits == 32) ||
(cpu.family == "x86" && cpu.bits == 32)
then execFormats.aout then execFormats.aout
else execFormats.elf; else execFormats.elf;

View file

@ -315,6 +315,12 @@ rec {
# Disable OABI to have seccomp_filter (required for systemd) # Disable OABI to have seccomp_filter (required for systemd)
# https://github.com/raspberrypi/firmware/issues/651 # https://github.com/raspberrypi/firmware/issues/651
OABI_COMPAT n OABI_COMPAT n
# >=5.12 fails with:
# drivers/net/ethernet/micrel/ks8851_common.o: in function `ks8851_probe_common':
# ks8851_common.c:(.text+0x179c): undefined reference to `__this_module'
# See: https://lore.kernel.org/netdev/20210116164828.40545-1-marex@denx.de/T/
KS8851_MLL y
''; '';
}; };
gcc = { gcc = {

View file

@ -132,6 +132,16 @@ runTests {
expected = [ 1 1 0 ]; expected = [ 1 1 0 ];
}; };
testFunctionArgsFunctor = {
expr = functionArgs { __functor = self: { a, b }: null; };
expected = { a = false; b = false; };
};
testFunctionArgsSetFunctionArgs = {
expr = functionArgs (setFunctionArgs (args: args.x) { x = false; });
expected = { x = false; };
};
# STRINGS # STRINGS
testConcatMapStrings = { testConcatMapStrings = {

View file

@ -28,8 +28,8 @@ with lib.systems.doubles; lib.runTests {
testredox = mseteq redox [ "x86_64-redox" ]; testredox = mseteq redox [ "x86_64-redox" ];
testgnu = mseteq gnu (linux /* ++ kfreebsd ++ ... */); testgnu = mseteq gnu (linux /* ++ kfreebsd ++ ... */);
testillumos = mseteq illumos [ "x86_64-solaris" ]; testillumos = mseteq illumos [ "x86_64-solaris" ];
testlinux = mseteq linux [ "aarch64-linux" "armv5tel-linux" "armv6l-linux" "armv7a-linux" "armv7l-linux" "i686-linux" "mipsel-linux" "riscv32-linux" "riscv64-linux" "x86_64-linux" "powerpc64-linux" "powerpc64le-linux" ]; testlinux = mseteq linux [ "aarch64-linux" "armv5tel-linux" "armv6l-linux" "armv7a-linux" "armv7l-linux" "i686-linux" "mipsel-linux" "riscv32-linux" "riscv64-linux" "x86_64-linux" "powerpc64-linux" "powerpc64le-linux" "m68k-linux" "s390-linux" ];
testnetbsd = mseteq netbsd [ "aarch64-netbsd" "armv6l-netbsd" "armv7a-netbsd" "armv7l-netbsd" "i686-netbsd" "mipsel-netbsd" "powerpc-netbsd" "riscv32-netbsd" "riscv64-netbsd" "x86_64-netbsd" ]; testnetbsd = mseteq netbsd [ "aarch64-netbsd" "armv6l-netbsd" "armv7a-netbsd" "armv7l-netbsd" "i686-netbsd" "m68k-netbsd" "mipsel-netbsd" "powerpc-netbsd" "riscv32-netbsd" "riscv64-netbsd" "x86_64-netbsd" ];
testopenbsd = mseteq openbsd [ "i686-openbsd" "x86_64-openbsd" ]; testopenbsd = mseteq openbsd [ "i686-openbsd" "x86_64-openbsd" ];
testwindows = mseteq windows [ "i686-cygwin" "x86_64-cygwin" "i686-windows" "x86_64-windows" ]; testwindows = mseteq windows [ "i686-cygwin" "x86_64-cygwin" "i686-windows" "x86_64-windows" ];
testunix = mseteq unix (linux ++ darwin ++ freebsd ++ openbsd ++ netbsd ++ illumos ++ cygwin ++ redox); testunix = mseteq unix (linux ++ darwin ++ freebsd ++ openbsd ++ netbsd ++ illumos ++ cygwin ++ redox);

View file

@ -308,7 +308,7 @@ rec {
info = msg: builtins.trace "INFO: ${msg}"; info = msg: builtins.trace "INFO: ${msg}";
showWarnings = warnings: res: lib.fold (w: x: warn w x) res warnings; showWarnings = warnings: res: lib.foldr (w: x: warn w x) res warnings;
## Function annotations ## Function annotations
@ -334,7 +334,10 @@ rec {
has the same return type and semantics as builtins.functionArgs. has the same return type and semantics as builtins.functionArgs.
setFunctionArgs : (a b) Map String Bool. setFunctionArgs : (a b) Map String Bool.
*/ */
functionArgs = f: f.__functionArgs or (builtins.functionArgs f); functionArgs = f:
if f ? __functor
then f.__functionArgs or (lib.functionArgs (f.__functor f))
else builtins.functionArgs f;
/* Check whether something is a function or something /* Check whether something is a function or something
annotated with function args. annotated with function args.

View file

@ -1412,6 +1412,12 @@
githubId = 10221570; githubId = 10221570;
name = "Bo Bakker"; name = "Bo Bakker";
}; };
bobby285271 = {
name = "Bobby Rong";
email = "rjl931189261@126.com";
github = "bobby285271";
githubId = 20080233;
};
bobvanderlinden = { bobvanderlinden = {
email = "bobvanderlinden@gmail.com"; email = "bobvanderlinden@gmail.com";
github = "bobvanderlinden"; github = "bobvanderlinden";
@ -1530,6 +1536,12 @@
githubId = 1111035; githubId = 1111035;
name = "Break Yang"; name = "Break Yang";
}; };
brecht = {
email = "brecht.savelkoul@alumni.lse.ac.uk";
github = "brechtcs";
githubId = 6107054;
name = "Brecht Savelkoul";
};
brettlyons = { brettlyons = {
email = "blyons@fastmail.com"; email = "blyons@fastmail.com";
github = "brettlyons"; github = "brettlyons";
@ -1899,6 +1911,12 @@
githubId = 811527; githubId = 811527;
name = "Christopher Jefferson"; name = "Christopher Jefferson";
}; };
chrispickard = {
email = "chrispickard9@gmail.com";
github = "chrispickard";
githubId = 1438690;
name = "Chris Pickard";
};
chrisrosset = { chrisrosset = {
email = "chris@rosset.org.uk"; email = "chris@rosset.org.uk";
github = "chrisrosset"; github = "chrisrosset";
@ -3989,6 +4007,16 @@
fingerprint = "5214 2D39 A7CE F8FA 872B CA7F DE62 E1E2 A614 5556"; fingerprint = "5214 2D39 A7CE F8FA 872B CA7F DE62 E1E2 A614 5556";
}]; }];
}; };
gpanders = {
name = "Gregory Anders";
email = "greg@gpanders.com";
github = "gpanders";
githubId = 8965202;
keys = [{
longkeyid = "rsa2048/0x56E93C2FB6B08BDB";
fingerprint = "B9D5 0EDF E95E ECD0 C135 00A9 56E9 3C2F B6B0 8BDB";
}];
};
gpyh = { gpyh = {
email = "yacine.hmito@gmail.com"; email = "yacine.hmito@gmail.com";
github = "yacinehmito"; github = "yacinehmito";
@ -4249,6 +4277,12 @@
githubId = 131599; githubId = 131599;
name = "Martin Weinelt"; name = "Martin Weinelt";
}; };
hexagonal-sun = {
email = "dev@mattleach.net";
github = "hexagonal-sun";
githubId = 222664;
name = "Matthew Leach";
};
hh = { hh = {
email = "hh@m-labs.hk"; email = "hh@m-labs.hk";
github = "HarryMakes"; github = "HarryMakes";
@ -4529,6 +4563,12 @@
githubId = 592849; githubId = 592849;
name = "Ilya Kolpakov"; name = "Ilya Kolpakov";
}; };
ilyakooo0 = {
name = "Ilya Kostyuchenko";
email = "ilyakooo0@gmail.com";
github = "ilyakooo0";
githubId = 6209627;
};
imalison = { imalison = {
email = "IvanMalison@gmail.com"; email = "IvanMalison@gmail.com";
github = "IvanMalison"; github = "IvanMalison";
@ -4927,6 +4967,12 @@
fingerprint = "7EB1 C02A B62B B464 6D7C E4AE D1D0 9DE1 69EA 19A0"; fingerprint = "7EB1 C02A B62B B464 6D7C E4AE D1D0 9DE1 69EA 19A0";
}]; }];
}; };
jgart = {
email = "jgart@dismail.de";
github = "jgarte";
githubId = 47760695;
name = "Jorge Gomez";
};
jgeerds = { jgeerds = {
email = "jascha@geerds.org"; email = "jascha@geerds.org";
github = "jgeerds"; github = "jgeerds";
@ -7459,6 +7505,12 @@
email = "natedevv@gmail.com"; email = "natedevv@gmail.com";
name = "Nathan Moore"; name = "Nathan Moore";
}; };
nathanruiz = {
email = "nathanruiz@protonmail.com";
github = "nathanruiz";
githubId = 18604892;
name = "Nathan Ruiz";
};
nathan-gs = { nathan-gs = {
email = "nathan@nathan.gs"; email = "nathan@nathan.gs";
github = "nathan-gs"; github = "nathan-gs";
@ -7814,6 +7866,12 @@
githubId = 1839979; githubId = 1839979;
name = "Niklas Thörne"; name = "Niklas Thörne";
}; };
nukaduka = {
email = "ksgokte@gmail.com";
github = "NukaDuka";
githubId = 22592293;
name = "Kartik Gokte";
};
nullx76 = { nullx76 = {
email = "nix@xirion.net"; email = "nix@xirion.net";
github = "NULLx76"; github = "NULLx76";
@ -10982,6 +11040,12 @@
fingerprint = "E631 8869 586F 99B4 F6E6 D785 5942 58F0 389D 2802"; fingerprint = "E631 8869 586F 99B4 F6E6 D785 5942 58F0 389D 2802";
}]; }];
}; };
twitchyliquid64 = {
name = "Tom";
email = "twitchyliquid64@ciphersink.net";
github = "twitchyliquid64";
githubId = 6328589;
};
typetetris = { typetetris = {
email = "ericwolf42@mail.com"; email = "ericwolf42@mail.com";
github = "typetetris"; github = "typetetris";
@ -11304,10 +11368,6 @@
githubId = 3413119; githubId = 3413119;
name = "Vonfry"; name = "Vonfry";
}; };
vozz = {
email = "oliver.huntuk@gmail.com";
name = "Oliver Hunt";
};
vq = { vq = {
email = "vq@erq.se"; email = "vq@erq.se";
name = "Daniel Nilsson"; name = "Daniel Nilsson";
@ -11668,6 +11728,12 @@
githubId = 3705333; githubId = 3705333;
name = "Dmitry V."; name = "Dmitry V.";
}; };
yayayayaka = {
email = "nixpkgs@uwu.is";
github = "yayayayaka";
githubId = 73759599;
name = "Lara A.";
};
yegortimoshenko = { yegortimoshenko = {
email = "yegortimoshenko@riseup.net"; email = "yegortimoshenko@riseup.net";
github = "yegortimoshenko"; github = "yegortimoshenko";

View file

@ -30,9 +30,10 @@ EOF
# clear environment here to avoid things like allowing broken builds in # clear environment here to avoid things like allowing broken builds in
sort -iu "$tmpfile" >> "$broken_config" sort -iu "$tmpfile" >> "$broken_config"
env -i maintainers/scripts/haskell/regenerate-hackage-packages.sh clear="env -u HOME -u NIXPKGS_CONFIG"
env -i maintainers/scripts/haskell/regenerate-transitive-broken-packages.sh $clear maintainers/scripts/haskell/regenerate-hackage-packages.sh
env -i maintainers/scripts/haskell/regenerate-hackage-packages.sh $clear maintainers/scripts/haskell/regenerate-transitive-broken-packages.sh
$clear maintainers/scripts/haskell/regenerate-hackage-packages.sh
if [[ "${1:-}" == "--do-commit" ]]; then if [[ "${1:-}" == "--do-commit" ]]; then
git add $broken_config git add $broken_config

View file

@ -0,0 +1,21 @@
#! /usr/bin/env nix-shell
#! nix-shell -i bash -p nix curl gnused -I nixpkgs=.
# On Hackage every package description shows a category "Distributions" which
# lists a "NixOS" version.
# This script uploads a csv to hackage which will update the displayed versions
# based on the current versions in nixpkgs. This happens with a simple http
# request.
# For authorization you just need to have any valid hackage account. This
# script uses the `username` and `password-command` field from your
# ~/.cabal/config file.
# e.g. username: maralorn
# password-command: pass hackage.haskell.org (this can be any command, but not an arbitrary shell expression. Like cabal we only read the first output line and ignore the rest.)
# Those fields are specified under `upload` on the `cabal` man page.
package_list="$(nix-build -A haskell.package-list)/nixos-hackage-packages.csv"
username=$(grep "^username:" ~/.cabal/config | sed "s/^username: //")
password_command=$(grep "^password-command:" ~/.cabal/config | sed "s/^password-command: //")
curl -u "$username:$($password_command | head -n1)" --digest -H "Content-type: text/csv" -T "$package_list" http://hackage.haskell.org/distro/NixOS/packages.csv

View file

@ -78,10 +78,11 @@ mpack,,,,,
moonscript,,,,,arobyn moonscript,,,,,arobyn
nvim-client,,,,, nvim-client,,,,,
penlight,,,,, penlight,,,,,
plenary.nvim,,,,lua5_1,
rapidjson,,,,, rapidjson,,,,,
readline,,,,, readline,,,,,
say,,,,, say,,,,,
std__debug,std._debug,,,, std-_debug,std._debug,,,,
std_normalize,std.normalize,,,, std_normalize,std.normalize,,,,
stdlib,,,,,vyp stdlib,,,,,vyp
vstruct,,,,, vstruct,,,,,

1 # nix name luarocks name server version luaversion maintainers
78 moonscript arobyn
79 nvim-client
80 penlight
81 plenary.nvim lua5_1
82 rapidjson
83 readline
84 say
85 std__debug std-_debug std._debug
86 std_normalize std.normalize
87 stdlib vyp
88 vstruct

View file

@ -89,6 +89,10 @@ function convert_pkg() {
echo "Skipping comment ${*}" >&2 echo "Skipping comment ${*}" >&2
return return
fi fi
# Normalize package name
nix_pkg_name_normalized=$(sed 's/\./-/' <(echo "$nix_pkg_name"))
if [ -z "$lua_pkg_name" ]; then if [ -z "$lua_pkg_name" ]; then
echo "Using nix_name as lua_pkg_name for '$nix_pkg_name'" >&2 echo "Using nix_name as lua_pkg_name for '$nix_pkg_name'" >&2
lua_pkg_name="$nix_pkg_name" lua_pkg_name="$nix_pkg_name"
@ -111,7 +115,7 @@ function convert_pkg() {
luarocks_args+=("$pkg_version") luarocks_args+=("$pkg_version")
fi fi
echo "Running 'luarocks ${luarocks_args[*]}'" >&2 echo "Running 'luarocks ${luarocks_args[*]}'" >&2
if drv="$nix_pkg_name = $(luarocks "${luarocks_args[@]}")"; then if drv="$nix_pkg_name_normalized = $(luarocks "${luarocks_args[@]}")"; then
echo "$drv" echo "$drv"
else else
echo "Failed to convert $nix_pkg_name" >&2 echo "Failed to convert $nix_pkg_name" >&2

View file

@ -114,8 +114,9 @@ with lib.maintainers; {
haskell = { haskell = {
members = [ members = [
maralorn
cdepillabout cdepillabout
expipiplus1
maralorn
sternenseemann sternenseemann
]; ];
scope = "Maintain Haskell packages and infrastructure."; scope = "Maintain Haskell packages and infrastructure.";
@ -161,10 +162,19 @@ with lib.maintainers; {
ralith ralith
mjlbach mjlbach
dandellion dandellion
sumnerevans
]; ];
scope = "Maintain the ecosystem around Matrix, a decentralized messenger."; scope = "Maintain the ecosystem around Matrix, a decentralized messenger.";
}; };
pantheon = {
members = [
davidak
bobby285271
];
scope = "Maintain Pantheon desktop environment and platform.";
};
php = { php = {
members = [ members = [
aanderse aanderse

View file

@ -12,7 +12,7 @@ let
# E.g. if some `options` came from modules in ${pkgs.customModules}/nix, # E.g. if some `options` came from modules in ${pkgs.customModules}/nix,
# you'd need to include `extraSources = [ pkgs.customModules ]` # you'd need to include `extraSources = [ pkgs.customModules ]`
prefixesToStrip = map (p: "${toString p}/") ([ ../../.. ] ++ extraSources); prefixesToStrip = map (p: "${toString p}/") ([ ../../.. ] ++ extraSources);
stripAnyPrefixes = lib.flip (lib.fold lib.removePrefix) prefixesToStrip; stripAnyPrefixes = lib.flip (lib.foldr lib.removePrefix) prefixesToStrip;
optionsDoc = buildPackages.nixosOptionsDoc { optionsDoc = buildPackages.nixosOptionsDoc {
inherit options revision; inherit options revision;

View file

@ -0,0 +1,6 @@
# Linking NixOS tests to packages {#sec-linking-nixos-tests-to-packages}
You can link NixOS module tests to the packages that they exercised,
so that the tests can be run automatically during code review when the package gets changed.
This is
[described in the nixpkgs manual](https://nixos.org/manual/nixpkgs/stable/#ssec-nixos-tests-linking).

View file

@ -16,4 +16,5 @@ xlink:href="https://github.com/NixOS/nixpkgs/tree/master/nixos/tests">nixos/test
<xi:include href="../from_md/development/writing-nixos-tests.section.xml" /> <xi:include href="../from_md/development/writing-nixos-tests.section.xml" />
<xi:include href="../from_md/development/running-nixos-tests.section.xml" /> <xi:include href="../from_md/development/running-nixos-tests.section.xml" />
<xi:include href="../from_md/development/running-nixos-tests-interactively.section.xml" /> <xi:include href="../from_md/development/running-nixos-tests-interactively.section.xml" />
<xi:include href="../from_md/development/linking-nixos-tests-to-packages.section.xml" />
</chapter> </chapter>

View file

@ -0,0 +1,10 @@
<section xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="sec-linking-nixos-tests-to-packages">
<title>Linking NixOS tests to packages</title>
<para>
You can link NixOS module tests to the packages that they exercised,
so that the tests can be run automatically during code review when
the package gets changed. This is
<link xlink:href="https://nixos.org/manual/nixpkgs/stable/#ssec-nixos-tests-linking">described
in the nixpkgs manual</link>.
</para>
</section>

View file

@ -32,6 +32,11 @@
from Python 3.8. from Python 3.8.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
PostgreSQL now defaults to major version 13.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>
<section xml:id="sec-release-21.11-new-services"> <section xml:id="sec-release-21.11-new-services">
@ -120,6 +125,37 @@
<link linkend="opt-services.prometheus.exporters.buildkite-agent.enable">services.prometheus.exporters.buildkite-agent</link>. <link linkend="opt-services.prometheus.exporters.buildkite-agent.enable">services.prometheus.exporters.buildkite-agent</link>.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
<link xlink:href="https://github.com/prometheus/influxdb_exporter">influxdb-exporter</link>
a Prometheus exporter that exports metrics received on an
InfluxDB compatible endpoint is now available as
<link linkend="opt-services.prometheus.exporters.influxdb.enable">services.prometheus.exporters.influxdb</link>.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/matrix-discord/mx-puppet-discord">mx-puppet-discord</link>,
a discord puppeting bridge for matrix. Available as
<link linkend="opt-services.mx-puppet-discord.enable">services.mx-puppet-discord</link>.
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://www.meshcommander.com/meshcentral2/overview">MeshCentral</link>,
a remote administration service (<quote>TeamViewer but
self-hosted and with more features</quote>) is now available
with a package and a module:
<link linkend="opt-services.meshcentral.enable">services.meshcentral.enable</link>
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/Arksine/moonraker">moonraker</link>,
an API web server for Klipper. Available as
<link linkend="opt-services.moonraker.enable">moonraker</link>.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>
<section xml:id="sec-release-21.11-incompatibilities"> <section xml:id="sec-release-21.11-incompatibilities">
@ -550,6 +586,66 @@
6.0.0 to 9.0.0 6.0.0 to 9.0.0
</para> </para>
</listitem> </listitem>
<listitem>
<para>
<literal>tt-rss</literal> was upgraded to the commit on
2021-06-21, which has breaking changes. If you use
<literal>services.tt-rss.extraConfig</literal> you should
migrate to the <literal>putenv</literal>-style configuration.
See
<link xlink:href="https://community.tt-rss.org/t/rip-config-php-hello-classes-config-php/4337">this
Discourse post</link> in the tt-rss forums for more details.
</para>
</listitem>
<listitem>
<para>
The following Visual Studio Code extensions were renamed to
keep the naming convention uniform.
</para>
<itemizedlist spacing="compact">
<listitem>
<para>
<literal>bbenoist.Nix</literal> -&gt;
<literal>bbenoist.nix</literal>
</para>
</listitem>
<listitem>
<para>
<literal>CoenraadS.bracket-pair-colorizer</literal> -&gt;
<literal>coenraads.bracket-pair-colorizer</literal>
</para>
</listitem>
<listitem>
<para>
<literal>golang.Go</literal> -&gt;
<literal>golang.go</literal>
</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>
<literal>services.uptimed</literal> now uses
<literal>/var/lib/uptimed</literal> as its stateDirectory
instead of <literal>/var/spool/uptimed</literal>. Make sure to
move all files to the new directory.
</para>
</listitem>
<listitem>
<para>
Deprecated package aliases in <literal>emacs.pkgs.*</literal>
have been removed. These aliases were remnants of the old
Emacs package infrastructure. We now use exact upstream names
wherever possible.
</para>
</listitem>
<listitem>
<para>
<literal>programs.neovim.runtime</literal> switched to a
<literal>linkFarm</literal> internally, making it impossible
to use wildcards in the <literal>source</literal> argument.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>
<section xml:id="sec-release-21.11-notable-changes"> <section xml:id="sec-release-21.11-notable-changes">
@ -688,6 +784,37 @@
</listitem> </listitem>
</itemizedlist> </itemizedlist>
</listitem> </listitem>
<listitem>
<para>
The
<link xlink:href="options.html#opt-networking.wireless.iwd.enable">networking.wireless.iwd</link>
module has a new
<link xlink:href="options.html#opt-networking.wireless.iwd.settings">networking.wireless.iwd.settings</link>
option.
</para>
</listitem>
<listitem>
<para>
The
<link xlink:href="options.html#opt-services.syncoid.enable">services.syncoid.enable</link>
module now properly drops ZFS permissions after usage. Before
it delegated permissions to whole pools instead of datasets
and didnt clean up after execution. You can manually look
this up for your pools by running
<literal>zfs allow your-pool-name</literal> and use
<literal>zfs unallow syncoid your-pool-name</literal> to clean
this up.
</para>
</listitem>
<listitem>
<para>
Zfs: <literal>latestCompatibleLinuxPackages</literal> is now
exported on the zfs package. One can use
<literal>boot.kernelPackages = config.boot.zfs.package.latestCompatibleLinuxPackages;</literal>
to always track the latest compatible kernel with a given
version of zfs.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>
</section> </section>

View file

@ -11,6 +11,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- `python3` now defaults to Python 3.9, updated from Python 3.8. - `python3` now defaults to Python 3.9, updated from Python 3.8.
- PostgreSQL now defaults to major version 13.
## New Services {#sec-release-21.11-new-services} ## New Services {#sec-release-21.11-new-services}
- [btrbk](https://digint.ch/btrbk/index.html), a backup tool for btrfs subvolumes, taking advantage of btrfs specific capabilities to create atomic snapshots and transfer them incrementally to your backup locations. Available as [services.btrbk](options.html#opt-services.brtbk.instances). - [btrbk](https://digint.ch/btrbk/index.html), a backup tool for btrfs subvolumes, taking advantage of btrfs specific capabilities to create atomic snapshots and transfer them incrementally to your backup locations. Available as [services.btrbk](options.html#opt-services.brtbk.instances).
@ -37,6 +39,15 @@ pt-services.clipcat.enable).
- [buildkite-agent-metrics](https://github.com/buildkite/buildkite-agent-metrics), a command-line tool for collecting Buildkite agent metrics, now has a Prometheus exporter available as [services.prometheus.exporters.buildkite-agent](#opt-services.prometheus.exporters.buildkite-agent.enable). - [buildkite-agent-metrics](https://github.com/buildkite/buildkite-agent-metrics), a command-line tool for collecting Buildkite agent metrics, now has a Prometheus exporter available as [services.prometheus.exporters.buildkite-agent](#opt-services.prometheus.exporters.buildkite-agent.enable).
- [influxdb-exporter](https://github.com/prometheus/influxdb_exporter) a Prometheus exporter that exports metrics received on an InfluxDB compatible endpoint is now available as [services.prometheus.exporters.influxdb](#opt-services.prometheus.exporters.influxdb.enable).
- [mx-puppet-discord](https://github.com/matrix-discord/mx-puppet-discord), a discord puppeting bridge for matrix. Available as [services.mx-puppet-discord](#opt-services.mx-puppet-discord.enable).
- [MeshCentral](https://www.meshcommander.com/meshcentral2/overview), a remote administration service ("TeamViewer but self-hosted and with more features") is now available with a package and a module: [services.meshcentral.enable](#opt-services.meshcentral.enable)
- [moonraker](https://github.com/Arksine/moonraker), an API web server for Klipper.
Available as [moonraker](#opt-services.moonraker.enable).
## Backward Incompatibilities {#sec-release-21.11-incompatibilities} ## Backward Incompatibilities {#sec-release-21.11-incompatibilities}
- The `staticjinja` package has been upgraded from 1.0.4 to 3.0.1 - The `staticjinja` package has been upgraded from 1.0.4 to 3.0.1
@ -140,6 +151,19 @@ pt-services.clipcat.enable).
- the `mingw-64` package has been upgraded from 6.0.0 to 9.0.0 - the `mingw-64` package has been upgraded from 6.0.0 to 9.0.0
- `tt-rss` was upgraded to the commit on 2021-06-21, which has breaking changes. If you use `services.tt-rss.extraConfig` you should migrate to the `putenv`-style configuration. See [this Discourse post](https://community.tt-rss.org/t/rip-config-php-hello-classes-config-php/4337) in the tt-rss forums for more details.
- The following Visual Studio Code extensions were renamed to keep the naming convention uniform.
- `bbenoist.Nix` -> `bbenoist.nix`
- `CoenraadS.bracket-pair-colorizer` -> `coenraads.bracket-pair-colorizer`
- `golang.Go` -> `golang.go`
- `services.uptimed` now uses `/var/lib/uptimed` as its stateDirectory instead of `/var/spool/uptimed`. Make sure to move all files to the new directory.
- Deprecated package aliases in `emacs.pkgs.*` have been removed. These aliases were remnants of the old Emacs package infrastructure. We now use exact upstream names wherever possible.
- `programs.neovim.runtime` switched to a `linkFarm` internally, making it impossible to use wildcards in the `source` argument.
## Other Notable Changes {#sec-release-21.11-notable-changes} ## Other Notable Changes {#sec-release-21.11-notable-changes}
- The setting [`services.openssh.logLevel`](options.html#opt-services.openssh.logLevel) `"VERBOSE"` `"INFO"`. This brings NixOS in line with upstream and other Linux distributions, and reduces log spam on servers due to bruteforcing botnets. - The setting [`services.openssh.logLevel`](options.html#opt-services.openssh.logLevel) `"VERBOSE"` `"INFO"`. This brings NixOS in line with upstream and other Linux distributions, and reduces log spam on servers due to bruteforcing botnets.
@ -179,3 +203,9 @@ pt-services.clipcat.enable).
- NSS modules which should be queried after `resolved`, `files` and - NSS modules which should be queried after `resolved`, `files` and
`myhostname`, but before `dns` should use the default priority `myhostname`, but before `dns` should use the default priority
- NSS modules which should come after `dns` should use mkAfter. - NSS modules which should come after `dns` should use mkAfter.
- The [networking.wireless.iwd](options.html#opt-networking.wireless.iwd.enable) module has a new [networking.wireless.iwd.settings](options.html#opt-networking.wireless.iwd.settings) option.
- The [services.syncoid.enable](options.html#opt-services.syncoid.enable) module now properly drops ZFS permissions after usage. Before it delegated permissions to whole pools instead of datasets and didn't clean up after execution. You can manually look this up for your pools by running `zfs allow your-pool-name` and use `zfs unallow syncoid your-pool-name` to clean this up.
- Zfs: `latestCompatibleLinuxPackages` is now exported on the zfs package. One can use `boot.kernelPackages = config.boot.zfs.package.latestCompatibleLinuxPackages;` to always track the latest compatible kernel with a given version of zfs.

View file

@ -130,9 +130,12 @@ rec {
nodeHostNames = map (c: c.config.system.name) (lib.attrValues nodes); nodeHostNames = map (c: c.config.system.name) (lib.attrValues nodes);
# TODO: This is an implementation error and needs fixing
# the testing famework cannot legitimately restrict hostnames further
# beyond RFC1035
invalidNodeNames = lib.filter invalidNodeNames = lib.filter
(node: builtins.match "^[A-z_]([A-z0-9_]+)?$" node == null) (node: builtins.match "^[A-z_]([A-z0-9_]+)?$" node == null)
(builtins.attrNames nodes); nodeHostNames;
testScript' = testScript' =
# Call the test script with the computed nodes. # Call the test script with the computed nodes.
@ -146,7 +149,9 @@ rec {
Cannot create machines out of (${lib.concatStringsSep ", " invalidNodeNames})! Cannot create machines out of (${lib.concatStringsSep ", " invalidNodeNames})!
All machines are referenced as python variables in the testing framework which will break the All machines are referenced as python variables in the testing framework which will break the
script when special characters are used. script when special characters are used.
Please stick to alphanumeric chars and underscores as separation.
This is an IMPLEMENTATION ERROR and needs to be fixed. Meanwhile,
please stick to alphanumeric chars and underscores as separation.
'' ''
else lib.warnIf skipLint "Linting is disabled" (runCommand testDriverName else lib.warnIf skipLint "Linting is disabled" (runCommand testDriverName
{ {

View file

@ -396,7 +396,7 @@ let
}; };
}; };
idsAreUnique = set: idAttr: !(fold (name: args@{ dup, acc }: idsAreUnique = set: idAttr: !(foldr (name: args@{ dup, acc }:
let let
id = builtins.toString (builtins.getAttr idAttr (builtins.getAttr name set)); id = builtins.toString (builtins.getAttr idAttr (builtins.getAttr name set));
exists = builtins.hasAttr id acc; exists = builtins.hasAttr id acc;

View file

@ -35,6 +35,14 @@ in {
''; '';
}; };
hardware.wirelessRegulatoryDatabase = mkOption {
default = false;
type = types.bool;
description = ''
Load the wireless regulatory database at boot.
'';
};
}; };
@ -50,6 +58,7 @@ in {
rtl8723bs-firmware rtl8723bs-firmware
rtl8761b-firmware rtl8761b-firmware
rtw88-firmware rtw88-firmware
rtw89-firmware
zd1211fw zd1211fw
alsa-firmware alsa-firmware
sof-firmware sof-firmware
@ -58,6 +67,7 @@ in {
++ optionals (versionOlder config.boot.kernelPackages.kernel.version "4.13") [ ++ optionals (versionOlder config.boot.kernelPackages.kernel.version "4.13") [
rtl8723bs-firmware rtl8723bs-firmware
]; ];
hardware.wirelessRegulatoryDatabase = true;
}) })
(mkIf cfg.enableAllFirmware { (mkIf cfg.enableAllFirmware {
assertions = [{ assertions = [{
@ -75,5 +85,8 @@ in {
b43FirmwareCutter b43FirmwareCutter
] ++ optional (pkgs.stdenv.hostPlatform.isi686 || pkgs.stdenv.hostPlatform.isx86_64) facetimehd-firmware; ] ++ optional (pkgs.stdenv.hostPlatform.isi686 || pkgs.stdenv.hostPlatform.isx86_64) facetimehd-firmware;
}) })
(mkIf cfg.wirelessRegulatoryDatabase {
hardware.firmware = [ pkgs.wireless-regdb ];
})
]; ];
} }

View file

@ -654,7 +654,11 @@ in
]; ];
fileSystems."/" = fileSystems."/" =
{ fsType = "tmpfs"; # This module is often over-layed onto an existing host config
# that defines `/`. We use mkOverride 60 to override standard
# values, but at the same time leave room for mkForce values
# targeted at the image build.
{ fsType = mkOverride 60 "tmpfs";
options = [ "mode=0755" ]; options = [ "mode=0755" ];
}; };

View file

@ -30,7 +30,11 @@ with lib;
else [ pkgs.grub2 pkgs.syslinux ]); else [ pkgs.grub2 pkgs.syslinux ]);
fileSystems."/" = fileSystems."/" =
{ fsType = "tmpfs"; # This module is often over-layed onto an existing host config
# that defines `/`. We use mkOverride 60 to override standard
# values, but at the same time leave room for mkForce values
# targeted at the image build.
{ fsType = mkOverride 60 "tmpfs";
options = [ "mode=0755" ]; options = [ "mode=0755" ];
}; };

View file

@ -1,7 +1,7 @@
{ {
x86_64-linux = "/nix/store/qsgz2hhn6mzlzp53a7pwf9z2pq3l5z6h-nix-2.3.14"; x86_64-linux = "/nix/store/jhbxh1jwjc3hjhzs9y2hifdn0rmnfwaj-nix-2.3.15";
i686-linux = "/nix/store/1yw40bj04lykisw2jilq06lir3k9ga4a-nix-2.3.14"; i686-linux = "/nix/store/9pspwnkdrgzma1l4xlv7arhwa56y16di-nix-2.3.15";
aarch64-linux = "/nix/store/32yzwmynmjxfrkb6y6l55liaqdrgkj4a-nix-2.3.14"; aarch64-linux = "/nix/store/72aqi5g7f4fhgvgafbcqwcpqjgnczj48-nix-2.3.15";
x86_64-darwin = "/nix/store/06j0vi2d13w4l0p3jsigq7lk4x6gkycj-nix-2.3.14"; x86_64-darwin = "/nix/store/6p6qwp73dgfkqhynmxrzbx1lcfgfpqal-nix-2.3.15";
aarch64-darwin = "/nix/store/77wi7vpbrghw5rgws25w30bwb8yggnk9-nix-2.3.14"; aarch64-darwin = "/nix/store/dmq2vksdhssgfl822shd0ky3x5x0klh4-nix-2.3.15";
} }

View file

@ -258,8 +258,7 @@ in
environment.systemPackages = [] environment.systemPackages = []
++ optional cfg.man.enable manual.manpages ++ optional cfg.man.enable manual.manpages
++ optionals cfg.doc.enable ([ manual.manualHTML nixos-help ] ++ optionals cfg.doc.enable [ manual.manualHTML nixos-help ];
++ optionals config.services.xserver.enable [ pkgs.nixos-icons ]);
services.getty.helpLine = mkIf cfg.doc.enable ( services.getty.helpLine = mkIf cfg.doc.enable (
"\nRun 'nixos-help' for the NixOS manual." "\nRun 'nixos-help' for the NixOS manual."

View file

@ -187,6 +187,7 @@ in
#seeks = 148; # removed 2020-06-21 #seeks = 148; # removed 2020-06-21
prosody = 149; prosody = 149;
i2pd = 150; i2pd = 150;
systemd-coredump = 151;
systemd-network = 152; systemd-network = 152;
systemd-resolve = 153; systemd-resolve = 153;
systemd-timesync = 154; systemd-timesync = 154;
@ -347,6 +348,8 @@ in
#mailman = 316; # removed 2019-08-30 #mailman = 316; # removed 2019-08-30
zigbee2mqtt = 317; zigbee2mqtt = 317;
# shadow = 318; # unused # shadow = 318; # unused
hqplayer = 319;
moonraker = 320;
# When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399! # When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
@ -649,6 +652,8 @@ in
#mailman = 316; # removed 2019-08-30 #mailman = 316; # removed 2019-08-30
zigbee2mqtt = 317; zigbee2mqtt = 317;
shadow = 318; shadow = 318;
hqplayer = 319;
moonraker = 320;
# When adding a gid, make sure it doesn't match an existing # When adding a gid, make sure it doesn't match an existing
# uid. Users and groups with the same name should have equal # uid. Users and groups with the same name should have equal

View file

@ -39,7 +39,7 @@ let
if c x then true if c x then true
else lib.traceSeqN 1 x false; else lib.traceSeqN 1 x false;
in traceXIfNot isConfig; in traceXIfNot isConfig;
merge = args: fold (def: mergeConfig def.value) {}; merge = args: foldr (def: mergeConfig def.value) {};
}; };
overlayType = mkOptionType { overlayType = mkOptionType {

View file

@ -103,9 +103,10 @@ in
'' ''
NAME=NixOS NAME=NixOS
ID=nixos ID=nixos
VERSION="${cfg.version} (${cfg.codeName})" VERSION="${cfg.release} (${cfg.codeName})"
VERSION_CODENAME=${toLower cfg.codeName} VERSION_CODENAME=${toLower cfg.codeName}
VERSION_ID="${cfg.version}" VERSION_ID="${cfg.release}"
BUILD_ID="${cfg.version}"
PRETTY_NAME="NixOS ${cfg.release} (${cfg.codeName})" PRETTY_NAME="NixOS ${cfg.release} (${cfg.codeName})"
LOGO="nix-snowflake" LOGO="nix-snowflake"
HOME_URL="https://nixos.org/" HOME_URL="https://nixos.org/"

View file

@ -236,6 +236,7 @@
./security/doas.nix ./security/doas.nix
./security/systemd-confinement.nix ./security/systemd-confinement.nix
./security/tpm2.nix ./security/tpm2.nix
./services/admin/meshcentral.nix
./services/admin/oxidized.nix ./services/admin/oxidized.nix
./services/admin/salt/master.nix ./services/admin/salt/master.nix
./services/admin/salt/minion.nix ./services/admin/salt/minion.nix
@ -243,13 +244,16 @@
./services/amqp/rabbitmq.nix ./services/amqp/rabbitmq.nix
./services/audio/alsa.nix ./services/audio/alsa.nix
./services/audio/botamusique.nix ./services/audio/botamusique.nix
./services/audio/jack.nix ./services/audio/hqplayerd.nix
./services/audio/icecast.nix ./services/audio/icecast.nix
./services/audio/jack.nix
./services/audio/jmusicbot.nix ./services/audio/jmusicbot.nix
./services/audio/liquidsoap.nix ./services/audio/liquidsoap.nix
./services/audio/mpd.nix ./services/audio/mpd.nix
./services/audio/mpdscribble.nix ./services/audio/mpdscribble.nix
./services/audio/mopidy.nix ./services/audio/mopidy.nix
./services/audio/networkaudiod.nix
./services/audio/roon-bridge.nix
./services/audio/roon-server.nix ./services/audio/roon-server.nix
./services/audio/slimserver.nix ./services/audio/slimserver.nix
./services/audio/snapserver.nix ./services/audio/snapserver.nix
@ -519,6 +523,7 @@
./services/misc/logkeys.nix ./services/misc/logkeys.nix
./services/misc/leaps.nix ./services/misc/leaps.nix
./services/misc/lidarr.nix ./services/misc/lidarr.nix
./services/misc/libreddit.nix
./services/misc/lifecycled.nix ./services/misc/lifecycled.nix
./services/misc/mame.nix ./services/misc/mame.nix
./services/misc/matrix-appservice-discord.nix ./services/misc/matrix-appservice-discord.nix
@ -528,8 +533,11 @@
./services/misc/mbpfan.nix ./services/misc/mbpfan.nix
./services/misc/mediatomb.nix ./services/misc/mediatomb.nix
./services/misc/metabase.nix ./services/misc/metabase.nix
./services/misc/moonraker.nix
./services/misc/mwlib.nix ./services/misc/mwlib.nix
./services/misc/mx-puppet-discord.nix
./services/misc/n8n.nix ./services/misc/n8n.nix
./services/misc/nitter.nix
./services/misc/nix-daemon.nix ./services/misc/nix-daemon.nix
./services/misc/nix-gc.nix ./services/misc/nix-gc.nix
./services/misc/nix-optimise.nix ./services/misc/nix-optimise.nix
@ -633,6 +641,7 @@
./services/network-filesystems/glusterfs.nix ./services/network-filesystems/glusterfs.nix
./services/network-filesystems/kbfs.nix ./services/network-filesystems/kbfs.nix
./services/network-filesystems/ipfs.nix ./services/network-filesystems/ipfs.nix
./services/network-filesystems/litestream/default.nix
./services/network-filesystems/netatalk.nix ./services/network-filesystems/netatalk.nix
./services/network-filesystems/nfsd.nix ./services/network-filesystems/nfsd.nix
./services/network-filesystems/openafs/client.nix ./services/network-filesystems/openafs/client.nix
@ -929,6 +938,7 @@
./services/wayland/cage.nix ./services/wayland/cage.nix
./services/video/epgstation/default.nix ./services/video/epgstation/default.nix
./services/video/mirakurun.nix ./services/video/mirakurun.nix
./services/video/replay-sorcery.nix
./services/web-apps/atlassian/confluence.nix ./services/web-apps/atlassian/confluence.nix
./services/web-apps/atlassian/crowd.nix ./services/web-apps/atlassian/crowd.nix
./services/web-apps/atlassian/jira.nix ./services/web-apps/atlassian/jira.nix
@ -960,6 +970,7 @@
./services/web-apps/moodle.nix ./services/web-apps/moodle.nix
./services/web-apps/nextcloud.nix ./services/web-apps/nextcloud.nix
./services/web-apps/nexus.nix ./services/web-apps/nexus.nix
./services/web-apps/node-red.nix
./services/web-apps/plantuml-server.nix ./services/web-apps/plantuml-server.nix
./services/web-apps/plausible.nix ./services/web-apps/plausible.nix
./services/web-apps/pgpkeyserver-lite.nix ./services/web-apps/pgpkeyserver-lite.nix

View file

@ -27,6 +27,7 @@ in
browser = mkOption { browser = mkOption {
type = types.str; type = types.str;
default = concatStringsSep " " [ default = concatStringsSep " " [
''env XDG_CONFIG_HOME="$PREV_CONFIG_HOME"''
''${pkgs.chromium}/bin/chromium'' ''${pkgs.chromium}/bin/chromium''
''--user-data-dir=''${XDG_DATA_HOME:-$HOME/.local/share}/chromium-captive'' ''--user-data-dir=''${XDG_DATA_HOME:-$HOME/.local/share}/chromium-captive''
''--proxy-server="socks5://$PROXY"'' ''--proxy-server="socks5://$PROXY"''
@ -111,6 +112,7 @@ in
security.wrappers.captive-browser = { security.wrappers.captive-browser = {
capabilities = "cap_net_raw+p"; capabilities = "cap_net_raw+p";
source = pkgs.writeShellScript "captive-browser" '' source = pkgs.writeShellScript "captive-browser" ''
export PREV_CONFIG_HOME="$XDG_CONFIG_HOME"
export XDG_CONFIG_HOME=${pkgs.writeTextDir "captive-browser.toml" '' export XDG_CONFIG_HOME=${pkgs.writeTextDir "captive-browser.toml" ''
browser = """${cfg.browser}""" browser = """${cfg.browser}"""
dhcp-dns = """${cfg.dhcp-dns}""" dhcp-dns = """${cfg.dhcp-dns}"""

View file

@ -7,18 +7,7 @@ let
runtime' = filter (f: f.enable) (attrValues cfg.runtime); runtime' = filter (f: f.enable) (attrValues cfg.runtime);
# taken from the etc module runtime = pkgs.linkFarm "neovim-runtime" (map (x: { name = x.target; path = x.source; }) runtime');
runtime = pkgs.stdenvNoCC.mkDerivation {
name = "runtime";
builder = ../system/etc/make-etc.sh;
preferLocalBuild = true;
allowSubstitutes = false;
sources = map (x: x.source) runtime';
targets = map (x: x.target) runtime';
};
in { in {
options.programs.neovim = { options.programs.neovim = {

View file

@ -278,7 +278,10 @@ in
fi fi
''; '';
environment.etc.zinputrc.source = ./zinputrc; # Bug in nix flakes:
# If we use `.source` here the path is garbage collected also we point to it with a symlink
# see https://github.com/NixOS/nixpkgs/issues/132732
environment.etc.zinputrc.text = builtins.readFile ./zinputrc;
environment.systemPackages = environment.systemPackages =
let let

View file

@ -0,0 +1,53 @@
{ config, pkgs, lib, ... }:
let
cfg = config.services.meshcentral;
configFormat = pkgs.formats.json {};
configFile = configFormat.generate "meshcentral-config.json" cfg.settings;
in with lib; {
options.services.meshcentral = with types; {
enable = mkEnableOption "MeshCentral computer management server";
package = mkOption {
description = "MeshCentral package to use. Replacing this may be necessary to add dependencies for extra functionality.";
type = types.package;
default = pkgs.meshcentral;
defaultText = "pkgs.meshcentral";
};
settings = mkOption {
description = ''
Settings for MeshCentral. Refer to upstream documentation for details:
<itemizedlist>
<listitem><para><link xlink:href="https://github.com/Ylianst/MeshCentral/blob/master/meshcentral-config-schema.json">JSON Schema definition</link></para></listitem>
<listitem><para><link xlink:href="https://github.com/Ylianst/MeshCentral/blob/master/sample-config.json">simple sample configuration</link></para></listitem>
<listitem><para><link xlink:href="https://github.com/Ylianst/MeshCentral/blob/master/sample-config-advanced.json">complex sample configuration</link></para></listitem>
<listitem><para><link xlink:href="https://www.meshcommander.com/meshcentral2">Old homepage) with documentation link</link></para></listitem>
</itemizedlist>
'';
type = types.submodule {
freeformType = configFormat.type;
};
example = {
settings = {
WANonly = true;
Cert = "meshcentral.example.com";
TlsOffload = "10.0.0.2,fd42::2";
Port = 4430;
};
domains."".certUrl = "https://meshcentral.example.com/";
};
};
};
config = mkIf cfg.enable {
services.meshcentral.settings.settings.autoBackup.backupPath = lib.mkDefault "/var/lib/meshcentral/backups";
systemd.services.meshcentral = {
wantedBy = ["multi-user.target"];
serviceConfig = {
ExecStart = "${cfg.package}/bin/meshcentral --datapath /var/lib/meshcentral --configfile ${configFile}";
DynamicUser = true;
StateDirectory = "meshcentral";
CacheDirectory = "meshcentral";
};
};
};
meta.maintainers = [ maintainers.lheckemann ];
}

View file

@ -0,0 +1,129 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.hqplayerd;
pkg = pkgs.hqplayerd;
# XXX: This is hard-coded in the distributed binary, don't try to change it.
stateDir = "/var/lib/hqplayer";
configDir = "/etc/hqplayer";
in
{
options = {
services.hqplayerd = {
enable = mkEnableOption "HQPlayer Embedded";
licenseFile = mkOption {
type = types.nullOr types.path;
default = null;
description = ''
Path to the HQPlayer license key file.
Without this, the service will run in trial mode and restart every 30
minutes.
'';
};
auth = {
username = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Username used for HQPlayer's WebUI.
Without this you will need to manually create the credentials after
first start by going to http://your.ip/8088/auth
'';
};
password = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Password used for HQPlayer's WebUI.
Without this you will need to manually create the credentials after
first start by going to http://your.ip/8088/auth
'';
};
};
openFirewall = mkOption {
type = types.bool;
default = false;
description = ''
Open TCP port 8088 in the firewall for the server.
'';
};
};
};
config = mkIf cfg.enable {
assertions = [
{
assertion = (cfg.auth.username != null -> cfg.auth.password != null)
&& (cfg.auth.password != null -> cfg.auth.username != null);
message = "You must set either both services.hqplayer.auth.username and password, or neither.";
}
];
environment = {
etc = {
"hqplayer/hqplayerd4-key.xml" = mkIf (cfg.licenseFile != null) { source = cfg.licenseFile; };
"modules-load.d/taudio2.conf".source = "${pkg}/etc/modules-load.d/taudio2.conf";
};
systemPackages = [ pkg ];
};
networking.firewall = mkIf cfg.openFirewall {
allowedTCPPorts = [ 8088 ];
};
services.udev.packages = [ pkg ];
systemd = {
tmpfiles.rules = [
"d ${configDir} 0755 hqplayer hqplayer - -"
"d ${stateDir} 0755 hqplayer hqplayer - -"
"d ${stateDir}/home 0755 hqplayer hqplayer - -"
];
packages = [ pkg ];
services.hqplayerd = {
wantedBy = [ "multi-user.target" ];
after = [ "systemd-tmpfiles-setup.service" ];
environment.HOME = "${stateDir}/home";
unitConfig.ConditionPathExists = [ configDir stateDir ];
preStart = ''
cp -r "${pkg}/var/lib/hqplayer/web" "${stateDir}"
chmod -R u+wX "${stateDir}/web"
if [ ! -f "${configDir}/hqplayerd.xml" ]; then
echo "creating initial config file"
install -m 0644 "${pkg}/etc/hqplayer/hqplayerd.xml" "${configDir}/hqplayerd.xml"
fi
'' + optionalString (cfg.auth.username != null && cfg.auth.password != null) ''
${pkg}/bin/hqplayerd -s ${cfg.auth.username} ${cfg.auth.password}
'';
};
};
users.groups = {
hqplayer.gid = config.ids.gids.hqplayer;
};
users.users = {
hqplayer = {
description = "hqplayer daemon user";
extraGroups = [ "audio" ];
group = "hqplayer";
uid = config.ids.uids.hqplayer;
};
};
};
}

View file

@ -0,0 +1,19 @@
{ config, lib, pkgs, ... }:
with lib;
let
name = "networkaudiod";
cfg = config.services.networkaudiod;
in {
options = {
services.networkaudiod = {
enable = mkEnableOption "Networkaudiod (NAA)";
};
};
config = mkIf cfg.enable {
systemd.packages = [ pkgs.networkaudiod ];
systemd.services.networkaudiod.wantedBy = [ "multi-user.target" ];
};
}

View file

@ -7,28 +7,49 @@ let
cfg = config.services.postgresqlBackup; cfg = config.services.postgresqlBackup;
postgresqlBackupService = db: dumpCmd: postgresqlBackupService = db: dumpCmd:
{ let
compressSuffixes = {
"none" = "";
"gzip" = ".gz";
"zstd" = ".zstd";
};
compressSuffix = getAttr cfg.compression compressSuffixes;
compressCmd = getAttr cfg.compression {
"none" = "cat";
"gzip" = "${pkgs.gzip}/bin/gzip -c";
"zstd" = "${pkgs.zstd}/bin/zstd -c";
};
mkSqlPath = prefix: suffix: "${cfg.location}/${db}${prefix}.sql${suffix}";
curFile = mkSqlPath "" compressSuffix;
prevFile = mkSqlPath ".prev" compressSuffix;
prevFiles = map (mkSqlPath ".prev") (attrValues compressSuffixes);
inProgressFile = mkSqlPath ".in-progress" compressSuffix;
in {
enable = true; enable = true;
description = "Backup of ${db} database(s)"; description = "Backup of ${db} database(s)";
requires = [ "postgresql.service" ]; requires = [ "postgresql.service" ];
path = [ pkgs.coreutils pkgs.gzip config.services.postgresql.package ]; path = [ pkgs.coreutils config.services.postgresql.package ];
script = '' script = ''
set -e -o pipefail set -e -o pipefail
umask 0077 # ensure backup is only readable by postgres user umask 0077 # ensure backup is only readable by postgres user
if [ -e ${cfg.location}/${db}.sql.gz ]; then if [ -e ${curFile} ]; then
mv ${cfg.location}/${db}.sql.gz ${cfg.location}/${db}.prev.sql.gz rm -f ${toString prevFiles}
mv ${curFile} ${prevFile}
fi fi
${dumpCmd} | \ ${dumpCmd} \
gzip -c > ${cfg.location}/${db}.in-progress.sql.gz | ${compressCmd} \
> ${inProgressFile}
mv ${cfg.location}/${db}.in-progress.sql.gz ${cfg.location}/${db}.sql.gz mv ${inProgressFile} ${curFile}
''; '';
serviceConfig = { serviceConfig = {
@ -87,7 +108,7 @@ in {
default = "/var/backup/postgresql"; default = "/var/backup/postgresql";
type = types.path; type = types.path;
description = '' description = ''
Location to put the gzipped PostgreSQL database dumps. Path of directory where the PostgreSQL database dumps will be placed.
''; '';
}; };
@ -101,6 +122,14 @@ in {
when no databases where specified. when no databases where specified.
''; '';
}; };
compression = mkOption {
type = types.enum ["none" "gzip" "zstd"];
default = "gzip";
description = ''
The type of compression to use on the generated database dump.
'';
};
}; };
}; };

View file

@ -52,7 +52,7 @@ let
use_template = mkOption { use_template = mkOption {
description = "Names of the templates to use for this dataset."; description = "Names of the templates to use for this dataset.";
type = types.listOf (types.enum (attrNames cfg.templates)); type = types.listOf (types.enum (attrNames cfg.templates));
default = []; default = [ ];
}; };
useTemplate = use_template; useTemplate = use_template;
@ -70,116 +70,127 @@ let
processChildrenOnly = process_children_only; processChildrenOnly = process_children_only;
}; };
# Extract pool names from configured datasets # Extract unique dataset names
pools = unique (map (d: head (builtins.match "([^/]+).*" d)) (attrNames cfg.datasets)); datasets = unique (attrNames cfg.datasets);
configFile = let # Function to build "zfs allow" and "zfs unallow" commands for the
mkValueString = v: # filesystems we've delegated permissions to.
if builtins.isList v then concatStringsSep "," v buildAllowCommand = zfsAction: permissions: dataset: lib.escapeShellArgs [
else generators.mkValueStringDefault {} v; # Here we explicitly use the booted system to guarantee the stable API needed by ZFS
"-+/run/booted-system/sw/bin/zfs"
zfsAction
"sanoid"
(concatStringsSep "," permissions)
dataset
];
mkKeyValue = k: v: if v == null then "" configFile =
else if k == "processChildrenOnly" then "" let
else if k == "useTemplate" then "" mkValueString = v:
else generators.mkKeyValueDefault { inherit mkValueString; } "=" k v; if builtins.isList v then concatStringsSep "," v
in generators.toINI { inherit mkKeyValue; } cfg.settings; else generators.mkValueStringDefault { } v;
in { mkKeyValue = k: v:
if v == null then ""
else if k == "processChildrenOnly" then ""
else if k == "useTemplate" then ""
else generators.mkKeyValueDefault { inherit mkValueString; } "=" k v;
in
generators.toINI { inherit mkKeyValue; } cfg.settings;
# Interface in
{
options.services.sanoid = { # Interface
enable = mkEnableOption "Sanoid ZFS snapshotting service";
interval = mkOption { options.services.sanoid = {
type = types.str; enable = mkEnableOption "Sanoid ZFS snapshotting service";
default = "hourly";
example = "daily";
description = ''
Run sanoid at this interval. The default is to run hourly.
The format is described in interval = mkOption {
<citerefentry><refentrytitle>systemd.time</refentrytitle> type = types.str;
<manvolnum>7</manvolnum></citerefentry>. default = "hourly";
''; example = "daily";
}; description = ''
Run sanoid at this interval. The default is to run hourly.
datasets = mkOption { The format is described in
type = types.attrsOf (types.submodule ({config, options, ...}: { <citerefentry><refentrytitle>systemd.time</refentrytitle>
freeformType = datasetSettingsType; <manvolnum>7</manvolnum></citerefentry>.
options = commonOptions // datasetOptions; '';
config.use_template = mkAliasDefinitions (mkDefault options.useTemplate or {});
config.process_children_only = mkAliasDefinitions (mkDefault options.processChildrenOnly or {});
}));
default = {};
description = "Datasets to snapshot.";
};
templates = mkOption {
type = types.attrsOf (types.submodule {
freeformType = datasetSettingsType;
options = commonOptions;
});
default = {};
description = "Templates for datasets.";
};
settings = mkOption {
type = types.attrsOf datasetSettingsType;
description = ''
Free-form settings written directly to the config file. See
<link xlink:href="https://github.com/jimsalterjrs/sanoid/blob/master/sanoid.defaults.conf"/>
for allowed values.
'';
};
extraArgs = mkOption {
type = types.listOf types.str;
default = [];
example = [ "--verbose" "--readonly" "--debug" ];
description = ''
Extra arguments to pass to sanoid. See
<link xlink:href="https://github.com/jimsalterjrs/sanoid/#sanoid-command-line-options"/>
for allowed options.
'';
};
}; };
# Implementation datasets = mkOption {
type = types.attrsOf (types.submodule ({ config, options, ... }: {
config = mkIf cfg.enable { freeformType = datasetSettingsType;
services.sanoid.settings = mkMerge [ options = commonOptions // datasetOptions;
(mapAttrs' (d: v: nameValuePair ("template_" + d) v) cfg.templates) config.use_template = mkAliasDefinitions (mkDefault options.useTemplate or { });
(mapAttrs (d: v: v) cfg.datasets) config.process_children_only = mkAliasDefinitions (mkDefault options.processChildrenOnly or { });
]; }));
default = { };
systemd.services.sanoid = { description = "Datasets to snapshot.";
description = "Sanoid snapshot service";
serviceConfig = {
ExecStartPre = map (pool: lib.escapeShellArgs [
"+/run/booted-system/sw/bin/zfs" "allow"
"sanoid" "snapshot,mount,destroy" pool
]) pools;
ExecStart = lib.escapeShellArgs ([
"${pkgs.sanoid}/bin/sanoid"
"--cron"
"--configdir" (pkgs.writeTextDir "sanoid.conf" configFile)
] ++ cfg.extraArgs);
ExecStopPost = map (pool: lib.escapeShellArgs [
"+/run/booted-system/sw/bin/zfs" "unallow" "sanoid" pool
]) pools;
User = "sanoid";
Group = "sanoid";
DynamicUser = true;
RuntimeDirectory = "sanoid";
CacheDirectory = "sanoid";
};
# Prevents missing snapshots during DST changes
environment.TZ = "UTC";
after = [ "zfs.target" ];
startAt = cfg.interval;
};
}; };
meta.maintainers = with maintainers; [ lopsided98 ]; templates = mkOption {
} type = types.attrsOf (types.submodule {
freeformType = datasetSettingsType;
options = commonOptions;
});
default = { };
description = "Templates for datasets.";
};
settings = mkOption {
type = types.attrsOf datasetSettingsType;
description = ''
Free-form settings written directly to the config file. See
<link xlink:href="https://github.com/jimsalterjrs/sanoid/blob/master/sanoid.defaults.conf"/>
for allowed values.
'';
};
extraArgs = mkOption {
type = types.listOf types.str;
default = [ ];
example = [ "--verbose" "--readonly" "--debug" ];
description = ''
Extra arguments to pass to sanoid. See
<link xlink:href="https://github.com/jimsalterjrs/sanoid/#sanoid-command-line-options"/>
for allowed options.
'';
};
};
# Implementation
config = mkIf cfg.enable {
services.sanoid.settings = mkMerge [
(mapAttrs' (d: v: nameValuePair ("template_" + d) v) cfg.templates)
(mapAttrs (d: v: v) cfg.datasets)
];
systemd.services.sanoid = {
description = "Sanoid snapshot service";
serviceConfig = {
ExecStartPre = (map (buildAllowCommand "allow" [ "snapshot" "mount" "destroy" ]) datasets);
ExecStopPost = (map (buildAllowCommand "unallow" [ "snapshot" "mount" "destroy" ]) datasets);
ExecStart = lib.escapeShellArgs ([
"${pkgs.sanoid}/bin/sanoid"
"--cron"
"--configdir"
(pkgs.writeTextDir "sanoid.conf" configFile)
] ++ cfg.extraArgs);
User = "sanoid";
Group = "sanoid";
DynamicUser = true;
RuntimeDirectory = "sanoid";
CacheDirectory = "sanoid";
};
# Prevents missing snapshots during DST changes
environment.TZ = "UTC";
after = [ "zfs.target" ];
startAt = cfg.interval;
};
};
meta.maintainers = with maintainers; [ lopsided98 ];
}

View file

@ -5,212 +5,315 @@ with lib;
let let
cfg = config.services.syncoid; cfg = config.services.syncoid;
# Extract pool names of local datasets (ones that don't contain "@") that # Extract local dasaset names (so no datasets containing "@")
# have the specified type (either "source" or "target") localDatasetName = d: optionals (d != null) (
getPools = type: unique (map (d: head (builtins.match "([^/]+).*" d)) ( let m = builtins.match "([^/@]+[^@]*)" d; in
# Filter local datasets optionals (m != null) m
filter (d: !hasInfix "@" d) );
# Get datasets of the specified type
(catAttrs type (attrValues cfg.commands))
));
in {
# Interface # Escape as required by: https://www.freedesktop.org/software/systemd/man/systemd.unit.html
escapeUnitName = name:
lib.concatMapStrings (s: if lib.isList s then "-" else s)
(builtins.split "[^a-zA-Z0-9_.\\-]+" name);
options.services.syncoid = { # Function to build "zfs allow" and "zfs unallow" commands for the
enable = mkEnableOption "Syncoid ZFS synchronization service"; # filesystems we've delegated permissions to.
buildAllowCommand = zfsAction: permissions: dataset: lib.escapeShellArgs [
# Here we explicitly use the booted system to guarantee the stable API needed by ZFS
"-+/run/booted-system/sw/bin/zfs"
zfsAction
cfg.user
(concatStringsSep "," permissions)
dataset
];
in
{
interval = mkOption { # Interface
type = types.str;
default = "hourly";
example = "*-*-* *:15:00";
description = ''
Run syncoid at this interval. The default is to run hourly.
The format is described in options.services.syncoid = {
<citerefentry><refentrytitle>systemd.time</refentrytitle> enable = mkEnableOption "Syncoid ZFS synchronization service";
<manvolnum>7</manvolnum></citerefentry>.
'';
};
user = mkOption { interval = mkOption {
type = types.str; type = types.str;
default = "syncoid"; default = "hourly";
example = "backup"; example = "*-*-* *:15:00";
description = '' description = ''
The user for the service. ZFS privilege delegation will be Run syncoid at this interval. The default is to run hourly.
automatically configured for any local pools used by syncoid if this
option is set to a user other than root. The user will be given the
"hold" and "send" privileges on any pool that has datasets being sent
and the "create", "mount", "receive", and "rollback" privileges on
any pool that has datasets being received.
'';
};
group = mkOption { The format is described in
type = types.str; <citerefentry><refentrytitle>systemd.time</refentrytitle>
default = "syncoid"; <manvolnum>7</manvolnum></citerefentry>.
example = "backup"; '';
description = "The group for the service."; };
};
sshKey = mkOption { user = mkOption {
type = types.nullOr types.path; type = types.str;
# Prevent key from being copied to store default = "syncoid";
apply = mapNullable toString; example = "backup";
default = null; description = ''
description = '' The user for the service. ZFS privilege delegation will be
SSH private key file to use to login to the remote system. Can be automatically configured for any local pools used by syncoid if this
overridden in individual commands. option is set to a user other than root. The user will be given the
''; "hold" and "send" privileges on any pool that has datasets being sent
}; and the "create", "mount", "receive", and "rollback" privileges on
any pool that has datasets being received.
'';
};
commonArgs = mkOption { group = mkOption {
type = types.listOf types.str; type = types.str;
default = []; default = "syncoid";
example = [ "--no-sync-snap" ]; example = "backup";
description = '' description = "The group for the service.";
Arguments to add to every syncoid command, unless disabled for that };
command. See
<link xlink:href="https://github.com/jimsalterjrs/sanoid/#syncoid-command-line-options"/>
for available options.
'';
};
commands = mkOption { sshKey = mkOption {
type = types.attrsOf (types.submodule ({ name, ... }: { type = types.nullOr types.path;
options = { # Prevent key from being copied to store
source = mkOption { apply = mapNullable toString;
type = types.str; default = null;
example = "pool/dataset"; description = ''
description = '' SSH private key file to use to login to the remote system. Can be
Source ZFS dataset. Can be either local or remote. Defaults to overridden in individual commands.
the attribute name. '';
''; };
};
target = mkOption { commonArgs = mkOption {
type = types.str; type = types.listOf types.str;
example = "user@server:pool/dataset"; default = [ ];
description = '' example = [ "--no-sync-snap" ];
Target ZFS dataset. Can be either local description = ''
(<replaceable>pool/dataset</replaceable>) or remote Arguments to add to every syncoid command, unless disabled for that
(<replaceable>user@server:pool/dataset</replaceable>). command. See
''; <link xlink:href="https://github.com/jimsalterjrs/sanoid/#syncoid-command-line-options"/>
}; for available options.
'';
};
recursive = mkOption { service = mkOption {
type = types.bool; type = types.attrs;
default = false; default = { };
description = '' description = ''
Whether to also transfer child datasets. Systemd configuration common to all syncoid services.
''; '';
}; };
sshKey = mkOption { commands = mkOption {
type = types.nullOr types.path; type = types.attrsOf (types.submodule ({ name, ... }: {
# Prevent key from being copied to store options = {
apply = mapNullable toString; source = mkOption {
description = '' type = types.str;
SSH private key file to use to login to the remote system. example = "pool/dataset";
Defaults to <option>services.syncoid.sshKey</option> option. description = ''
''; Source ZFS dataset. Can be either local or remote. Defaults to
}; the attribute name.
'';
sendOptions = mkOption {
type = types.separatedString " ";
default = "";
example = "Lc e";
description = ''
Advanced options to pass to zfs send. Options are specified
without their leading dashes and separated by spaces.
'';
};
recvOptions = mkOption {
type = types.separatedString " ";
default = "";
example = "ux recordsize o compression=lz4";
description = ''
Advanced options to pass to zfs recv. Options are specified
without their leading dashes and separated by spaces.
'';
};
useCommonArgs = mkOption {
type = types.bool;
default = true;
description = ''
Whether to add the configured common arguments to this command.
'';
};
extraArgs = mkOption {
type = types.listOf types.str;
default = [];
example = [ "--sshport 2222" ];
description = "Extra syncoid arguments for this command.";
};
}; };
config = {
source = mkDefault name; target = mkOption {
sshKey = mkDefault cfg.sshKey; type = types.str;
example = "user@server:pool/dataset";
description = ''
Target ZFS dataset. Can be either local
(<replaceable>pool/dataset</replaceable>) or remote
(<replaceable>user@server:pool/dataset</replaceable>).
'';
}; };
}));
default = {}; recursive = mkEnableOption ''the transfer of child datasets'';
example = literalExample ''
sshKey = mkOption {
type = types.nullOr types.path;
# Prevent key from being copied to store
apply = mapNullable toString;
description = ''
SSH private key file to use to login to the remote system.
Defaults to <option>services.syncoid.sshKey</option> option.
'';
};
sendOptions = mkOption {
type = types.separatedString " ";
default = "";
example = "Lc e";
description = ''
Advanced options to pass to zfs send. Options are specified
without their leading dashes and separated by spaces.
'';
};
recvOptions = mkOption {
type = types.separatedString " ";
default = "";
example = "ux recordsize o compression=lz4";
description = ''
Advanced options to pass to zfs recv. Options are specified
without their leading dashes and separated by spaces.
'';
};
useCommonArgs = mkOption {
type = types.bool;
default = true;
description = ''
Whether to add the configured common arguments to this command.
'';
};
service = mkOption {
type = types.attrs;
default = { };
description = ''
Systemd configuration specific to this syncoid service.
'';
};
extraArgs = mkOption {
type = types.listOf types.str;
default = [ ];
example = [ "--sshport 2222" ];
description = "Extra syncoid arguments for this command.";
};
};
config = {
source = mkDefault name;
sshKey = mkDefault cfg.sshKey;
};
}));
default = { };
example = literalExample ''
{
"pool/test".target = "root@target:pool/test";
}
'';
description = "Syncoid commands to run.";
};
};
# Implementation
config = mkIf cfg.enable {
users = {
users = mkIf (cfg.user == "syncoid") {
syncoid = {
group = cfg.group;
isSystemUser = true;
# For syncoid to be able to create /var/lib/syncoid/.ssh/
# and to use custom ssh_config or known_hosts.
home = "/var/lib/syncoid";
createHome = false;
};
};
groups = mkIf (cfg.group == "syncoid") {
syncoid = { };
};
};
systemd.services = mapAttrs'
(name: c:
nameValuePair "syncoid-${escapeUnitName name}" (mkMerge [
{ {
"pool/test".target = "root@target:pool/test"; description = "Syncoid ZFS synchronization from ${c.source} to ${c.target}";
after = [ "zfs.target" ];
startAt = cfg.interval;
# syncoid may need zpool to get feature@extensible_dataset
path = [ "/run/booted-system/sw/bin/" ];
serviceConfig = {
ExecStartPre =
# Permissions snapshot and destroy are in case --no-sync-snap is not used
(map (buildAllowCommand "allow" [ "bookmark" "hold" "send" "snapshot" "destroy" ]) (localDatasetName c.source)) ++
(map (buildAllowCommand "allow" [ "create" "mount" "receive" "rollback" ]) (localDatasetName c.target));
ExecStopPost =
# Permissions snapshot and destroy are in case --no-sync-snap is not used
(map (buildAllowCommand "unallow" [ "bookmark" "hold" "send" "snapshot" "destroy" ]) (localDatasetName c.source)) ++
(map (buildAllowCommand "unallow" [ "create" "mount" "receive" "rollback" ]) (localDatasetName c.target));
ExecStart = lib.escapeShellArgs ([ "${pkgs.sanoid}/bin/syncoid" ]
++ optionals c.useCommonArgs cfg.commonArgs
++ optional c.recursive "-r"
++ optionals (c.sshKey != null) [ "--sshkey" c.sshKey ]
++ c.extraArgs
++ [
"--sendoptions"
c.sendOptions
"--recvoptions"
c.recvOptions
"--no-privilege-elevation"
c.source
c.target
]);
User = cfg.user;
Group = cfg.group;
StateDirectory = [ "syncoid" ];
StateDirectoryMode = "700";
# Prevent SSH control sockets of different syncoid services from interfering
PrivateTmp = true;
# Permissive access to /proc because syncoid
# calls ps(1) to detect ongoing `zfs receive`.
ProcSubset = "all";
ProtectProc = "default";
# The following options are only for optimizing:
# systemd-analyze security | grep syncoid-'*'
AmbientCapabilities = "";
CapabilityBoundingSet = "";
DeviceAllow = [ "/dev/zfs" ];
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateDevices = true;
PrivateMounts = true;
PrivateNetwork = mkDefault false;
PrivateUsers = true;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectSystem = "strict";
RemoveIPC = true;
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
RootDirectory = "/run/syncoid/${escapeUnitName name}";
RootDirectoryStartOnly = true;
BindPaths = [ "/dev/zfs" ];
BindReadOnlyPaths = [ builtins.storeDir "/etc" "/run" "/bin/sh" ];
# Avoid useless mounting of RootDirectory= in the own RootDirectory= of ExecStart='s mount namespace.
InaccessiblePaths = [ "-+/run/syncoid/${escapeUnitName name}" ];
MountAPIVFS = true;
# Create RootDirectory= in the host's mount namespace.
RuntimeDirectory = [ "syncoid/${escapeUnitName name}" ];
RuntimeDirectoryMode = "700";
SystemCallFilter = [
"@system-service"
# Groups in @system-service which do not contain a syscall listed by:
# perf stat -x, 2>perf.log -e 'syscalls:sys_enter_*' syncoid …
# awk >perf.syscalls -F "," '$1 > 0 {sub("syscalls:sys_enter_","",$3); print $3}' perf.log
# systemd-analyze syscall-filter | grep -v -e '#' | sed -e ':loop; /^[^ ]/N; s/\n //; t loop' | grep $(printf ' -e \\<%s\\>' $(cat perf.syscalls)) | cut -f 1 -d ' '
"~@aio"
"~@chown"
"~@keyring"
"~@memlock"
"~@privileged"
"~@resources"
"~@setuid"
"~@timer"
];
SystemCallArchitectures = "native";
# This is for BindPaths= and BindReadOnlyPaths=
# to allow traversal of directories they create in RootDirectory=.
UMask = "0066";
};
} }
''; cfg.service
description = "Syncoid commands to run."; c.service
}; ]))
}; cfg.commands;
};
# Implementation meta.maintainers = with maintainers; [ julm lopsided98 ];
}
config = mkIf cfg.enable {
users = {
users = mkIf (cfg.user == "syncoid") {
syncoid = {
group = cfg.group;
isSystemUser = true;
};
};
groups = mkIf (cfg.group == "syncoid") {
syncoid = {};
};
};
systemd.services.syncoid = {
description = "Syncoid ZFS synchronization service";
script = concatMapStringsSep "\n" (c: lib.escapeShellArgs
([ "${pkgs.sanoid}/bin/syncoid" ]
++ (optionals c.useCommonArgs cfg.commonArgs)
++ (optional c.recursive "-r")
++ (optionals (c.sshKey != null) [ "--sshkey" c.sshKey ])
++ c.extraArgs
++ [ "--sendoptions" c.sendOptions
"--recvoptions" c.recvOptions
"--no-privilege-elevation"
c.source c.target
])) (attrValues cfg.commands);
after = [ "zfs.target" ];
serviceConfig = {
ExecStartPre = let
allowCmd = permissions: pool: lib.escapeShellArgs [
"+/run/booted-system/sw/bin/zfs" "allow"
cfg.user (concatStringsSep "," permissions) pool
];
in
(map (allowCmd [ "hold" "send" "snapshot" "destroy" ]) (getPools "source")) ++
(map (allowCmd [ "create" "mount" "receive" "rollback" ]) (getPools "target"));
User = cfg.user;
Group = cfg.group;
};
startAt = cfg.interval;
};
};
meta.maintainers = with maintainers; [ lopsided98 ];
}

View file

@ -279,7 +279,7 @@ let
src_plan = plan; src_plan = plan;
tsformat = timestampFormat; tsformat = timestampFormat;
zend_delay = toString sendDelay; zend_delay = toString sendDelay;
} // fold (a: b: a // b) {} ( } // foldr (a: b: a // b) {} (
map mkDestAttrs (builtins.attrValues destinations) map mkDestAttrs (builtins.attrValues destinations)
); );

View file

@ -60,6 +60,45 @@ in {
sha256 = "02r440xcdsgi137k5lmmvp0z5w5fmk8g9mysq5pnysq1wl8sj6mw"; sha256 = "02r440xcdsgi137k5lmmvp0z5w5fmk8g9mysq5pnysq1wl8sj6mw";
}; };
}; };
corefile = mkOption {
description = ''
Custom coredns corefile configuration.
See: <link xlink:href="https://coredns.io/manual/toc/#configuration"/>.
'';
type = types.str;
default = ''
.:${toString ports.dns} {
errors
health :${toString ports.health}
kubernetes ${cfg.clusterDomain} in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
}
prometheus :${toString ports.metrics}
forward . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}'';
defaultText = ''
.:${toString ports.dns} {
errors
health :${toString ports.health}
kubernetes ''${config.services.kubernetes.addons.dns.clusterDomain} in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
}
prometheus :${toString ports.metrics}
forward . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}'';
};
}; };
config = mkIf cfg.enable { config = mkIf cfg.enable {
@ -151,20 +190,7 @@ in {
namespace = "kube-system"; namespace = "kube-system";
}; };
data = { data = {
Corefile = ".:${toString ports.dns} { Corefile = cfg.corefile;
errors
health :${toString ports.health}
kubernetes ${cfg.clusterDomain} in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
}
prometheus :${toString ports.metrics}
forward . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}";
}; };
}; };

View file

@ -189,7 +189,7 @@ in
# manually paste it in place. Just symlink. # manually paste it in place. Just symlink.
# otherwise, create the target file, ready for users to insert the token # otherwise, create the target file, ready for users to insert the token
mkdir -p $(dirname ${certmgrAPITokenPath}) mkdir -p "$(dirname "${certmgrAPITokenPath}")"
if [ -f "${cfsslAPITokenPath}" ]; then if [ -f "${cfsslAPITokenPath}" ]; then
ln -fs "${cfsslAPITokenPath}" "${certmgrAPITokenPath}" ln -fs "${cfsslAPITokenPath}" "${certmgrAPITokenPath}"
else else

View file

@ -339,6 +339,9 @@ in
<literal>CI_SERVER_URL=&lt;CI server URL&gt;</literal> <literal>CI_SERVER_URL=&lt;CI server URL&gt;</literal>
<literal>REGISTRATION_TOKEN=&lt;registration secret&gt;</literal> <literal>REGISTRATION_TOKEN=&lt;registration secret&gt;</literal>
WARNING: make sure to use quoted absolute path,
or it is going to be copied to Nix Store.
''; '';
}; };
registrationFlags = mkOption { registrationFlags = mkOption {
@ -523,7 +526,10 @@ in
}; };
}; };
config = mkIf cfg.enable { config = mkIf cfg.enable {
warnings = optional (cfg.configFile != null) "services.gitlab-runner.`configFile` is deprecated, please use services.gitlab-runner.`services`."; warnings = (mapAttrsToList
(n: v: "services.gitlab-runner.services.${n}.`registrationConfigFile` points to a file in Nix Store. You should use quoted absolute path to prevent this.")
(filterAttrs (n: v: isStorePath v.registrationConfigFile) cfg.services))
++ optional (cfg.configFile != null) "services.gitlab-runner.`configFile` is deprecated, please use services.gitlab-runner.`services`.";
environment.systemPackages = [ cfg.package ]; environment.systemPackages = [ cfg.package ];
systemd.services.gitlab-runner = { systemd.services.gitlab-runner = {
description = "Gitlab Runner"; description = "Gitlab Runner";

View file

@ -293,7 +293,8 @@ in
# Note: when changing the default, make it conditional on # Note: when changing the default, make it conditional on
# system.stateVersion to maintain compatibility with existing # system.stateVersion to maintain compatibility with existing
# systems! # systems!
mkDefault (if versionAtLeast config.system.stateVersion "20.03" then pkgs.postgresql_11 mkDefault (if versionAtLeast config.system.stateVersion "21.11" then pkgs.postgresql_13
else if versionAtLeast config.system.stateVersion "20.03" then pkgs.postgresql_11
else if versionAtLeast config.system.stateVersion "17.09" then pkgs.postgresql_9_6 else if versionAtLeast config.system.stateVersion "17.09" then pkgs.postgresql_9_6
else throw "postgresql_9_5 was removed, please upgrade your postgresql version."); else throw "postgresql_9_5 was removed, please upgrade your postgresql version.");

View file

@ -272,7 +272,7 @@ in {
} }
(mkIf (cfg.bind != null) { bind = cfg.bind; }) (mkIf (cfg.bind != null) { bind = cfg.bind; })
(mkIf (cfg.unixSocket != null) { unixsocket = cfg.unixSocket; unixsocketperm = "${toString cfg.unixSocketPerm}"; }) (mkIf (cfg.unixSocket != null) { unixsocket = cfg.unixSocket; unixsocketperm = "${toString cfg.unixSocketPerm}"; })
(mkIf (cfg.slaveOf != null) { slaveof = "${cfg.slaveOf.ip} ${cfg.slaveOf.port}"; }) (mkIf (cfg.slaveOf != null) { slaveof = "${cfg.slaveOf.ip} ${toString cfg.slaveOf.port}"; })
(mkIf (cfg.masterAuth != null) { masterauth = cfg.masterAuth; }) (mkIf (cfg.masterAuth != null) { masterauth = cfg.masterAuth; })
(mkIf (cfg.requirePass != null) { requirepass = cfg.requirePass; }) (mkIf (cfg.requirePass != null) { requirepass = cfg.requirePass; })
]; ];

View file

@ -5,8 +5,8 @@
with lib; with lib;
{ {
meta = { meta = with lib; {
maintainers = with maintainers; [ ]; maintainers = with maintainers; [ ] ++ teams.pantheon.members;
}; };
###### interface ###### interface

View file

@ -266,5 +266,7 @@ in
} // mapAttrs' appConfigToINICompatible cfg.appConfig); } // mapAttrs' appConfigToINICompatible cfg.appConfig);
}; };
meta.maintainers = with lib.maintainers; [ ]; meta = with lib; {
maintainers = with maintainers; [ ] ++ teams.pantheon.members;
};
} }

View file

@ -18,8 +18,8 @@ in
"") "")
]; ];
meta = { meta = with lib; {
maintainers = with maintainers; [ ]; maintainers = with maintainers; [ ] ++ teams.pantheon.members;
}; };
###### interface ###### interface

View file

@ -6,8 +6,8 @@ with lib;
{ {
meta = { meta = with lib; {
maintainers = with maintainers; [ ]; maintainers = with maintainers; [ ] ++ teams.pantheon.members;
}; };
###### interface ###### interface

View file

@ -4,7 +4,10 @@ with lib;
let let
pkg = pkgs.sane-backends; pkg = pkgs.sane-backends.override {
scanSnapDriversUnfree = config.hardware.sane.drivers.scanSnap.enable;
scanSnapDriversPackage = config.hardware.sane.drivers.scanSnap.package;
};
sanedConf = pkgs.writeTextFile { sanedConf = pkgs.writeTextFile {
name = "saned.conf"; name = "saned.conf";
@ -98,6 +101,28 @@ in
''; '';
}; };
hardware.sane.drivers.scanSnap.enable = mkOption {
type = types.bool;
default = false;
example = true;
description = ''
Whether to enable drivers for the Fujitsu ScanSnap scanners.
The driver files are unfree and extracted from the Windows driver image.
'';
};
hardware.sane.drivers.scanSnap.package = mkOption {
type = types.package;
default = pkgs.sane-drivers.epjitsu;
description = ''
Epjitsu driver package to use. Useful if you want to extract the driver files yourself.
The process is described in the <literal>/etc/sane.d/epjitsu.conf</literal> file in
the <literal>sane-backends</literal> package.
'';
};
services.saned.enable = mkOption { services.saned.enable = mkOption {
type = types.bool; type = types.bool;
default = false; default = false;

View file

@ -194,7 +194,7 @@ let
# We need to handle the last column specially here, because it's # We need to handle the last column specially here, because it's
# open-ended (command + args). # open-ended (command + args).
lines = [ labels labelDefaults ] ++ (map (l: init l ++ [""]) masterCf); lines = [ labels labelDefaults ] ++ (map (l: init l ++ [""]) masterCf);
in fold foldLine (genList (const 0) (length labels)) lines; in foldr foldLine (genList (const 0) (length labels)) lines;
# Pad a string with spaces from the right (opposite of fixedWidthString). # Pad a string with spaces from the right (opposite of fixedWidthString).
pad = width: str: let pad = width: str: let
@ -203,7 +203,7 @@ let
in str + optionalString (padWidth > 0) padding; in str + optionalString (padWidth > 0) padding;
# It's + 2 here, because that's the amount of spacing between columns. # It's + 2 here, because that's the amount of spacing between columns.
fullWidth = fold (width: acc: acc + width + 2) 0 maxWidths; fullWidth = foldr (width: acc: acc + width + 2) 0 maxWidths;
formatLine = line: concatStringsSep " " (zipListsWith pad maxWidths line); formatLine = line: concatStringsSep " " (zipListsWith pad maxWidths line);

View file

@ -523,19 +523,12 @@ in
''} ''}
# update all hooks' binary paths # update all hooks' binary paths
HOOKS=$(find ${cfg.repositoryRoot} -mindepth 4 -maxdepth 6 -type f -wholename "*git/hooks/*") ${gitea}/bin/gitea admin regenerate hooks
if [ "$HOOKS" ]
then
sed -ri 's,/nix/store/[a-z0-9.-]+/bin/gitea,${gitea}/bin/gitea,g' $HOOKS
sed -ri 's,/nix/store/[a-z0-9.-]+/bin/env,${pkgs.coreutils}/bin/env,g' $HOOKS
sed -ri 's,/nix/store/[a-z0-9.-]+/bin/bash,${pkgs.bash}/bin/bash,g' $HOOKS
sed -ri 's,/nix/store/[a-z0-9.-]+/bin/perl,${pkgs.perl}/bin/perl,g' $HOOKS
fi
# update command option in authorized_keys # update command option in authorized_keys
if [ -r ${cfg.stateDir}/.ssh/authorized_keys ] if [ -r ${cfg.stateDir}/.ssh/authorized_keys ]
then then
sed -ri 's,/nix/store/[a-z0-9.-]+/bin/gitea,${gitea}/bin/gitea,g' ${cfg.stateDir}/.ssh/authorized_keys ${gitea}/bin/gitea admin regenerate keys
fi fi
''; '';

View file

@ -30,8 +30,7 @@ in
apiSocket = mkOption { apiSocket = mkOption {
type = types.nullOr types.path; type = types.nullOr types.path;
default = null; default = "/run/klipper/api";
example = "/run/klipper/api";
description = "Path of the API socket to create."; description = "Path of the API socket to create.";
}; };

View file

@ -0,0 +1,66 @@
# NixOS module for Libreddit, a private (no-JS, no-tracking) front-end for Reddit.
{ config, lib, pkgs, ... }:
with lib;
let
  cfg = config.services.libreddit;
  # Command-line arguments for the libreddit binary; --redirect-https is only
  # appended when redirection is enabled.
  args = concatStringsSep " " ([
    "--port ${toString cfg.port}"
    "--address ${cfg.address}"
  ] ++ optional cfg.redirect "--redirect-https");
in
{
  options = {
    services.libreddit = {
      enable = mkEnableOption "Private front-end for Reddit";
      address = mkOption {
        default = "0.0.0.0";
        example = "127.0.0.1";
        type = types.str;
        description = "The address to listen on";
      };
      port = mkOption {
        default = 8080;
        example = 8000;
        type = types.port;
        description = "The port to listen on";
      };
      redirect = mkOption {
        type = types.bool;
        default = false;
        description = "Enable the redirecting to HTTPS";
      };
      openFirewall = mkOption {
        type = types.bool;
        default = false;
        description = "Open ports in the firewall for the libreddit web interface";
      };
    };
  };
  config = mkIf cfg.enable {
    systemd.services.libreddit = {
        description = "Private front-end for Reddit";
        wantedBy = [ "multi-user.target" ];
        after = [ "network.target" ];
        serviceConfig = {
          DynamicUser = true;
          ExecStart = "${pkgs.libreddit}/bin/libreddit ${args}";
          # Binding to a privileged port (< 1024) requires this capability.
          AmbientCapabilities = lib.mkIf (cfg.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
          Restart = "on-failure";
          RestartSec = "2s";
        };
    };
    networking.firewall = mkIf cfg.openFirewall {
      allowedTCPPorts = [ cfg.port ];
    };
  };
}

View file

@ -0,0 +1,135 @@
# NixOS module for Moonraker, an API web server for the Klipper 3D-printer firmware.
{ config, lib, pkgs, ... }:
with lib;
let
  pkg = pkgs.moonraker;
  cfg = config.services.moonraker;
  # Moonraker uses an INI-style config where list values are written as
  # indented continuation lines and key/value pairs are separated by ":".
  format = pkgs.formats.ini {
    # https://github.com/NixOS/nixpkgs/pull/121613#issuecomment-885241996
    listToValue = l:
      if builtins.length l == 1 then generators.mkValueStringDefault {} (head l)
      else lib.concatMapStrings (s: "\n  ${generators.mkValueStringDefault {} s}") l;
    mkKeyValue = generators.mkKeyValueDefault {} ":";
  };
in {
  options = {
    services.moonraker = {
      enable = mkEnableOption "Moonraker, an API web server for Klipper";
      klipperSocket = mkOption {
        type = types.path;
        default = config.services.klipper.apiSocket;
        description = "Path to Klipper's API socket.";
      };
      stateDir = mkOption {
        type = types.path;
        default = "/var/lib/moonraker";
        description = "The directory containing the Moonraker databases.";
      };
      configDir = mkOption {
        type = types.path;
        default = cfg.stateDir + "/config";
        description = ''
          The directory containing client-writable configuration files.

          Clients will be able to edit files in this directory via the API. This directory must be writable.
        '';
      };
      user = mkOption {
        type = types.str;
        default = "moonraker";
        description = "User account under which Moonraker runs.";
      };
      group = mkOption {
        type = types.str;
        default = "moonraker";
        description = "Group account under which Moonraker runs.";
      };
      address = mkOption {
        type = types.str;
        default = "127.0.0.1";
        example = "0.0.0.0";
        description = "The IP or host to listen on.";
      };
      port = mkOption {
        # types.port restricts values to the valid 0-65535 range and matches
        # the convention used by other service modules.
        type = types.port;
        default = 7125;
        description = "The port to listen on.";
      };
      settings = mkOption {
        type = format.type;
        default = { };
        example = {
          authorization = {
            trusted_clients = [ "10.0.0.0/24" ];
            cors_domains = [ "https://app.fluidd.xyz" ];
          };
        };
        description = ''
          Configuration for Moonraker. See the <link xlink:href="https://moonraker.readthedocs.io/en/latest/configuration/">documentation</link>
          for supported values.
        '';
      };
    };
  };

  config = mkIf cfg.enable {
    warnings = optional (cfg.settings ? update_manager)
      ''Enabling update_manager is not supported on NixOS and will lead to non-removable warnings in some clients.'';

    users.users = optionalAttrs (cfg.user == "moonraker") {
      moonraker = {
        group = cfg.group;
        uid = config.ids.uids.moonraker;
      };
    };

    users.groups = optionalAttrs (cfg.group == "moonraker") {
      moonraker.gid = config.ids.gids.moonraker;
    };

    # The effective config is the user settings with the connection/paths
    # section forced, so the service always matches the module options.
    environment.etc."moonraker.cfg".source = let
      forcedConfig = {
        server = {
          host = cfg.address;
          port = cfg.port;
          klippy_uds_address = cfg.klipperSocket;
          config_path = cfg.configDir;
          database_path = "${cfg.stateDir}/database";
        };
      };
      fullConfig = recursiveUpdate cfg.settings forcedConfig;
    in format.generate "moonraker.cfg" fullConfig;

    systemd.tmpfiles.rules = [
      "d '${cfg.stateDir}' - ${cfg.user} ${cfg.group} - -"
      "d '${cfg.configDir}' - ${cfg.user} ${cfg.group} - -"
    ];

    systemd.services.moonraker = {
      description = "Moonraker, an API web server for Klipper";
      wantedBy = [ "multi-user.target" ];
      after = [ "network.target" ]
        ++ optional config.services.klipper.enable "klipper.service";

      # Moonraker really wants its own config to be writable...
      script = ''
        cp /etc/moonraker.cfg ${cfg.configDir}/moonraker-temp.cfg
        chmod u+w ${cfg.configDir}/moonraker-temp.cfg
        exec ${pkg}/bin/moonraker -c ${cfg.configDir}/moonraker-temp.cfg
      '';

      serviceConfig = {
        WorkingDirectory = cfg.stateDir;
        Group = cfg.group;
        User = cfg.user;
      };
    };
  };
}

View file

@ -0,0 +1,120 @@
# NixOS module for mx-puppet-discord, a Discord puppeting bridge for Matrix.
{ config, pkgs, lib, ... }:
with lib;
let
  dataDir = "/var/lib/mx-puppet-discord";
  # The appservice registration file is generated on first start (see preStart).
  registrationFile = "${dataDir}/discord-registration.yaml";
  cfg = config.services.mx-puppet-discord;
  settingsFormat = pkgs.formats.json {};
  settingsFile = settingsFormat.generate "mx-puppet-discord-config.json" cfg.settings;
in {
  options = {
    services.mx-puppet-discord = {
      enable = mkEnableOption ''
        mx-puppet-discord is a discord puppeting bridge for matrix.
        It handles bridging private and group DMs, as well as Guilds (servers)
      '';
      settings = mkOption rec {
        # User-supplied settings are merged on top of the defaults below.
        apply = recursiveUpdate default;
        inherit (settingsFormat) type;
        default = {
          bridge.port = 8434;
          presence = {
            enabled = true;
            interval = 500;
          };
          provisioning.whitelist = [ ];
          relay.whitelist = [ ];

          # variables are preceded by a colon.
          namePatterns = {
            user = ":name";
            userOverride = ":displayname";
            room = ":name";
            group = ":name";
          };

          #defaults to sqlite but can be configured to use postgresql with
          #connstring
          database.filename = "${dataDir}/mx-puppet-discord/database.db";
          logging = {
            console = "info";
            lineDateFormat = "MMM-D HH:mm:ss.SSS";
          };
        };
        example = literalExample ''
          {
            bridge = {
              bindAddress = "localhost";
              domain = "example.com";
              homeserverUrl = "https://example.com";
            };

            provisioning.whitelist = [ "@admin:example.com" ];
            relay.whitelist = [ "@.*:example.com" ];
          }
        '';
        description = ''
          <filename>config.yaml</filename> configuration as a Nix attribute set.
          Configuration options should match those described in
          <link xlink:href="https://github.com/matrix-discord/mx-puppet-discord/blob/master/sample.config.yaml">
          sample.config.yaml</link>.
        '';
      };
      serviceDependencies = mkOption {
        type = with types; listOf str;
        default = optional config.services.matrix-synapse.enable "matrix-synapse.service";
        description = ''
          List of Systemd services to require and wait for when starting the application service.
        '';
      };
    };
  };

  config = mkIf cfg.enable {
    systemd.services.mx-puppet-discord = {
      description = ''
        mx-puppet-discord is a discord puppeting bridge for matrix.
        It handles bridging private and group DMs, as well as Guilds (servers).
      '';

      wantedBy = [ "multi-user.target" ];
      wants = [ "network-online.target" ] ++ cfg.serviceDependencies;
      after = [ "network-online.target" ] ++ cfg.serviceDependencies;

      preStart = ''
        # generate the appservice's registration file if absent
        if [ ! -f '${registrationFile}' ]; then
          ${pkgs.mx-puppet-discord}/bin/mx-puppet-discord -r -c ${settingsFile} \
          -f ${registrationFile}
        fi
      '';
      serviceConfig = {
        Type = "simple";
        Restart = "always";

        ProtectSystem = "strict";
        ProtectHome = true;
        ProtectKernelTunables = true;
        ProtectKernelModules = true;
        ProtectControlGroups = true;

        DynamicUser = true;
        PrivateTmp = true;
        WorkingDirectory = pkgs.mx-puppet-discord;
        StateDirectory = baseNameOf dataDir;
        # systemd parses UMask as octal; pass it as a string so the leading
        # zero survives (the Nix integer literal 0027 renders as "27").
        UMask = "0027";

        ExecStart = ''
          ${pkgs.mx-puppet-discord}/bin/mx-puppet-discord -c ${settingsFile}
        '';
      };
    };
  };
  meta.maintainers = with maintainers; [ govanify ];
}

View file

@ -0,0 +1,326 @@
# NixOS module for Nitter, an alternative Twitter front-end.
{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.services.nitter;
  # Nitter's config is INI with quoted string values; module options are
  # rendered first and then overlaid with freeform cfg.settings.
  configFile = pkgs.writeText "nitter.conf" ''
    ${generators.toINI {
      # String values need to be quoted
      mkKeyValue = generators.mkKeyValueDefault {
        mkValueString = v:
          if isString v then "\"" + (strings.escape ["\""] (toString v)) + "\""
          else generators.mkValueStringDefault {} v;
      } " = ";
    } (lib.recursiveUpdate {
      Server = cfg.server;
      Cache = cfg.cache;
      Config = cfg.config // { hmacKey = "@hmac@"; };
      Preferences = cfg.preferences;
    } cfg.settings)}
  '';
  # `hmac` is a secret used for cryptographic signing of video URLs.
  # Generate it on first launch, then copy configuration and replace
  # `@hmac@` with this value.
  # We are not using sed as it would leak the value in the command line.
  preStart = pkgs.writers.writePython3 "nitter-prestart" {} ''
    import os
    import secrets

    state_dir = os.environ.get("STATE_DIRECTORY")
    if not os.path.isfile(f"{state_dir}/hmac"):
        # Generate hmac on first launch
        hmac = secrets.token_hex(32)
        with open(f"{state_dir}/hmac", "w") as f:
            f.write(hmac)
    else:
        # Load previously generated hmac
        with open(f"{state_dir}/hmac", "r") as f:
            hmac = f.read()

    configFile = "${configFile}"
    with open(configFile, "r") as f_in:
        with open(f"{state_dir}/nitter.conf", "w") as f_out:
            f_out.write(f_in.read().replace("@hmac@", hmac))
  '';
in
{
  options = {
    services.nitter = {
      # mkEnableOption wraps its argument as "Whether to enable <text>.",
      # so the text must be a noun phrase, not a full sentence.
      enable = mkEnableOption "Nitter, an alternative Twitter front-end";

      server = {
        address = mkOption {
          type = types.str;
          default = "0.0.0.0";
          example = "127.0.0.1";
          description = "The address to listen on.";
        };

        port = mkOption {
          type = types.port;
          default = 8080;
          example = 8000;
          description = "The port to listen on.";
        };

        https = mkOption {
          type = types.bool;
          default = false;
          description = "Set secure attribute on cookies. Keep it disabled to enable cookies when not using HTTPS.";
        };

        httpMaxConnections = mkOption {
          type = types.int;
          default = 100;
          description = "Maximum number of HTTP connections.";
        };

        staticDir = mkOption {
          type = types.path;
          default = "${pkgs.nitter}/share/nitter/public";
          defaultText = "\${pkgs.nitter}/share/nitter/public";
          description = "Path to the static files directory.";
        };

        title = mkOption {
          type = types.str;
          default = "nitter";
          description = "Title of the instance.";
        };

        hostname = mkOption {
          type = types.str;
          default = "localhost";
          example = "nitter.net";
          description = "Hostname of the instance.";
        };
      };

      cache = {
        listMinutes = mkOption {
          type = types.int;
          default = 240;
          description = "How long to cache list info (not the tweets, so keep it high).";
        };

        rssMinutes = mkOption {
          type = types.int;
          default = 10;
          description = "How long to cache RSS queries.";
        };

        redisHost = mkOption {
          type = types.str;
          default = "localhost";
          description = "Redis host.";
        };

        redisPort = mkOption {
          type = types.port;
          default = 6379;
          description = "Redis port.";
        };

        redisConnections = mkOption {
          type = types.int;
          default = 20;
          description = "Redis connection pool size.";
        };

        redisMaxConnections = mkOption {
          type = types.int;
          default = 30;
          description = ''
            Maximum number of connections to Redis.

            New connections are opened when none are available, but if the
            pool size goes above this, they are closed when released, do not
            worry about this unless you receive tons of requests per second.
          '';
        };
      };

      config = {
        base64Media = mkOption {
          type = types.bool;
          default = false;
          description = "Use base64 encoding for proxied media URLs.";
        };

        tokenCount = mkOption {
          type = types.int;
          default = 10;
          description = ''
            Minimum amount of usable tokens.

            Tokens are used to authorize API requests, but they expire after
            ~1 hour, and have a limit of 187 requests. The limit gets reset
            every 15 minutes, and the pool is filled up so there is always at
            least tokenCount usable tokens. Only increase this if you receive
            major bursts all the time.
          '';
        };
      };

      preferences = {
        replaceTwitter = mkOption {
          type = types.str;
          default = "";
          example = "nitter.net";
          description = "Replace Twitter links with links to this instance (blank to disable).";
        };

        replaceYouTube = mkOption {
          type = types.str;
          default = "";
          example = "piped.kavin.rocks";
          description = "Replace YouTube links with links to this instance (blank to disable).";
        };

        replaceInstagram = mkOption {
          type = types.str;
          default = "";
          description = "Replace Instagram links with links to this instance (blank to disable).";
        };

        mp4Playback = mkOption {
          type = types.bool;
          default = true;
          description = "Enable MP4 video playback.";
        };

        hlsPlayback = mkOption {
          type = types.bool;
          default = false;
          description = "Enable HLS video streaming (requires JavaScript).";
        };

        proxyVideos = mkOption {
          type = types.bool;
          default = true;
          description = "Proxy video streaming through the server (might be slow).";
        };

        muteVideos = mkOption {
          type = types.bool;
          default = false;
          description = "Mute videos by default.";
        };

        autoplayGifs = mkOption {
          type = types.bool;
          default = true;
          description = "Autoplay GIFs.";
        };

        theme = mkOption {
          type = types.str;
          default = "Nitter";
          description = "Instance theme.";
        };

        infiniteScroll = mkOption {
          type = types.bool;
          default = false;
          description = "Infinite scrolling (requires JavaScript, experimental!).";
        };

        stickyProfile = mkOption {
          type = types.bool;
          default = true;
          description = "Make profile sidebar stick to top.";
        };

        bidiSupport = mkOption {
          type = types.bool;
          default = false;
          description = "Support bidirectional text (makes clicking on tweets harder).";
        };

        hideTweetStats = mkOption {
          type = types.bool;
          default = false;
          description = "Hide tweet stats (replies, retweets, likes).";
        };

        hideBanner = mkOption {
          type = types.bool;
          default = false;
          description = "Hide profile banner.";
        };

        hidePins = mkOption {
          type = types.bool;
          default = false;
          description = "Hide pinned tweets.";
        };

        hideReplies = mkOption {
          type = types.bool;
          default = false;
          description = "Hide tweet replies.";
        };
      };

      settings = mkOption {
        type = types.attrs;
        default = {};
        description = ''
          Add settings here to override NixOS module generated settings.

          Check the official repository for the available settings:
          https://github.com/zedeus/nitter/blob/master/nitter.conf
        '';
      };

      redisCreateLocally = mkOption {
        type = types.bool;
        default = true;
        description = "Configure local Redis server for Nitter.";
      };

      openFirewall = mkOption {
        type = types.bool;
        default = false;
        description = "Open ports in the firewall for Nitter web interface.";
      };
    };
  };

  config = mkIf cfg.enable {
    assertions = [
      {
        assertion = !cfg.redisCreateLocally || (cfg.cache.redisHost == "localhost" && cfg.cache.redisPort == 6379);
        message = "When services.nitter.redisCreateLocally is enabled, you need to use localhost:6379 as a cache server.";
      }
    ];

    systemd.services.nitter = {
        description = "Nitter (An alternative Twitter front-end)";
        wantedBy = [ "multi-user.target" ];
        after = [ "syslog.target" "network.target" ];
        serviceConfig = {
          DynamicUser = true;
          StateDirectory = "nitter";
          Environment = [ "NITTER_CONF_FILE=/var/lib/nitter/nitter.conf" ];
          # Some parts of Nitter expect `public` folder in working directory,
          # see https://github.com/zedeus/nitter/issues/414
          WorkingDirectory = "${pkgs.nitter}/share/nitter";
          ExecStart = "${pkgs.nitter}/bin/nitter";
          ExecStartPre = "${preStart}";
          AmbientCapabilities = lib.mkIf (cfg.server.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
          Restart = "on-failure";
          RestartSec = "5s";
        };
    };

    services.redis = lib.mkIf (cfg.redisCreateLocally) {
      enable = true;
    };

    networking.firewall = mkIf cfg.openFirewall {
      allowedTCPPorts = [ cfg.server.port ];
    };
  };
}

View file

@ -458,7 +458,7 @@ in
description = "The flake reference to which <option>from></option> is to be rewritten."; description = "The flake reference to which <option>from></option> is to be rewritten.";
}; };
flake = mkOption { flake = mkOption {
type = types.unspecified; type = types.nullOr types.attrs;
default = null; default = null;
example = literalExample "nixpkgs"; example = literalExample "nixpkgs";
description = '' description = ''

View file

@ -33,6 +33,7 @@ let
"domain" "domain"
"dovecot" "dovecot"
"fritzbox" "fritzbox"
"influxdb"
"json" "json"
"jitsi" "jitsi"
"kea" "kea"

View file

@ -0,0 +1,34 @@
# Prometheus exporter submodule for InfluxDB line-protocol metrics.
# Exporter submodules take `options` (not `...`) and return the attrset
# consumed by the generic prometheus-exporters wrapper.
{ config, lib, pkgs, options }:

with lib;

let
  cfg = config.services.prometheus.exporters.influxdb;
in
{
  port = 9122;
  extraOpts = {
    sampleExpiry = mkOption {
      type = types.str;
      default = "5m";
      example = "10m";
      description = "How long a sample is valid for";
    };
    udpBindAddress = mkOption {
      type = types.str;
      default = ":9122";
      example = "192.0.2.1:9122";
      description = "Address on which to listen for udp packets";
    };
  };
  serviceOpts = {
    serviceConfig = {
      RuntimeDirectory = "prometheus-influxdb-exporter";
      # listenAddress, port and extraFlags are provided by the shared
      # exporter option set merged in by the wrapper module.
      ExecStart = ''
        ${pkgs.prometheus-influxdb-exporter}/bin/influxdb_exporter \
        --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
        --influxdb.sample-expiry ${cfg.sampleExpiry} ${concatStringsSep " " cfg.extraFlags}
      '';
    };
  };
}

View file

@ -0,0 +1,100 @@
# NixOS module for Litestream, a streaming replication tool for SQLite.
{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.services.litestream;
  settingsFormat = pkgs.formats.yaml {};
in
{
  options.services.litestream = {
    enable = mkEnableOption "litestream";

    package = mkOption {
      description = "Package to use.";
      default = pkgs.litestream;
      defaultText = "pkgs.litestream";
      type = types.package;
    };

    settings = mkOption {
      description = ''
        See the <link xlink:href="https://litestream.io/reference/config/">documentation</link>.
      '';
      type = settingsFormat.type;
      example = {
        dbs = [
          {
            path = "/var/lib/db1";
            replicas = [
              {
                url = "s3://mybkt.litestream.io/db1";
              }
            ];
          }
        ];
      };
    };

    environmentFile = mkOption {
      type = types.nullOr types.path;
      default = null;
      example = "/run/secrets/litestream";
      description = ''
        Environment file as defined in <citerefentry>
        <refentrytitle>systemd.exec</refentrytitle><manvolnum>5</manvolnum>
        </citerefentry>.

        Secrets may be passed to the service without adding them to the
        world-readable Nix store, by specifying placeholder variables as
        the option value in Nix and setting these variables accordingly in the
        environment file.

        By default, Litestream will perform environment variable expansion
        within the config file before reading it. Any references to ''$VAR or
        ''${VAR} formatted variables will be replaced with their environment
        variable values. If no value is set then it will be replaced with an
        empty string.

        <programlisting>
          # Content of the environment file
          LITESTREAM_ACCESS_KEY_ID=AKIAxxxxxxxxxxxxxxxx
          LITESTREAM_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/xxxxxxxxx
        </programlisting>

        Note that this file needs to be available on the host on which
        this exporter is running.
      '';
    };
  };

  config = mkIf cfg.enable {
    environment.systemPackages = [ cfg.package ];

    environment.etc = {
      "litestream.yml" = {
        source = settingsFormat.generate "litestream-config.yaml" cfg.settings;
      };
    };

    systemd.services.litestream = {
      description = "Litestream";
      wantedBy = [ "multi-user.target" ];
      # "network.target" is the canonical systemd unit; the previously used
      # "networking.target" does not exist, making the ordering a no-op.
      after = [ "network.target" ];
      serviceConfig = {
        EnvironmentFile = mkIf (cfg.environmentFile != null) cfg.environmentFile;
        ExecStart = "${cfg.package}/bin/litestream replicate";
        Restart = "always";
        User = "litestream";
        Group = "litestream";
      };
    };

    users.users.litestream = {
      description = "Litestream user";
      group = "litestream";
      isSystemUser = true;
    };
    users.groups.litestream = {};
  };
  meta.doc = ./litestream.xml;
}

View file

@ -0,0 +1,65 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="module-services-litestream">
<title>Litestream</title>
<para>
<link xlink:href="https://litestream.io/">Litestream</link> is a standalone streaming
replication tool for SQLite.
</para>
<section xml:id="module-services-litestream-configuration">
<title>Configuration</title>
<para>
    The Litestream service is managed by a dedicated user named <literal>litestream</literal>,
    which needs read and write access to the database files it replicates. Here's an example
    configuration that grants the required permissions to access the
    <link linkend="opt-services.grafana.database.path">Grafana database</link>:
<programlisting>
{ pkgs, ... }:
{
users.users.litestream.extraGroups = [ "grafana" ];
systemd.services.grafana.serviceConfig.ExecStartPost = "+" + pkgs.writeShellScript "grant-grafana-permissions" ''
timeout=10
while [ ! -f /var/lib/grafana/data/grafana.db ];
do
if [ "$timeout" == 0 ]; then
echo "ERROR: Timeout while waiting for /var/lib/grafana/data/grafana.db."
exit 1
fi
sleep 1
((timeout--))
done
find /var/lib/grafana -type d -exec chmod -v 775 {} \;
find /var/lib/grafana -type f -exec chmod -v 660 {} \;
'';
services.litestream = {
enable = true;
environmentFile = "/run/secrets/litestream";
settings = {
dbs = [
{
path = "/var/lib/grafana/data/grafana.db";
replicas = [{
url = "s3://mybkt.litestream.io/grafana";
}];
}
];
};
};
}
</programlisting>
</para>
</section>
</chapter>

View file

@ -79,7 +79,7 @@ in
systemd.services = systemd.services =
lib.fold ( s : acc : acc // lib.foldr ( s : acc : acc //
{ {
"autossh-${s.name}" = "autossh-${s.name}" =
let let

View file

@ -10,8 +10,8 @@ let
birdBin = if variant == "bird6" then "bird6" else "bird"; birdBin = if variant == "bird6" then "bird6" else "bird";
birdc = if variant == "bird6" then "birdc6" else "birdc"; birdc = if variant == "bird6" then "birdc6" else "birdc";
descr = descr =
{ bird = "1.9.x with IPv4 suport"; { bird = "1.6.x with IPv4 support";
bird6 = "1.9.x with IPv6 suport"; bird6 = "1.6.x with IPv6 support";
bird2 = "2.x"; bird2 = "2.x";
}.${variant}; }.${variant};
in { in {

View file

@ -339,6 +339,8 @@ in
description = description =
'' ''
Whether to log rejected or dropped incoming connections. Whether to log rejected or dropped incoming connections.
Note: The logs are found in the kernel logs, i.e. dmesg
or journalctl -k.
''; '';
}; };
@ -350,6 +352,8 @@ in
Whether to log all rejected or dropped incoming packets. Whether to log all rejected or dropped incoming packets.
This tends to give a lot of log messages, so it's mostly This tends to give a lot of log messages, so it's mostly
useful for debugging. useful for debugging.
Note: The logs are found in the kernel logs, i.e. dmesg
or journalctl -k.
''; '';
}; };

View file

@ -4,8 +4,31 @@ with lib;
let let
cfg = config.networking.wireless.iwd; cfg = config.networking.wireless.iwd;
ini = pkgs.formats.ini { };
configFile = ini.generate "main.conf" cfg.settings;
in { in {
options.networking.wireless.iwd.enable = mkEnableOption "iwd"; options.networking.wireless.iwd = {
enable = mkEnableOption "iwd";
settings = mkOption {
type = ini.type;
default = { };
example = {
Settings.AutoConnect = true;
Network = {
EnableIPv6 = true;
RoutePriorityOffset = 300;
};
};
description = ''
Options passed to iwd.
See <link xlink:href="https://iwd.wiki.kernel.org/networkconfigurationsettings">here</link> for supported options.
'';
};
};
config = mkIf cfg.enable { config = mkIf cfg.enable {
assertions = [{ assertions = [{
@ -15,6 +38,8 @@ in {
''; '';
}]; }];
environment.etc."iwd/main.conf".source = configFile;
# for iwctl # for iwctl
environment.systemPackages = [ pkgs.iwd ]; environment.systemPackages = [ pkgs.iwd ];
@ -27,7 +52,10 @@ in {
linkConfig.NamePolicy = "keep kernel"; linkConfig.NamePolicy = "keep kernel";
}; };
systemd.services.iwd.wantedBy = [ "multi-user.target" ]; systemd.services.iwd = {
wantedBy = [ "multi-user.target" ];
restartTriggers = [ configFile ];
};
}; };
meta.maintainers = with lib.maintainers; [ mic92 dtzWill ]; meta.maintainers = with lib.maintainers; [ mic92 dtzWill ];

View file

@ -238,6 +238,10 @@ in
KEA_PIDFILE_DIR = "/run/kea"; KEA_PIDFILE_DIR = "/run/kea";
}; };
restartTriggers = [
ctrlAgentConfig
];
serviceConfig = { serviceConfig = {
ExecStart = "${package}/bin/kea-ctrl-agent -c /etc/kea/ctrl-agent.conf ${lib.escapeShellArgs cfg.dhcp4.extraArgs}"; ExecStart = "${package}/bin/kea-ctrl-agent -c /etc/kea/ctrl-agent.conf ${lib.escapeShellArgs cfg.dhcp4.extraArgs}";
KillMode = "process"; KillMode = "process";
@ -269,6 +273,10 @@ in
KEA_PIDFILE_DIR = "/run/kea"; KEA_PIDFILE_DIR = "/run/kea";
}; };
restartTriggers = [
dhcp4Config
];
serviceConfig = { serviceConfig = {
ExecStart = "${package}/bin/kea-dhcp4 -c /etc/kea/dhcp4-server.conf ${lib.escapeShellArgs cfg.dhcp4.extraArgs}"; ExecStart = "${package}/bin/kea-dhcp4 -c /etc/kea/dhcp4-server.conf ${lib.escapeShellArgs cfg.dhcp4.extraArgs}";
# Kea does not request capabilities by itself # Kea does not request capabilities by itself
@ -307,6 +315,10 @@ in
KEA_PIDFILE_DIR = "/run/kea"; KEA_PIDFILE_DIR = "/run/kea";
}; };
restartTriggers = [
dhcp6Config
];
serviceConfig = { serviceConfig = {
ExecStart = "${package}/bin/kea-dhcp6 -c /etc/kea/dhcp6-server.conf ${lib.escapeShellArgs cfg.dhcp6.extraArgs}"; ExecStart = "${package}/bin/kea-dhcp6 -c /etc/kea/dhcp6-server.conf ${lib.escapeShellArgs cfg.dhcp6.extraArgs}";
# Kea does not request capabilities by itself # Kea does not request capabilities by itself
@ -343,6 +355,10 @@ in
KEA_PIDFILE_DIR = "/run/kea"; KEA_PIDFILE_DIR = "/run/kea";
}; };
restartTriggers = [
dhcpDdnsConfig
];
serviceConfig = { serviceConfig = {
ExecStart = "${package}/bin/kea-dhcp-ddns -c /etc/kea/dhcp-ddns.conf ${lib.escapeShellArgs cfg.dhcp-ddns.extraArgs}"; ExecStart = "${package}/bin/kea-dhcp-ddns -c /etc/kea/dhcp-ddns.conf ${lib.escapeShellArgs cfg.dhcp-ddns.extraArgs}";
AmbientCapabilites = [ AmbientCapabilites = [

View file

@ -6,7 +6,6 @@ let
cfg = config.networking.networkmanager; cfg = config.networking.networkmanager;
basePackages = with pkgs; [ basePackages = with pkgs; [
crda
modemmanager modemmanager
networkmanager networkmanager
networkmanager-fortisslvpn networkmanager-fortisslvpn
@ -49,6 +48,7 @@ let
rc-manager = rc-manager =
if config.networking.resolvconf.enable then "resolvconf" if config.networking.resolvconf.enable then "resolvconf"
else "unmanaged"; else "unmanaged";
firewall-backend = cfg.firewallBackend;
}) })
(mkSection "keyfile" { (mkSection "keyfile" {
unmanaged-devices = unmanaged-devices =
@ -244,6 +244,15 @@ in {
''; '';
}; };
firewallBackend = mkOption {
type = types.enum [ "iptables" "nftables" "none" ];
default = "iptables";
description = ''
Which firewall backend should be used for configuring masquerading with shared mode.
If set to none, NetworkManager doesn't manage the configuration at all.
'';
};
logLevel = mkOption { logLevel = mkOption {
type = types.enum [ "OFF" "ERR" "WARN" "INFO" "DEBUG" "TRACE" ]; type = types.enum [ "OFF" "ERR" "WARN" "INFO" "DEBUG" "TRACE" ];
default = "WARN"; default = "WARN";
@ -404,6 +413,8 @@ in {
} }
]; ];
hardware.wirelessRegulatoryDatabase = true;
environment.etc = with pkgs; { environment.etc = with pkgs; {
"NetworkManager/NetworkManager.conf".source = configFile; "NetworkManager/NetworkManager.conf".source = configFile;

View file

@ -103,6 +103,7 @@ in
}]; }];
boot.blacklistedKernelModules = [ "ip_tables" ]; boot.blacklistedKernelModules = [ "ip_tables" ];
environment.systemPackages = [ pkgs.nftables ]; environment.systemPackages = [ pkgs.nftables ];
networking.networkmanager.firewallBackend = mkDefault "nftables";
systemd.services.nftables = { systemd.services.nftables = {
description = "nftables firewall"; description = "nftables firewall";
before = [ "network-pre.target" ]; before = [ "network-pre.target" ];

View file

@ -160,7 +160,7 @@ in
users.groups.nylon.gid = config.ids.gids.nylon; users.groups.nylon.gid = config.ids.gids.nylon;
systemd.services = fold (a: b: a // b) {} nylonUnits; systemd.services = foldr (a: b: a // b) {} nylonUnits;
}; };
} }

View file

@ -87,7 +87,7 @@ with lib;
}; };
config = mkIf (cfg != []) { config = mkIf (cfg != []) {
systemd.services = fold (a: b: a // b) {} ( systemd.services = foldr (a: b: a // b) {} (
mapAttrsToList (name: qtcfg: { mapAttrsToList (name: qtcfg: {
"quicktun-${name}" = { "quicktun-${name}" = {
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];

View file

@ -5,15 +5,16 @@ with lib;
let let
cfg = config.services.syncthing; cfg = config.services.syncthing;
defaultUser = "syncthing"; defaultUser = "syncthing";
defaultGroup = defaultUser;
devices = mapAttrsToList (name: device: { devices = mapAttrsToList (name: device: {
deviceID = device.id; deviceID = device.id;
inherit (device) name addresses introducer; inherit (device) name addresses introducer;
}) cfg.declarative.devices; }) cfg.devices;
folders = mapAttrsToList ( _: folder: { folders = mapAttrsToList ( _: folder: {
inherit (folder) path id label type; inherit (folder) path id label type;
devices = map (device: { deviceId = cfg.declarative.devices.${device}.id; }) folder.devices; devices = map (device: { deviceId = cfg.devices.${device}.id; }) folder.devices;
rescanIntervalS = folder.rescanInterval; rescanIntervalS = folder.rescanInterval;
fsWatcherEnabled = folder.watch; fsWatcherEnabled = folder.watch;
fsWatcherDelayS = folder.watchDelay; fsWatcherDelayS = folder.watchDelay;
@ -23,215 +24,218 @@ let
}) (filterAttrs ( }) (filterAttrs (
_: folder: _: folder:
folder.enable folder.enable
) cfg.declarative.folders); ) cfg.folders);
# get the api key by parsing the config.xml
getApiKey = pkgs.writers.writeDash "getAPIKey" ''
${pkgs.libxml2}/bin/xmllint \
--xpath 'string(configuration/gui/apikey)'\
${cfg.configDir}/config.xml
'';
updateConfig = pkgs.writers.writeDash "merge-syncthing-config" '' updateConfig = pkgs.writers.writeDash "merge-syncthing-config" ''
set -efu set -efu
# wait for syncthing port to open
until ${pkgs.curl}/bin/curl -Ss ${cfg.guiAddress} -o /dev/null; do
sleep 1
done
API_KEY=$(${getApiKey}) # get the api key by parsing the config.xml
OLD_CFG=$(${pkgs.curl}/bin/curl -Ss \ while
-H "X-API-Key: $API_KEY" \ ! api_key=$(${pkgs.libxml2}/bin/xmllint \
${cfg.guiAddress}/rest/system/config) --xpath 'string(configuration/gui/apikey)' \
${cfg.configDir}/config.xml)
do sleep 1; done
# generate the new config by merging with the nixos config options curl() {
NEW_CFG=$(echo "$OLD_CFG" | ${pkgs.jq}/bin/jq -s '.[] as $in | $in * { ${pkgs.curl}/bin/curl -sS -H "X-API-Key: $api_key" \
"devices": (${builtins.toJSON devices}${optionalString (! cfg.declarative.overrideDevices) " + $in.devices"}), --retry 1000 --retry-delay 1 --retry-all-errors \
"folders": (${builtins.toJSON folders}${optionalString (! cfg.declarative.overrideFolders) " + $in.folders"}) "$@"
}') }
# POST the new config to syncthing # query the old config
echo "$NEW_CFG" | ${pkgs.curl}/bin/curl -Ss \ old_cfg=$(curl ${cfg.guiAddress}/rest/config)
-H "X-API-Key: $API_KEY" \
${cfg.guiAddress}/rest/system/config -d @-
# restart syncthing after sending the new config # generate the new config by merging with the NixOS config options
${pkgs.curl}/bin/curl -Ss \ new_cfg=$(echo "$old_cfg" | ${pkgs.jq}/bin/jq -c '. * {
-H "X-API-Key: $API_KEY" \ "devices": (${builtins.toJSON devices}${optionalString (! cfg.overrideDevices) " + .devices"}),
-X POST \ "folders": (${builtins.toJSON folders}${optionalString (! cfg.overrideFolders) " + .folders"})
${cfg.guiAddress}/rest/system/restart } * ${builtins.toJSON cfg.extraOptions}')
# send the new config
curl -X PUT -d "$new_cfg" ${cfg.guiAddress}/rest/config
# restart Syncthing if required
if curl ${cfg.guiAddress}/rest/config/restart-required |
${pkgs.jq}/bin/jq -e .requiresRestart > /dev/null; then
curl -X POST ${cfg.guiAddress}/rest/system/restart
fi
''; '';
in { in {
###### interface ###### interface
options = { options = {
services.syncthing = { services.syncthing = {
enable = mkEnableOption '' enable = mkEnableOption
Syncthing - the self-hosted open-source alternative "Syncthing, a self-hosted open-source alternative to Dropbox and Bittorrent Sync";
to Dropbox and Bittorrent Sync. Initial interface will be
available on http://127.0.0.1:8384/.
'';
declarative = { cert = mkOption {
cert = mkOption { type = types.nullOr types.str;
type = types.nullOr types.str; default = null;
default = null; description = ''
description = '' Path to the <literal>cert.pem</literal> file, which will be copied into Syncthing's
Path to users cert.pem file, will be copied into the syncthing's <link linkend="opt-services.syncthing.configDir">configDir</link>.
<literal>configDir</literal> '';
''; };
};
key = mkOption { key = mkOption {
type = types.nullOr types.str; type = types.nullOr types.str;
default = null; default = null;
description = '' description = ''
Path to users key.pem file, will be copied into the syncthing's Path to the <literal>key.pem</literal> file, which will be copied into Syncthing's
<literal>configDir</literal> <link linkend="opt-services.syncthing.configDir">configDir</link>.
''; '';
}; };
overrideDevices = mkOption { overrideDevices = mkOption {
type = types.bool; type = types.bool;
default = true; default = true;
description = '' description = ''
Whether to delete the devices which are not configured via the Whether to delete the devices which are not configured via the
<literal>declarative.devices</literal> option. <link linkend="opt-services.syncthing.devices">devices</link> option.
If set to false, devices added via the webinterface will If set to <literal>false</literal>, devices added via the web
persist but will have to be deleted manually. interface will persist and will have to be deleted manually.
''; '';
}; };
devices = mkOption { devices = mkOption {
default = {}; default = {};
description = '' description = ''
Peers/devices which syncthing should communicate with. Peers/devices which Syncthing should communicate with.
'';
example = { Note that you can still add devices manually, but those changes
bigbox = { will be reverted on restart if <link linkend="opt-services.syncthing.overrideDevices">overrideDevices</link>
id = "7CFNTQM-IMTJBHJ-3UWRDIU-ZGQJFR6-VCXZ3NB-XUH3KZO-N52ITXR-LAIYUAU"; is enabled.
addresses = [ "tcp://192.168.0.10:51820" ]; '';
}; example = {
bigbox = {
id = "7CFNTQM-IMTJBHJ-3UWRDIU-ZGQJFR6-VCXZ3NB-XUH3KZO-N52ITXR-LAIYUAU";
addresses = [ "tcp://192.168.0.10:51820" ];
}; };
type = types.attrsOf (types.submodule ({ name, ... }: { };
options = { type = types.attrsOf (types.submodule ({ name, ... }: {
options = {
name = mkOption {
type = types.str;
default = name;
description = ''
Name of the device
'';
};
addresses = mkOption {
type = types.listOf types.str;
default = [];
description = ''
The addresses used to connect to the device.
If this is let empty, dynamic configuration is attempted
'';
};
id = mkOption {
type = types.str;
description = ''
The id of the other peer, this is mandatory. It's documented at
https://docs.syncthing.net/dev/device-ids.html
'';
};
introducer = mkOption {
type = types.bool;
default = false;
description = ''
If the device should act as an introducer and be allowed
to add folders on this computer.
'';
};
name = mkOption {
type = types.str;
default = name;
description = ''
The name of the device.
'';
}; };
}));
};
overrideFolders = mkOption { addresses = mkOption {
type = types.bool; type = types.listOf types.str;
default = true; default = [];
description = '' description = ''
Whether to delete the folders which are not configured via the The addresses used to connect to the device.
<literal>declarative.folders</literal> option. If this is left empty, dynamic configuration is attempted.
If set to false, folders added via the webinterface will persist '';
but will have to be deleted manually. };
'';
};
folders = mkOption { id = mkOption {
default = {}; type = types.str;
description = '' description = ''
folders which should be shared by syncthing. The device ID. See <link xlink:href="https://docs.syncthing.net/dev/device-ids.html"/>.
''; '';
example = literalExample '' };
{
"/home/user/sync" = {
id = "syncme";
devices = [ "bigbox" ];
};
}
'';
type = types.attrsOf (types.submodule ({ name, ... }: {
options = {
enable = mkOption { introducer = mkOption {
type = types.bool; type = types.bool;
default = true; default = false;
description = '' description = ''
share this folder. Whether the device should act as an introducer and be allowed
This option is useful when you want to define all folders to add folders on this computer.
in one place, but not every machine should share all folders. See <link xlink:href="https://docs.syncthing.net/users/introducer.html"/>.
''; '';
}; };
path = mkOption { };
type = types.str; }));
default = name; };
description = ''
The path to the folder which should be shared.
'';
};
id = mkOption { overrideFolders = mkOption {
type = types.str; type = types.bool;
default = name; default = true;
description = '' description = ''
The id of the folder. Must be the same on all devices. Whether to delete the folders which are not configured via the
''; <link linkend="opt-services.syncthing.folders">folders</link> option.
}; If set to <literal>false</literal>, folders added via the web
interface will persist and will have to be deleted manually.
'';
};
label = mkOption { folders = mkOption {
type = types.str; default = {};
default = name; description = ''
description = '' Folders which should be shared by Syncthing.
The label of the folder.
'';
};
devices = mkOption { Note that you can still add devices manually, but those changes
type = types.listOf types.str; will be reverted on restart if <link linkend="opt-services.syncthing.overrideDevices">overrideDevices</link>
default = []; is enabled.
description = '' '';
The devices this folder should be shared with. Must be defined example = literalExample ''
in the <literal>declarative.devices</literal> attribute. {
''; "/home/user/sync" = {
}; id = "syncme";
devices = [ "bigbox" ];
};
}
'';
type = types.attrsOf (types.submodule ({ name, ... }: {
options = {
versioning = mkOption { enable = mkOption {
default = null; type = types.bool;
description = '' default = true;
How to keep changed/deleted files with syncthing. description = ''
There are 4 different types of versioning with different parameters. Whether to share this folder.
See https://docs.syncthing.net/users/versioning.html This option is useful when you want to define all folders
''; in one place, but not every machine should share all folders.
example = [ '';
};
path = mkOption {
type = types.str;
default = name;
description = ''
The path to the folder which should be shared.
'';
};
id = mkOption {
type = types.str;
default = name;
description = ''
The ID of the folder. Must be the same on all devices.
'';
};
label = mkOption {
type = types.str;
default = name;
description = ''
The label of the folder.
'';
};
devices = mkOption {
type = types.listOf types.str;
default = [];
description = ''
The devices this folder should be shared with. Each device must
be defined in the <link linkend="opt-services.syncthing.devices">devices</link> option.
'';
};
versioning = mkOption {
default = null;
description = ''
How to keep changed/deleted files with Syncthing.
There are 4 different types of versioning with different parameters.
See <link xlink:href="https://docs.syncthing.net/users/versioning.html"/>.
'';
example = literalExample ''
[
{ {
versioning = { versioning = {
type = "simple"; type = "simple";
@ -257,87 +261,99 @@ in {
{ {
versioning = { versioning = {
type = "external"; type = "external";
params.versionsPath = pkgs.writers.writeBash "backup" '' params.versionsPath = pkgs.writers.writeBash "backup" '''
folderpath="$1" folderpath="$1"
filepath="$2" filepath="$2"
rm -rf "$folderpath/$filepath" rm -rf "$folderpath/$filepath"
''; ''';
}; };
} }
]; ]
type = with types; nullOr (submodule { '';
options = { type = with types; nullOr (submodule {
type = mkOption { options = {
type = enum [ "external" "simple" "staggered" "trashcan" ]; type = mkOption {
description = '' type = enum [ "external" "simple" "staggered" "trashcan" ];
Type of versioning. description = ''
See https://docs.syncthing.net/users/versioning.html The type of versioning.
''; See <link xlink:href="https://docs.syncthing.net/users/versioning.html"/>.
}; '';
params = mkOption {
type = attrsOf (either str path);
description = ''
Parameters for versioning. Structure depends on versioning.type.
See https://docs.syncthing.net/users/versioning.html
'';
};
}; };
}); params = mkOption {
}; type = attrsOf (either str path);
description = ''
rescanInterval = mkOption { The parameters for versioning. Structure depends on
type = types.int; <link linkend="opt-services.syncthing.folders._name_.versioning.type">versioning.type</link>.
default = 3600; See <link xlink:href="https://docs.syncthing.net/users/versioning.html"/>.
description = '' '';
How often the folders should be rescaned for changes. };
''; };
}; });
type = mkOption {
type = types.enum [ "sendreceive" "sendonly" "receiveonly" ];
default = "sendreceive";
description = ''
Whether to send only changes from this folder, only receive them
or propagate both.
'';
};
watch = mkOption {
type = types.bool;
default = true;
description = ''
Whether the folder should be watched for changes by inotify.
'';
};
watchDelay = mkOption {
type = types.int;
default = 10;
description = ''
The delay after an inotify event is triggered.
'';
};
ignorePerms = mkOption {
type = types.bool;
default = true;
description = ''
Whether to propagate permission changes.
'';
};
ignoreDelete = mkOption {
type = types.bool;
default = false;
description = ''
Whether to delete files in destination. See <link
xlink:href="https://docs.syncthing.net/advanced/folder-ignoredelete.html">
upstream's docs</link>.
'';
};
}; };
}));
rescanInterval = mkOption {
type = types.int;
default = 3600;
description = ''
How often the folder should be rescanned for changes.
'';
};
type = mkOption {
type = types.enum [ "sendreceive" "sendonly" "receiveonly" ];
default = "sendreceive";
description = ''
Whether to only send changes for this folder, only receive them
or both.
'';
};
watch = mkOption {
type = types.bool;
default = true;
description = ''
Whether the folder should be watched for changes by inotify.
'';
};
watchDelay = mkOption {
type = types.int;
default = 10;
description = ''
The delay after an inotify event is triggered.
'';
};
ignorePerms = mkOption {
type = types.bool;
default = true;
description = ''
Whether to ignore permission changes.
'';
};
ignoreDelete = mkOption {
type = types.bool;
default = false;
description = ''
Whether to skip deleting files that are deleted by peers.
See <link xlink:href="https://docs.syncthing.net/advanced/folder-ignoredelete.html"/>.
'';
};
};
}));
};
extraOptions = mkOption {
type = types.addCheck (pkgs.formats.json {}).type isAttrs;
default = {};
description = ''
Extra configuration options for Syncthing.
See <link xlink:href="https://docs.syncthing.net/users/config.html"/>.
'';
example = {
options.localAnnounceEnabled = false;
gui.theme = "black";
}; };
}; };
@ -345,31 +361,35 @@ in {
type = types.str; type = types.str;
default = "127.0.0.1:8384"; default = "127.0.0.1:8384";
description = '' description = ''
Address to serve the GUI. The address to serve the web interface at.
''; '';
}; };
systemService = mkOption { systemService = mkOption {
type = types.bool; type = types.bool;
default = true; default = true;
description = "Auto launch Syncthing as a system service."; description = ''
Whether to auto-launch Syncthing as a system service.
'';
}; };
user = mkOption { user = mkOption {
type = types.str; type = types.str;
default = defaultUser; default = defaultUser;
example = "yourUser";
description = '' description = ''
Syncthing will be run under this user (user will be created if it doesn't exist. The user to run Syncthing as.
This can be your user name). By default, a user named <literal>${defaultUser}</literal> will be created.
''; '';
}; };
group = mkOption { group = mkOption {
type = types.str; type = types.str;
default = defaultUser; default = defaultGroup;
example = "yourGroup";
description = '' description = ''
Syncthing will be run under this group (group will not be created if it doesn't exist. The group to run Syncthing under.
This can be your user name). By default, a group named <literal>${defaultGroup}</literal> will be created.
''; '';
}; };
@ -378,63 +398,67 @@ in {
default = null; default = null;
example = "socks5://address.com:1234"; example = "socks5://address.com:1234";
description = '' description = ''
Overwrites all_proxy environment variable for the syncthing process to Overwrites the all_proxy environment variable for the Syncthing process to
the given value. This is normaly used to let relay client connect the given value. This is normally used to let Syncthing connect
through SOCKS5 proxy server. through a SOCKS5 proxy server.
See <link xlink:href="https://docs.syncthing.net/users/proxying.html"/>.
''; '';
}; };
dataDir = mkOption { dataDir = mkOption {
type = types.path; type = types.path;
default = "/var/lib/syncthing"; default = "/var/lib/syncthing";
example = "/home/yourUser";
description = '' description = ''
Path where synced directories will exist. The path where synchronised directories will exist.
''; '';
}; };
configDir = mkOption { configDir = let
cond = versionAtLeast config.system.stateVersion "19.03";
in mkOption {
type = types.path; type = types.path;
description = '' description = ''
Path where the settings and keys will exist. The path where the settings and keys will exist.
''; '';
default = default = cfg.dataDir + (optionalString cond "/.config/syncthing");
let defaultText = literalExample "dataDir${optionalString cond " + \"/.config/syncthing\""}";
nixos = config.system.stateVersion;
cond = versionAtLeast nixos "19.03";
in cfg.dataDir + (optionalString cond "/.config/syncthing");
}; };
openDefaultPorts = mkOption { openDefaultPorts = mkOption {
type = types.bool; type = types.bool;
default = false; default = false;
example = literalExample "true"; example = true;
description = '' description = ''
Open the default ports in the firewall: Whether to open the default ports in the firewall: TCP 22000 for transfers
- TCP 22000 for transfers and UDP 21027 for discovery.
- UDP 21027 for discovery
If multiple users are running syncthing on this machine, you will need to manually open a set of ports for each instance and leave this disabled. If multiple users are running Syncthing on this machine, you will need
Alternatively, if are running only a single instance on this machine using the default ports, enable this. to manually open a set of ports for each instance and leave this disabled.
Alternatively, if you are running only a single instance on this machine
using the default ports, enable this.
''; '';
}; };
package = mkOption { package = mkOption {
type = types.package; type = types.package;
default = pkgs.syncthing; default = pkgs.syncthing;
defaultText = "pkgs.syncthing"; defaultText = literalExample "pkgs.syncthing";
example = literalExample "pkgs.syncthing";
description = '' description = ''
Syncthing package to use. The Syncthing package to use.
''; '';
}; };
}; };
}; };
imports = [ imports = [
(mkRemovedOptionModule ["services" "syncthing" "useInotify"] '' (mkRemovedOptionModule [ "services" "syncthing" "useInotify" ] ''
This option was removed because syncthing now has the inotify functionality included under the name "fswatcher". This option was removed because Syncthing now has the inotify functionality included under the name "fswatcher".
It can be enabled on a per-folder basis through the webinterface. It can be enabled on a per-folder basis through the web interface.
'') '')
]; ] ++ map (o:
mkRenamedOptionModule [ "services" "syncthing" "declarative" o ] [ "services" "syncthing" o ]
) [ "cert" "key" "devices" "folders" "overrideDevices" "overrideFolders" "extraOptions"];
###### implementation ###### implementation
@ -457,8 +481,8 @@ in {
}; };
}; };
users.groups = mkIf (cfg.systemService && cfg.group == defaultUser) { users.groups = mkIf (cfg.systemService && cfg.group == defaultGroup) {
${defaultUser}.gid = ${defaultGroup}.gid =
config.ids.gids.syncthing; config.ids.gids.syncthing;
}; };
@ -478,14 +502,14 @@ in {
RestartForceExitStatus="3 4"; RestartForceExitStatus="3 4";
User = cfg.user; User = cfg.user;
Group = cfg.group; Group = cfg.group;
ExecStartPre = mkIf (cfg.declarative.cert != null || cfg.declarative.key != null) ExecStartPre = mkIf (cfg.cert != null || cfg.key != null)
"+${pkgs.writers.writeBash "syncthing-copy-keys" '' "+${pkgs.writers.writeBash "syncthing-copy-keys" ''
install -dm700 -o ${cfg.user} -g ${cfg.group} ${cfg.configDir} install -dm700 -o ${cfg.user} -g ${cfg.group} ${cfg.configDir}
${optionalString (cfg.declarative.cert != null) '' ${optionalString (cfg.cert != null) ''
install -Dm400 -o ${cfg.user} -g ${cfg.group} ${toString cfg.declarative.cert} ${cfg.configDir}/cert.pem install -Dm400 -o ${cfg.user} -g ${cfg.group} ${toString cfg.cert} ${cfg.configDir}/cert.pem
''} ''}
${optionalString (cfg.declarative.key != null) '' ${optionalString (cfg.key != null) ''
install -Dm400 -o ${cfg.user} -g ${cfg.group} ${toString cfg.declarative.key} ${cfg.configDir}/key.pem install -Dm400 -o ${cfg.user} -g ${cfg.group} ${toString cfg.key} ${cfg.configDir}/key.pem
''} ''}
''}" ''}"
; ;
@ -516,8 +540,10 @@ in {
}; };
}; };
syncthing-init = mkIf ( syncthing-init = mkIf (
cfg.declarative.devices != {} || cfg.declarative.folders != {} cfg.devices != {} || cfg.folders != {} || cfg.extraOptions != {}
) { ) {
description = "Syncthing configuration updater";
requisite = [ "syncthing.service" ];
after = [ "syncthing.service" ]; after = [ "syncthing.service" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];

View file

@ -351,7 +351,7 @@ in
config = mkIf (cfg.networks != { }) { config = mkIf (cfg.networks != { }) {
environment.etc = fold (a: b: a // b) { } environment.etc = foldr (a: b: a // b) { }
(flip mapAttrsToList cfg.networks (network: data: (flip mapAttrsToList cfg.networks (network: data:
flip mapAttrs' data.hosts (host: text: nameValuePair flip mapAttrs' data.hosts (host: text: nameValuePair
("tinc/${network}/hosts/${host}") ("tinc/${network}/hosts/${host}")

View file

@ -19,7 +19,7 @@ let
${ethtool} -s ${interface} ${methodParameter {inherit method password;}} ${ethtool} -s ${interface} ${methodParameter {inherit method password;}}
''; '';
concatStrings = fold (x: y: x + y) ""; concatStrings = foldr (x: y: x + y) "";
lines = concatStrings (map (l: line l) interfaces); lines = concatStrings (map (l: line l) interfaces);
in in

View file

@ -241,7 +241,8 @@ in {
environment.systemPackages = [ package ]; environment.systemPackages = [ package ];
services.dbus.packages = [ package ]; services.dbus.packages = [ package ];
services.udev.packages = [ pkgs.crda ];
hardware.wirelessRegulatoryDatabase = true;
# FIXME: start a separate wpa_supplicant instance per interface. # FIXME: start a separate wpa_supplicant instance per interface.
systemd.services.wpa_supplicant = let systemd.services.wpa_supplicant = let

View file

@ -4,7 +4,7 @@ with lib;
let let
cfg = config.services.uptimed; cfg = config.services.uptimed;
stateDir = "/var/spool/uptimed"; stateDir = "/var/lib/uptimed";
in in
{ {
options = { options = {
@ -21,12 +21,16 @@ in
}; };
config = mkIf cfg.enable { config = mkIf cfg.enable {
environment.systemPackages = [ pkgs.uptimed ];
users.users.uptimed = { users.users.uptimed = {
description = "Uptimed daemon user"; description = "Uptimed daemon user";
home = stateDir; home = stateDir;
createHome = true;
uid = config.ids.uids.uptimed; uid = config.ids.uids.uptimed;
group = "uptimed";
}; };
users.groups.uptimed = {};
systemd.services.uptimed = { systemd.services.uptimed = {
unitConfig.Documentation = "man:uptimed(8) man:uprecords(1)"; unitConfig.Documentation = "man:uptimed(8) man:uprecords(1)";
@ -41,7 +45,7 @@ in
PrivateTmp = "yes"; PrivateTmp = "yes";
PrivateNetwork = "yes"; PrivateNetwork = "yes";
NoNewPrivileges = "yes"; NoNewPrivileges = "yes";
ReadWriteDirectories = stateDir; StateDirectory = [ "uptimed" ];
InaccessibleDirectories = "/home"; InaccessibleDirectories = "/home";
ExecStart = "${pkgs.uptimed}/sbin/uptimed -f -p ${stateDir}/pid"; ExecStart = "${pkgs.uptimed}/sbin/uptimed -f -p ${stateDir}/pid";
}; };

View file

@ -0,0 +1,70 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.replay-sorcery;
configFile = generators.toKeyValue {} cfg.settings;
in
{
options = with types; {
services.replay-sorcery = {
enable = mkEnableOption "the ReplaySorcery service for instant-replays";
enableSysAdminCapability = mkEnableOption ''
the system admin capability to support hardware accelerated
video capture. This is equivalent to running ReplaySorcery as
root, so use with caution'';
autoStart = mkOption {
type = bool;
default = false;
description = "Automatically start ReplaySorcery when graphical-session.target starts.";
};
settings = mkOption {
type = attrsOf (oneOf [ str int ]);
default = {};
description = "System-wide configuration for ReplaySorcery (/etc/replay-sorcery.conf).";
example = literalExample ''
{
videoInput = "hwaccel"; # requires `services.replay-sorcery.enableSysAdminCapability = true`
videoFramerate = 60;
}
'';
};
};
};
config = mkIf cfg.enable {
environment = {
systemPackages = [ pkgs.replay-sorcery ];
etc."replay-sorcery.conf".text = configFile;
};
security.wrappers = mkIf cfg.enableSysAdminCapability {
replay-sorcery = {
source = "${pkgs.replay-sorcery}/bin/replay-sorcery";
capabilities = "cap_sys_admin+ep";
};
};
systemd = {
packages = [ pkgs.replay-sorcery ];
user.services.replay-sorcery = {
wantedBy = mkIf cfg.autoStart [ "graphical-session.target" ];
partOf = mkIf cfg.autoStart [ "graphical-session.target" ];
serviceConfig = {
ExecStart = mkIf cfg.enableSysAdminCapability [
"" # Tell systemd to clear the existing ExecStart list, to prevent appending to it.
"${config.security.wrapperDir}/replay-sorcery"
];
};
};
};
};
meta = {
maintainers = with maintainers; [ kira-bruneau ];
};
}

View file

@ -0,0 +1,148 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.node-red;
defaultUser = "node-red";
finalPackage = if cfg.withNpmAndGcc then node-red_withNpmAndGcc else cfg.package;
node-red_withNpmAndGcc = pkgs.runCommandNoCC "node-red" {
nativeBuildInputs = [ pkgs.makeWrapper ];
}
''
mkdir -p $out/bin
makeWrapper ${pkgs.nodePackages.node-red}/bin/node-red $out/bin/node-red \
--set PATH '${lib.makeBinPath [ pkgs.nodePackages.npm pkgs.gcc ]}:$PATH' \
'';
in
{
options.services.node-red = {
enable = mkEnableOption "the Node-RED service";
package = mkOption {
default = pkgs.nodePackages.node-red;
defaultText = "pkgs.nodePackages.node-red";
type = types.package;
description = "Node-RED package to use.";
};
openFirewall = mkOption {
type = types.bool;
default = false;
description = ''
Open ports in the firewall for the server.
'';
};
withNpmAndGcc = mkOption {
type = types.bool;
default = false;
description = ''
Give Node-RED access to NPM and GCC at runtime, so 'Nodes' can be
downloaded and managed imperatively via the 'Palette Manager'.
'';
};
configFile = mkOption {
type = types.path;
default = "${cfg.package}/lib/node_modules/node-red/settings.js";
defaultText = "\${cfg.package}/lib/node_modules/node-red/settings.js";
description = ''
Path to the JavaScript configuration file.
See <link
xlink:href="https://github.com/node-red/node-red/blob/master/packages/node_modules/node-red/settings.js"/>
for a configuration example.
'';
};
port = mkOption {
type = types.port;
default = 1880;
description = "Listening port.";
};
user = mkOption {
type = types.str;
default = defaultUser;
description = ''
User under which Node-RED runs.If left as the default value this user
will automatically be created on system activation, otherwise the
sysadmin is responsible for ensuring the user exists.
'';
};
group = mkOption {
type = types.str;
default = defaultUser;
description = ''
Group under which Node-RED runs.If left as the default value this group
will automatically be created on system activation, otherwise the
sysadmin is responsible for ensuring the group exists.
'';
};
userDir = mkOption {
type = types.path;
default = "/var/lib/node-red";
description = ''
The directory to store all user data, such as flow and credential files and all library data. If left
as the default value this directory will automatically be created before the node-red service starts,
otherwise the sysadmin is responsible for ensuring the directory exists with appropriate ownership
and permissions.
'';
};
safe = mkOption {
type = types.bool;
default = false;
description = "Whether to launch Node-RED in --safe mode.";
};
define = mkOption {
type = types.attrs;
default = {};
description = "List of settings.js overrides to pass via -D to Node-RED.";
example = literalExample ''
{
"logging.console.level" = "trace";
}
'';
};
};
config = mkIf cfg.enable {
users.users = optionalAttrs (cfg.user == defaultUser) {
${defaultUser} = {
isSystemUser = true;
};
};
users.groups = optionalAttrs (cfg.group == defaultUser) {
${defaultUser} = { };
};
networking.firewall = mkIf cfg.openFirewall {
allowedTCPPorts = [ cfg.port ];
};
systemd.services.node-red = {
description = "Node-RED Service";
wantedBy = [ "multi-user.target" ];
after = [ "networking.target" ];
environment = {
HOME = cfg.userDir;
};
serviceConfig = mkMerge [
{
User = cfg.user;
Group = cfg.group;
ExecStart = "${finalPackage}/bin/node-red ${pkgs.lib.optionalString cfg.safe "--safe"} --settings ${cfg.configFile} --port ${toString cfg.port} --userDir ${cfg.userDir} ${concatStringsSep " " (mapAttrsToList (name: value: "-D ${name}=${value}") cfg.define)}";
PrivateTmp = true;
Restart = "always";
WorkingDirectory = cfg.userDir;
}
(mkIf (cfg.userDir == "/var/lib/node-red") { StateDirectory = "node-red"; })
];
};
};
}

View file

@ -19,82 +19,84 @@ let
mysqlLocal = cfg.database.createLocally && cfg.database.type == "mysql"; mysqlLocal = cfg.database.createLocally && cfg.database.type == "mysql";
pgsqlLocal = cfg.database.createLocally && cfg.database.type == "pgsql"; pgsqlLocal = cfg.database.createLocally && cfg.database.type == "pgsql";
tt-rss-config = pkgs.writeText "config.php" '' tt-rss-config = let
password =
if (cfg.database.password != null) then
"${(escape ["'" "\\"] cfg.database.password)}"
else if (cfg.database.passwordFile != null) then
"file_get_contents('${cfg.database.passwordFile}'"
else
""
;
in pkgs.writeText "config.php" ''
<?php <?php
putenv('TTRSS_PHP_EXECUTABLE=${pkgs.php}/bin/php');
define('PHP_EXECUTABLE', '${pkgs.php}/bin/php'); putenv('TTRSS_LOCK_DIRECTORY=${lockDir}');
putenv('TTRSS_CACHE_DIR=${cacheDir}');
putenv('TTRSS_ICONS_DIR=${feedIconsDir}');
putenv('TTRSS_ICONS_URL=${feedIconsDir}');
putenv('TTRSS_SELF_URL_PATH=${cfg.selfUrlPath}');
define('LOCK_DIRECTORY', '${lockDir}'); putenv('TTRSS_MYSQL_CHARSET=UTF8');
define('CACHE_DIR', '${cacheDir}');
define('ICONS_DIR', '${feedIconsDir}');
define('ICONS_URL', '${feedIconsDir}');
define('SELF_URL_PATH', '${cfg.selfUrlPath}');
define('MYSQL_CHARSET', 'UTF8'); putenv('TTRSS_DB_TYPE=${cfg.database.type}');
putenv('TTRSS_DB_HOST=${optionalString (cfg.database.host != null) cfg.database.host}');
putenv('TTRSS_DB_USER=${cfg.database.user}');
putenv('TTRSS_DB_NAME=${cfg.database.name}');
putenv('TTRSS_DB_PASS=${password}');
putenv('TTRSS_DB_PORT=${toString dbPort}');
define('DB_TYPE', '${cfg.database.type}'); putenv('TTRSS_AUTH_AUTO_CREATE=${boolToString cfg.auth.autoCreate}');
define('DB_HOST', '${optionalString (cfg.database.host != null) cfg.database.host}'); putenv('TTRSS_AUTH_AUTO_LOGIN=${boolToString cfg.auth.autoLogin}');
define('DB_USER', '${cfg.database.user}');
define('DB_NAME', '${cfg.database.name}');
define('DB_PASS', ${
if (cfg.database.password != null) then
"'${(escape ["'" "\\"] cfg.database.password)}'"
else if (cfg.database.passwordFile != null) then
"file_get_contents('${cfg.database.passwordFile}')"
else
"''"
});
define('DB_PORT', '${toString dbPort}');
define('AUTH_AUTO_CREATE', ${boolToString cfg.auth.autoCreate}); putenv('TTRSS_FEED_CRYPT_KEY=${escape ["'" "\\"] cfg.feedCryptKey}');
define('AUTH_AUTO_LOGIN', ${boolToString cfg.auth.autoLogin});
define('FEED_CRYPT_KEY', '${escape ["'" "\\"] cfg.feedCryptKey}');
define('SINGLE_USER_MODE', ${boolToString cfg.singleUserMode}); putenv('TTRSS_SINGLE_USER_MODE=${boolToString cfg.singleUserMode}');
define('SIMPLE_UPDATE_MODE', ${boolToString cfg.simpleUpdateMode}); putenv('TTRSS_SIMPLE_UPDATE_MODE=${boolToString cfg.simpleUpdateMode}');
// Never check for updates - the running version of the code should be # Never check for updates - the running version of the code should
// controlled entirely by the version of TT-RSS active in the current Nix # be controlled entirely by the version of TT-RSS active in the
// profile. If TT-RSS updates itself to a version requiring a database # current Nix profile. If TT-RSS updates itself to a version
// schema upgrade, and then the SystemD tt-rss.service is restarted, the # requiring a database schema upgrade, and then the SystemD
// old code copied from the Nix store will overwrite the updated version, # tt-rss.service is restarted, the old code copied from the Nix
// causing the code to detect the need for a schema "upgrade" (since the # store will overwrite the updated version, causing the code to
// schema version in the database is different than in the code), but the # detect the need for a schema "upgrade" (since the schema version
// update schema operation in TT-RSS will do nothing because the schema # in the database is different than in the code), but the update
// version in the database is newer than that in the code. # schema operation in TT-RSS will do nothing because the schema
define('CHECK_FOR_UPDATES', false); # version in the database is newer than that in the code.
putenv('TTRSS_CHECK_FOR_UPDATES=false');
define('FORCE_ARTICLE_PURGE', ${toString cfg.forceArticlePurge}); putenv('TTRSS_FORCE_ARTICLE_PURGE=${toString cfg.forceArticlePurge}');
define('SESSION_COOKIE_LIFETIME', ${toString cfg.sessionCookieLifetime}); putenv('TTRSS_SESSION_COOKIE_LIFETIME=${toString cfg.sessionCookieLifetime}');
define('ENABLE_GZIP_OUTPUT', ${boolToString cfg.enableGZipOutput}); putenv('TTRSS_ENABLE_GZIP_OUTPUT=${boolToString cfg.enableGZipOutput}');
define('PLUGINS', '${builtins.concatStringsSep "," cfg.plugins}'); putenv('TTRSS_PLUGINS=${builtins.concatStringsSep "," cfg.plugins}');
define('LOG_DESTINATION', '${cfg.logDestination}'); putenv('TTRSS_LOG_DESTINATION=${cfg.logDestination}');
define('CONFIG_VERSION', ${toString configVersion}); putenv('TTRSS_CONFIG_VERSION=${toString configVersion}');
define('PUBSUBHUBBUB_ENABLED', ${boolToString cfg.pubSubHubbub.enable}); putenv('TTRSS_PUBSUBHUBBUB_ENABLED=${boolToString cfg.pubSubHubbub.enable}');
define('PUBSUBHUBBUB_HUB', '${cfg.pubSubHubbub.hub}'); putenv('TTRSS_PUBSUBHUBBUB_HUB=${cfg.pubSubHubbub.hub}');
define('SPHINX_SERVER', '${cfg.sphinx.server}'); putenv('TTRSS_SPHINX_SERVER=${cfg.sphinx.server}');
define('SPHINX_INDEX', '${builtins.concatStringsSep "," cfg.sphinx.index}'); putenv('TTRSS_SPHINX_INDEX=${builtins.concatStringsSep "," cfg.sphinx.index}');
define('ENABLE_REGISTRATION', ${boolToString cfg.registration.enable}); putenv('TTRSS_ENABLE_REGISTRATION=${boolToString cfg.registration.enable}');
define('REG_NOTIFY_ADDRESS', '${cfg.registration.notifyAddress}'); putenv('TTRSS_REG_NOTIFY_ADDRESS=${cfg.registration.notifyAddress}');
define('REG_MAX_USERS', ${toString cfg.registration.maxUsers}); putenv('TTRSS_REG_MAX_USERS=${toString cfg.registration.maxUsers}');
define('SMTP_SERVER', '${cfg.email.server}'); putenv('TTRSS_SMTP_SERVER=${cfg.email.server}');
define('SMTP_LOGIN', '${cfg.email.login}'); putenv('TTRSS_SMTP_LOGIN=${cfg.email.login}');
define('SMTP_PASSWORD', '${escape ["'" "\\"] cfg.email.password}'); putenv('TTRSS_SMTP_PASSWORD=${escape ["'" "\\"] cfg.email.password}');
define('SMTP_SECURE', '${cfg.email.security}'); putenv('TTRSS_SMTP_SECURE=${cfg.email.security}');
define('SMTP_FROM_NAME', '${escape ["'" "\\"] cfg.email.fromName}'); putenv('TTRSS_SMTP_FROM_NAME=${escape ["'" "\\"] cfg.email.fromName}');
define('SMTP_FROM_ADDRESS', '${escape ["'" "\\"] cfg.email.fromAddress}'); putenv('TTRSS_SMTP_FROM_ADDRESS=${escape ["'" "\\"] cfg.email.fromAddress}');
define('DIGEST_SUBJECT', '${escape ["'" "\\"] cfg.email.digestSubject}'); putenv('TTRSS_DIGEST_SUBJECT=${escape ["'" "\\"] cfg.email.digestSubject}');
${cfg.extraConfig} ${cfg.extraConfig}
''; '';
@ -564,9 +566,12 @@ let
"Z '${cfg.root}' 0755 ${cfg.user} tt_rss - -" "Z '${cfg.root}' 0755 ${cfg.user} tt_rss - -"
]; ];
systemd.services.tt-rss = systemd.services = {
{ phpfpm-tt-rss = mkIf (cfg.pool == "${poolName}") {
restartTriggers = [ tt-rss-config pkgs.tt-rss ];
};
tt-rss = {
description = "Tiny Tiny RSS feeds update daemon"; description = "Tiny Tiny RSS feeds update daemon";
preStart = let preStart = let
@ -604,6 +609,9 @@ let
''} ''}
ln -sf "${tt-rss-config}" "${cfg.root}/config.php" ln -sf "${tt-rss-config}" "${cfg.root}/config.php"
chmod -R 755 "${cfg.root}" chmod -R 755 "${cfg.root}"
chmod -R ug+rwX "${cfg.root}/${lockDir}"
chmod -R ug+rwX "${cfg.root}/${cacheDir}"
chmod -R ug+rwX "${cfg.root}/${feedIconsDir}"
'' ''
+ (optionalString (cfg.database.type == "pgsql") '' + (optionalString (cfg.database.type == "pgsql") ''
@ -640,6 +648,7 @@ let
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
requires = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service"; requires = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
after = [ "network.target" ] ++ optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service"; after = [ "network.target" ] ++ optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
};
}; };
services.mysql = mkIf mysqlLocal { services.mysql = mkIf mysqlLocal {

View file

@ -271,13 +271,14 @@ in
kmenuedit kmenuedit
kscreen kscreen
kscreenlocker kscreenlocker
ksysguard ksystemstats
kwayland kwayland
kwin kwin
kwrited kwrited
libkscreen libkscreen
libksysguard libksysguard
milou milou
plasma-systemmonitor
plasma-browser-integration plasma-browser-integration
plasma-integration plasma-integration
polkit-kde-agent polkit-kde-agent

View file

@ -39,6 +39,6 @@ python3.pkgs.buildPythonApplication {
''; '';
meta = with lib; { meta = with lib; {
maintainers = with maintainers; [ ]; maintainers = with maintainers; [ ] ++ teams.pantheon.members;
}; };
} }

View file

@ -10,8 +10,8 @@ let
in in
{ {
meta = { meta = with lib; {
maintainers = with maintainers; [ ]; maintainers = with maintainers; [ ] ++ teams.pantheon.members;
}; };
options = { options = {

View file

@ -69,8 +69,8 @@ let
in in
{ {
meta = { meta = with lib; {
maintainers = with maintainers; [ ]; maintainers = with maintainers; [ ] ++ teams.pantheon.members;
}; };
# Note: the order in which lightdm greeter modules are imported # Note: the order in which lightdm greeter modules are imported

View file

@ -72,11 +72,14 @@ def main():
f"Setting session name: {session}, as we found the existing wayland-session: {session_file}" f"Setting session name: {session}, as we found the existing wayland-session: {session_file}"
) )
user.set_session(session) user.set_session(session)
user.set_session_type("wayland")
elif is_session_xsession(session_file): elif is_session_xsession(session_file):
logging.debug( logging.debug(
f"Setting session name: {session}, as we found the existing xsession: {session_file}" f"Setting session name: {session}, as we found the existing xsession: {session_file}"
) )
user.set_x_session(session) user.set_x_session(session)
user.set_session(session)
user.set_session_type("x11")
else: else:
logging.error(f"Couldn't figure out session type for {session_file}") logging.error(f"Couldn't figure out session type for {session_file}")
sys.exit(1) sys.exit(1)

View file

@ -26,6 +26,7 @@ in
./leftwm.nix ./leftwm.nix
./lwm.nix ./lwm.nix
./metacity.nix ./metacity.nix
./mlvwm.nix
./mwm.nix ./mwm.nix
./openbox.nix ./openbox.nix
./pekwm.nix ./pekwm.nix

View file

@ -0,0 +1,41 @@
# NixOS module for the mlvwm (Macintosh-like Virtual Window Manager) X11
# window manager: exposes an enable switch plus an optional config file,
# and registers an xserver session entry that launches mlvwm.
{ config, lib, pkgs, ... }:
with lib;
let cfg = config.services.xserver.windowManager.mlvwm;
in
{
  options.services.xserver.windowManager.mlvwm = {
    enable = mkEnableOption "Macintosh-like Virtual Window Manager";

    configFile = mkOption {
      # null means "no -f flag": mlvwm then falls back to its own default
      # lookup ($HOME/.mlvwmrc, per the option description).
      default = null;
      type = with types; nullOr path;
      description = ''
        Path to the mlvwm configuration file.
        If left at the default value, $HOME/.mlvwmrc will be used.
      '';
    };
  };

  config = mkIf cfg.enable {
    # Register mlvwm as a selectable window-manager session for the
    # display manager.
    services.xserver.windowManager.session = [{
      name = "mlvwm";
      start = ''
        ${pkgs.mlvwm}/bin/mlvwm ${optionalString (cfg.configFile != null)
          "-f /etc/mlvwm/mlvwmrc"
        } &
        waitPID=$!
      '';
      # NOTE(review): when configFile is set, the session passes the
      # /etc/mlvwm/mlvwmrc copy (created below) via -f rather than the
      # store path directly — presumably so the running WM survives
      # garbage collection of old generations; confirm intent.
    }];

    # Materialize the user-supplied config under /etc so the -f path
    # above resolves. Only created when a config file was actually given.
    environment.etc."mlvwm/mlvwmrc" = mkIf (cfg.configFile != null) {
      source = cfg.configFile;
    };

    # Make the mlvwm binary available in the system PATH.
    environment.systemPackages = [ pkgs.mlvwm ];
  };
}

View file

@ -657,6 +657,7 @@ in
pkgs.xterm pkgs.xterm
pkgs.xdg-utils pkgs.xdg-utils
xorg.xf86inputevdev.out # get evdev.4 man page xorg.xf86inputevdev.out # get evdev.4 man page
pkgs.nixos-icons # needed for gnome and pantheon about dialog, nixos-manual and maybe more
] ]
++ optional (elem "virtualbox" cfg.videoDrivers) xorg.xrefresh; ++ optional (elem "virtualbox" cfg.videoDrivers) xorg.xrefresh;

Some files were not shown because too many files have changed in this diff Show more