Project import generated by Copybara.

GitOrigin-RevId: 5ed481943351e9fd354aeb557679624224de38d5
This commit is contained in:
Default email 2023-01-20 11:41:00 +01:00
parent 28cbcf08a4
commit 0d9fc34957
2379 changed files with 57504 additions and 67450 deletions

View file

@ -17,9 +17,16 @@ function Inlines(inlines)
if correct_tags then if correct_tags then
-- docutils supports alphanumeric strings separated by [-._:] -- docutils supports alphanumeric strings separated by [-._:]
-- We are slightly more liberal for simplicity. -- We are slightly more liberal for simplicity.
local role = first.text:match('^{([-._+:%w]+)}$') -- Allow preceding punctuation (eg '('), otherwise '({file}`...`)'
if role ~= nil then -- does not match. Also allow anything followed by a non-breaking space
inlines:remove(i) -- since pandoc emits those after certain abbreviations (e.g. e.g.).
local prefix, role = first.text:match('^(.*){([-._+:%w]+)}$')
if role ~= nil and (prefix == '' or prefix:match("^.*[%p ]$") ~= nil) then
if prefix == '' then
inlines:remove(i)
else
first.text = prefix
end
second.attributes['role'] = role second.attributes['role'] = role
second.classes:insert('interpreted-text') second.classes:insert('interpreted-text')
end end

File diff suppressed because it is too large Load diff

View file

@ -175,10 +175,11 @@ buildNpmPackage rec {
hash = "sha256-BR+ZGkBBfd0dSQqAvujsbgsEPFYw/ThrylxUbOksYxM="; hash = "sha256-BR+ZGkBBfd0dSQqAvujsbgsEPFYw/ThrylxUbOksYxM=";
}; };
patches = [ ./remove-prepack-script.patch ];
npmDepsHash = "sha256-tuEfyePwlOy2/mOPdXbqJskO6IowvAP4DWg8xSZwbJw="; npmDepsHash = "sha256-tuEfyePwlOy2/mOPdXbqJskO6IowvAP4DWg8xSZwbJw=";
# The prepack script runs the build script, which we'd rather do in the build phase.
npmPackFlags = [ "--ignore-scripts" ];
NODE_OPTIONS = "--openssl-legacy-provider"; NODE_OPTIONS = "--openssl-legacy-provider";
meta = with lib; { meta = with lib; {

View file

@ -2,14 +2,11 @@
Writing Nix expressions for Qt libraries and applications is largely similar as for other C++ software. Writing Nix expressions for Qt libraries and applications is largely similar as for other C++ software.
This section assumes some knowledge of the latter. This section assumes some knowledge of the latter.
There are two problems that the Nixpkgs Qt infrastructure addresses,
which are not shared by other C++ software:
1. There are usually multiple supported versions of Qt in Nixpkgs. The major caveat with Qt applications is that Qt uses a plugin system to load additional modules at runtime,
All of a package's dependencies must be built with the same version of Qt. from a list of well-known locations. In Nixpkgs, we patch QtCore to instead use an environment variable,
This is similar to the version constraints imposed on interpreted languages like Python. and wrap Qt applications to set it to the right paths. This effectively makes the runtime dependencies
2. Qt makes extensive use of runtime dependency detection. pure and explicit at build-time, at the cost of introducing an extra indirection.
Runtime dependencies are made into build dependencies through wrappers.
## Nix expression for a Qt package (default.nix) {#qt-default-nix} ## Nix expression for a Qt package (default.nix) {#qt-default-nix}
@ -95,66 +92,3 @@ stdenv.mkDerivation {
This means that scripts won't be automatically wrapped so you'll need to manually wrap them as previously mentioned. This means that scripts won't be automatically wrapped so you'll need to manually wrap them as previously mentioned.
An example of when you'd always need to do this is with Python applications that use PyQt. An example of when you'd always need to do this is with Python applications that use PyQt.
::: :::
## Adding a library to Nixpkgs {#adding-a-library-to-nixpkgs}
Add Qt libraries to `qt5-packages.nix` to make them available for every
supported Qt version.
### Example adding a Qt library {#qt-library-all-packages-nix}
The following represents the contents of `qt5-packages.nix`.
```nix
{
# ...
mylib = callPackage ../path/to/mylib {};
# ...
}
```
Libraries are built with every available version of Qt.
Use the `meta.broken` attribute to disable the package for unsupported Qt versions:
```nix
{ stdenv, lib, qtbase }:
stdenv.mkDerivation {
# ...
# Disable this library with Qt < 5.9.0
meta.broken = lib.versionOlder qtbase.version "5.9.0";
}
```
## Adding an application to Nixpkgs {#adding-an-application-to-nixpkgs}
Add Qt applications to `qt5-packages.nix`. Add an alias to `all-packages.nix`
to select the Qt 5 version used for the application.
### Example adding a Qt application {#qt-application-all-packages-nix}
The following represents the contents of `qt5-packages.nix`.
```nix
{
# ...
myapp = callPackage ../path/to/myapp {};
# ...
}
```
The following represents the contents of `all-packages.nix`.
```nix
{
# ...
myapp = libsForQt5.myapp;
# ...
}
```

View file

@ -994,6 +994,32 @@ Convenience function for `makeWrapper` that replaces `<\executable\>` with a wra
If you will apply it multiple times, it will overwrite the wrapper file and you will end up with double wrapping, which should be avoided. If you will apply it multiple times, it will overwrite the wrapper file and you will end up with double wrapping, which should be avoided.
### `prependToVar` \<variableName\> \<elements...\> {#fun-prependToVar}
Prepend elements to a variable.
Example:
```shellSession
$ configureFlags="--disable-static"
$ prependToVar configureFlags --disable-dependency-tracking --enable-foo
$ echo $configureFlags
--disable-dependency-tracking --enable-foo --disable-static
```
### `appendToVar` \<variableName\> \<elements...\> {#fun-appendToVar}
Append elements to a variable.
Example:
```shellSession
$ configureFlags="--disable-static"
$ appendToVar configureFlags --disable-dependency-tracking --enable-foo
$ echo $configureFlags
--disable-static --disable-dependency-tracking --enable-foo
```
## Package setup hooks {#ssec-setup-hooks} ## Package setup hooks {#ssec-setup-hooks}
Nix itself considers a build-time dependency as merely something that should previously be built and accessible at build time—packages themselves are on their own to perform any additional setup. In most cases, that is fine, and the downstream derivation can deal with its own dependencies. But for a few common tasks, that would result in almost every package doing the same sort of setup work—depending not on the package itself, but entirely on which dependencies were used. Nix itself considers a build-time dependency as merely something that should previously be built and accessible at build time—packages themselves are on their own to perform any additional setup. In most cases, that is fine, and the downstream derivation can deal with its own dependencies. But for a few common tasks, that would result in almost every package doing the same sort of setup work—depending not on the package itself, but entirely on which dependencies were used.

View file

@ -107,7 +107,7 @@ rec {
# Same as `makeExtensible` but the name of the extending attribute is # Same as `makeExtensible` but the name of the extending attribute is
# customized. # customized.
makeExtensibleWithCustomName = extenderName: rattrs: makeExtensibleWithCustomName = extenderName: rattrs:
fix' rattrs // { fix' (self: (rattrs self) // {
${extenderName} = f: makeExtensibleWithCustomName extenderName (extends f rattrs); ${extenderName} = f: makeExtensibleWithCustomName extenderName (extends f rattrs);
}; });
} }

View file

@ -68,6 +68,7 @@ in {
none = []; none = [];
arm = filterDoubles predicates.isAarch32; arm = filterDoubles predicates.isAarch32;
armv7 = filterDoubles predicates.isArmv7;
aarch64 = filterDoubles predicates.isAarch64; aarch64 = filterDoubles predicates.isAarch64;
x86 = filterDoubles predicates.isx86; x86 = filterDoubles predicates.isx86;
i686 = filterDoubles predicates.isi686; i686 = filterDoubles predicates.isi686;
@ -75,6 +76,7 @@ in {
microblaze = filterDoubles predicates.isMicroBlaze; microblaze = filterDoubles predicates.isMicroBlaze;
mips = filterDoubles predicates.isMips; mips = filterDoubles predicates.isMips;
mmix = filterDoubles predicates.isMmix; mmix = filterDoubles predicates.isMmix;
power = filterDoubles predicates.isPower;
riscv = filterDoubles predicates.isRiscV; riscv = filterDoubles predicates.isRiscV;
riscv32 = filterDoubles predicates.isRiscV32; riscv32 = filterDoubles predicates.isRiscV32;
riscv64 = filterDoubles predicates.isRiscV64; riscv64 = filterDoubles predicates.isRiscV64;
@ -83,6 +85,7 @@ in {
or1k = filterDoubles predicates.isOr1k; or1k = filterDoubles predicates.isOr1k;
m68k = filterDoubles predicates.isM68k; m68k = filterDoubles predicates.isM68k;
s390 = filterDoubles predicates.isS390; s390 = filterDoubles predicates.isS390;
s390x = filterDoubles predicates.isS390x;
js = filterDoubles predicates.isJavaScript; js = filterDoubles predicates.isJavaScript;
bigEndian = filterDoubles predicates.isBigEndian; bigEndian = filterDoubles predicates.isBigEndian;

View file

@ -22,6 +22,9 @@ rec {
]; ];
isx86 = { cpu = { family = "x86"; }; }; isx86 = { cpu = { family = "x86"; }; };
isAarch32 = { cpu = { family = "arm"; bits = 32; }; }; isAarch32 = { cpu = { family = "arm"; bits = 32; }; };
isArmv7 = map ({ arch, ... }: { cpu = { inherit arch; }; })
(lib.filter (cpu: lib.hasPrefix "armv7" cpu.arch or "")
(lib.attrValues cpuTypes));
isAarch64 = { cpu = { family = "arm"; bits = 64; }; }; isAarch64 = { cpu = { family = "arm"; bits = 64; }; };
isAarch = { cpu = { family = "arm"; }; }; isAarch = { cpu = { family = "arm"; }; };
isMicroBlaze = { cpu = { family = "microblaze"; }; }; isMicroBlaze = { cpu = { family = "microblaze"; }; };
@ -44,6 +47,7 @@ rec {
isOr1k = { cpu = { family = "or1k"; }; }; isOr1k = { cpu = { family = "or1k"; }; };
isM68k = { cpu = { family = "m68k"; }; }; isM68k = { cpu = { family = "m68k"; }; };
isS390 = { cpu = { family = "s390"; }; }; isS390 = { cpu = { family = "s390"; }; };
isS390x = { cpu = { family = "s390"; bits = 64; }; };
isJavaScript = { cpu = cpuTypes.js; }; isJavaScript = { cpu = cpuTypes.js; };
is32bit = { cpu = { bits = 32; }; }; is32bit = { cpu = { bits = 32; }; };
@ -78,7 +82,7 @@ rec {
isUClibc = with abis; map (a: { abi = a; }) [ uclibc uclibceabi uclibceabihf ]; isUClibc = with abis; map (a: { abi = a; }) [ uclibc uclibceabi uclibceabihf ];
isEfi = map (family: { cpu.family = family; }) isEfi = map (family: { cpu.family = family; })
[ "x86" "arm" "aarch64" ]; [ "x86" "arm" "aarch64" "riscv" ];
}; };
matchAnyAttrs = patterns: matchAnyAttrs = patterns:

View file

@ -1,11 +1,11 @@
{ # The pkgs used for dependencies for the testing itself { # The pkgs used for dependencies for the testing itself
# Don't test properties of pkgs.lib, but rather the lib in the parent directory # Don't test properties of pkgs.lib, but rather the lib in the parent directory
pkgs ? import ../.. {} // { lib = throw "pkgs.lib accessed, but the lib tests should use nixpkgs' lib path directly!"; } pkgs ? import ../.. {} // { lib = throw "pkgs.lib accessed, but the lib tests should use nixpkgs' lib path directly!"; },
nix ? pkgs.nix,
}: }:
pkgs.runCommand "nixpkgs-lib-tests" { pkgs.runCommand "nixpkgs-lib-tests" {
buildInputs = [ buildInputs = [
pkgs.nix
(import ./check-eval.nix) (import ./check-eval.nix)
(import ./maintainers.nix { (import ./maintainers.nix {
inherit pkgs; inherit pkgs;
@ -19,8 +19,12 @@ pkgs.runCommand "nixpkgs-lib-tests" {
inherit pkgs; inherit pkgs;
}) })
]; ];
nativeBuildInputs = [
nix
];
strictDeps = true;
} '' } ''
datadir="${pkgs.nix}/share" datadir="${nix}/share"
export TEST_ROOT=$(pwd)/test-tmp export TEST_ROOT=$(pwd)/test-tmp
export NIX_BUILD_HOOK= export NIX_BUILD_HOOK=
export NIX_CONF_DIR=$TEST_ROOT/etc export NIX_CONF_DIR=$TEST_ROOT/etc

View file

@ -16,12 +16,15 @@ with lib.systems.doubles; lib.runTests {
testall = mseteq all (linux ++ darwin ++ freebsd ++ openbsd ++ netbsd ++ illumos ++ wasi ++ windows ++ embedded ++ mmix ++ js ++ genode ++ redox); testall = mseteq all (linux ++ darwin ++ freebsd ++ openbsd ++ netbsd ++ illumos ++ wasi ++ windows ++ embedded ++ mmix ++ js ++ genode ++ redox);
testarm = mseteq arm [ "armv5tel-linux" "armv6l-linux" "armv6l-netbsd" "armv6l-none" "armv7a-linux" "armv7a-netbsd" "armv7l-linux" "armv7l-netbsd" "arm-none" "armv7a-darwin" ]; testarm = mseteq arm [ "armv5tel-linux" "armv6l-linux" "armv6l-netbsd" "armv6l-none" "armv7a-linux" "armv7a-netbsd" "armv7l-linux" "armv7l-netbsd" "arm-none" "armv7a-darwin" ];
testarmv7 = mseteq armv7 [ "armv7a-darwin" "armv7a-linux" "armv7l-linux" "armv7a-netbsd" "armv7l-netbsd" ];
testi686 = mseteq i686 [ "i686-linux" "i686-freebsd13" "i686-genode" "i686-netbsd" "i686-openbsd" "i686-cygwin" "i686-windows" "i686-none" "i686-darwin" ]; testi686 = mseteq i686 [ "i686-linux" "i686-freebsd13" "i686-genode" "i686-netbsd" "i686-openbsd" "i686-cygwin" "i686-windows" "i686-none" "i686-darwin" ];
testmips = mseteq mips [ "mips64el-linux" "mipsel-linux" "mipsel-netbsd" ]; testmips = mseteq mips [ "mips64el-linux" "mipsel-linux" "mipsel-netbsd" ];
testmmix = mseteq mmix [ "mmix-mmixware" ]; testmmix = mseteq mmix [ "mmix-mmixware" ];
testpower = mseteq power [ "powerpc-netbsd" "powerpc-none" "powerpc64-linux" "powerpc64le-linux" "powerpcle-none" ];
testriscv = mseteq riscv [ "riscv32-linux" "riscv64-linux" "riscv32-netbsd" "riscv64-netbsd" "riscv32-none" "riscv64-none" ]; testriscv = mseteq riscv [ "riscv32-linux" "riscv64-linux" "riscv32-netbsd" "riscv64-netbsd" "riscv32-none" "riscv64-none" ];
testriscv32 = mseteq riscv32 [ "riscv32-linux" "riscv32-netbsd" "riscv32-none" ]; testriscv32 = mseteq riscv32 [ "riscv32-linux" "riscv32-netbsd" "riscv32-none" ];
testriscv64 = mseteq riscv64 [ "riscv64-linux" "riscv64-netbsd" "riscv64-none" ]; testriscv64 = mseteq riscv64 [ "riscv64-linux" "riscv64-netbsd" "riscv64-none" ];
tests390x = mseteq s390x [ "s390x-linux" "s390x-none" ];
testx86_64 = mseteq x86_64 [ "x86_64-linux" "x86_64-darwin" "x86_64-freebsd13" "x86_64-genode" "x86_64-redox" "x86_64-openbsd" "x86_64-netbsd" "x86_64-cygwin" "x86_64-solaris" "x86_64-windows" "x86_64-none" ]; testx86_64 = mseteq x86_64 [ "x86_64-linux" "x86_64-darwin" "x86_64-freebsd13" "x86_64-genode" "x86_64-redox" "x86_64-openbsd" "x86_64-netbsd" "x86_64-cygwin" "x86_64-solaris" "x86_64-windows" "x86_64-none" ];
testcygwin = mseteq cygwin [ "i686-cygwin" "x86_64-cygwin" ]; testcygwin = mseteq cygwin [ "i686-cygwin" "x86_64-cygwin" ];

View file

@ -3291,6 +3291,12 @@
githubId = 798427; githubId = 798427;
name = "Davor Babic"; name = "Davor Babic";
}; };
davsanchez = {
email = "davidslt+nixpkgs@pm.me";
github = "davsanchez";
githubId = 11422515;
name = "David Sánchez";
};
dawidsowa = { dawidsowa = {
email = "dawid_sowa@posteo.net"; email = "dawid_sowa@posteo.net";
github = "dawidsowa"; github = "dawidsowa";
@ -4183,6 +4189,12 @@
githubId = 5300871; githubId = 5300871;
name = "Leon Kowarschick"; name = "Leon Kowarschick";
}; };
elnudev = {
email = "elnu@elnu.com";
github = "elnudev";
githubId = 9874955;
name = "Elnu";
};
elohmeier = { elohmeier = {
email = "elo-nixos@nerdworks.de"; email = "elo-nixos@nerdworks.de";
github = "elohmeier"; github = "elohmeier";
@ -6870,6 +6882,12 @@
githubId = 310981; githubId = 310981;
name = "Joel Burget"; name = "Joel Burget";
}; };
joelkoen = {
email = "mail@joelkoen.com";
github = "joelkoen";
githubId = 122502655;
name = "Joel Koen";
};
joelmo = { joelmo = {
email = "joel.moberg@gmail.com"; email = "joel.moberg@gmail.com";
github = "joelmo"; github = "joelmo";
@ -7260,12 +7278,6 @@
githubId = 20658981; githubId = 20658981;
name = "Jarosław Wygoda"; name = "Jarosław Wygoda";
}; };
jyooru = {
email = "joel@joel.tokyo";
github = "jyooru";
githubId = 63786778;
name = "Joel";
};
jyp = { jyp = {
email = "jeanphilippe.bernardy@gmail.com"; email = "jeanphilippe.bernardy@gmail.com";
github = "jyp"; github = "jyp";
@ -9425,6 +9437,12 @@
githubId = 1776903; githubId = 1776903;
name = "Andrew Abbott"; name = "Andrew Abbott";
}; };
mislavzanic = {
email = "mislavzanic3@gmail.com";
github = "mislavzanic";
githubId = 48838244;
name = "Mislav Zanic";
};
misterio77 = { misterio77 = {
email = "eu@misterio.me"; email = "eu@misterio.me";
github = "Misterio77"; github = "Misterio77";
@ -9825,6 +9843,16 @@
githubId = 5047140; githubId = 5047140;
name = "Victor Collod"; name = "Victor Collod";
}; };
munksgaard = {
name = "Philip Munksgaard";
email = "philip@munksgaard.me";
github = "munksgaard";
githubId = 230613;
matrix = "@philip:matrix.munksgaard.me";
keys = [{
fingerprint = "5658 4D09 71AF E45F CC29 6BD7 4CE6 2A90 EFC0 B9B2";
}];
};
muscaln = { muscaln = {
email = "muscaln@protonmail.com"; email = "muscaln@protonmail.com";
github = "muscaln"; github = "muscaln";
@ -9932,6 +9960,12 @@
fingerprint = "7A10 AB8E 0BEC 566B 090C 9BE3 D812 6E55 9CE7 C35D"; fingerprint = "7A10 AB8E 0BEC 566B 090C 9BE3 D812 6E55 9CE7 C35D";
}]; }];
}; };
nat-418 = {
email = "93013864+nat-418@users.noreply.github.com";
github = "nat-418";
githubId = 93013864;
name = "nat-418";
};
nathanruiz = { nathanruiz = {
email = "nathanruiz@protonmail.com"; email = "nathanruiz@protonmail.com";
github = "nathanruiz"; github = "nathanruiz";
@ -10742,6 +10776,12 @@
githubId = 15930073; githubId = 15930073;
name = "Moritz Scheuren"; name = "Moritz Scheuren";
}; };
ovlach = {
email = "ondrej@vlach.xyz";
name = "Ondrej Vlach";
github = "ovlach";
githubId = 4405107;
};
ozkutuk = { ozkutuk = {
email = "ozkutuk@protonmail.com"; email = "ozkutuk@protonmail.com";
github = "ozkutuk"; github = "ozkutuk";
@ -11808,6 +11848,12 @@
githubId = 1973389; githubId = 1973389;
name = "Reuben D'Netto"; name = "Reuben D'Netto";
}; };
realsnick = {
name = "Ido Samuelson";
email = "ido.samuelson@gmail.com";
github = "realsnick";
githubId = 1440852;
};
redbaron = { redbaron = {
email = "ivanov.maxim@gmail.com"; email = "ivanov.maxim@gmail.com";
github = "redbaron"; github = "redbaron";
@ -13549,6 +13595,12 @@
githubId = 16734772; githubId = 16734772;
name = "Sumner Evans"; name = "Sumner Evans";
}; };
suominen = {
email = "kimmo@suominen.com";
github = "suominen";
githubId = 1939855;
name = "Kimmo Suominen";
};
superbo = { superbo = {
email = "supernbo@gmail.com"; email = "supernbo@gmail.com";
github = "SuperBo"; github = "SuperBo";
@ -13869,6 +13921,14 @@
github = "tejasag"; github = "tejasag";
githubId = 67542663; githubId = 67542663;
}; };
tejing = {
name = "Jeff Huffman";
email = "tejing@tejing.com";
matrix = "@tejing:matrix.org";
github = "tejing1";
githubId = 5663576;
keys = [{ fingerprint = "6F0F D43B 80E5 583E 60FC 51DC 4936 D067 EB12 AB32"; }];
};
telotortium = { telotortium = {
email = "rirelan@gmail.com"; email = "rirelan@gmail.com";
github = "telotortium"; github = "telotortium";
@ -14378,6 +14438,12 @@
githubId = 52011418; githubId = 52011418;
name = "Travis Davis"; name = "Travis Davis";
}; };
traxys = {
email = "quentin+dev@familleboyer.net";
github = "traxys";
githubId = 5623227;
name = "Quentin Boyer";
};
TredwellGit = { TredwellGit = {
email = "tredwell@tutanota.com"; email = "tredwell@tutanota.com";
github = "TredwellGit"; github = "TredwellGit";
@ -16253,6 +16319,24 @@
githubId = 5228243; githubId = 5228243;
name = "waelwindows"; name = "waelwindows";
}; };
witchof0x20 = {
name = "Jade";
email = "jade@witchof.space";
github = "witchof0x20";
githubId = 36118348;
keys = [{
fingerprint = "69C9 876B 5797 1B2E 11C5 7C39 80A1 F76F C9F9 54AE";
}];
};
WhiteBlackGoose = {
email = "wbg@angouri.org";
github = "WhiteBlackGoose";
githubId = 31178401;
name = "WhiteBlackGoose";
keys = [{
fingerprint = "640B EDDE 9734 310A BFA3 B257 52ED AE6A 3995 AFAB";
}];
};
wuyoli = { wuyoli = {
name = "wuyoli"; name = "wuyoli";
email = "wuyoli@tilde.team"; email = "wuyoli@tilde.team";
@ -16271,4 +16355,16 @@
github = "Detegr"; github = "Detegr";
githubId = 724433; githubId = 724433;
}; };
RossComputerGuy = {
name = "Tristan Ross";
email = "tristan.ross@midstall.com";
github = "RossComputerGuy";
githubId = 19699320;
};
franzmondlichtmann = {
name = "Franz Schroepf";
email = "franz-schroepf@t-online.de";
github = "franzmondlichtmann";
githubId = 105480088;
};
} }

View file

@ -280,6 +280,7 @@ with lib.maintainers; {
kalbasit kalbasit
mic92 mic92
zowoq zowoq
qbit
]; ];
scope = "Maintain Golang compilers."; scope = "Maintain Golang compilers.";
shortName = "Go"; shortName = "Go";

View file

@ -8,7 +8,7 @@ services.openssh.enable = true;
By default, root logins using a password are disallowed. They can be By default, root logins using a password are disallowed. They can be
disabled entirely by setting disabled entirely by setting
[](#opt-services.openssh.permitRootLogin) to `"no"`. [](#opt-services.openssh.settings.PermitRootLogin) to `"no"`.
You can declaratively specify authorised RSA/DSA public keys for a user You can declaratively specify authorised RSA/DSA public keys for a user
as follows: as follows:

View file

@ -40,6 +40,26 @@ file.
$ nix-build nixos/release.nix -A manual.x86_64-linux $ nix-build nixos/release.nix -A manual.x86_64-linux
``` ```
This file should *not* usually be written by hand. Instead it is preferred
to write documentation using CommonMark and converting it to CommonMark
using pandoc. The simplest documentation can be converted using just
```ShellSession
$ pandoc doc.md -t docbook --top-level-division=chapter -f markdown+smart > doc.xml
```
More elaborate documentation may wish to add one or more of the pandoc
filters used to build the remainder of the manual, for example the GNOME
desktop uses
```ShellSession
$ pandoc gnome.md -t docbook --top-level-division=chapter \
--extract-media=media -f markdown+smart \
--lua-filter ../../../../../doc/build-aux/pandoc-filters/myst-reader/roles.lua \
--lua-filter ../../../../../doc/build-aux/pandoc-filters/docbook-writer/rst-roles.lua \
> gnome.xml
```
- `buildDocsInSandbox` indicates whether the option documentation for the - `buildDocsInSandbox` indicates whether the option documentation for the
module can be built in a derivation sandbox. This option is currently only module can be built in a derivation sandbox. This option is currently only
honored for modules shipped by nixpkgs. User modules and modules taken from honored for modules shipped by nixpkgs. User modules and modules taken from

View file

@ -9,7 +9,7 @@ services.openssh.enable = true;
<para> <para>
By default, root logins using a password are disallowed. They can be By default, root logins using a password are disallowed. They can be
disabled entirely by setting disabled entirely by setting
<xref linkend="opt-services.openssh.permitRootLogin" /> to <xref linkend="opt-services.openssh.settings.PermitRootLogin" /> to
<literal>&quot;no&quot;</literal>. <literal>&quot;no&quot;</literal>.
</para> </para>
<para> <para>

View file

@ -50,6 +50,27 @@
</para> </para>
<programlisting> <programlisting>
$ nix-build nixos/release.nix -A manual.x86_64-linux $ nix-build nixos/release.nix -A manual.x86_64-linux
</programlisting>
<para>
This file should <emphasis>not</emphasis> usually be written by
hand. Instead it is preferred to write documentation using
CommonMark and converting it to CommonMark using pandoc. The
simplest documentation can be converted using just
</para>
<programlisting>
$ pandoc doc.md -t docbook --top-level-division=chapter -f markdown+smart &gt; doc.xml
</programlisting>
<para>
More elaborate documentation may wish to add one or more of the
pandoc filters used to build the remainder of the manual, for
example the GNOME desktop uses
</para>
<programlisting>
$ pandoc gnome.md -t docbook --top-level-division=chapter \
--extract-media=media -f markdown+smart \
--lua-filter ../../../../../doc/build-aux/pandoc-filters/myst-reader/roles.lua \
--lua-filter ../../../../../doc/build-aux/pandoc-filters/docbook-writer/rst-roles.lua \
&gt; gnome.xml
</programlisting> </programlisting>
</listitem> </listitem>
<listitem> <listitem>

View file

@ -10,7 +10,7 @@
In addition to numerous new and upgraded packages, this release In addition to numerous new and upgraded packages, this release
has the following highlights: has the following highlights:
</para> </para>
<itemizedlist spacing="compact"> <itemizedlist>
<listitem> <listitem>
<para> <para>
Cinnamon has been updated to 5.6, see Cinnamon has been updated to 5.6, see
@ -18,6 +18,14 @@
pull request</link> for what is changed. pull request</link> for what is changed.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
<literal>nixos-rebuild</literal> now supports an extra
<literal>--specialisation</literal> option that can be used to
change specialisation for <literal>switch</literal> and
<literal>test</literal> commands.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>
<section xml:id="sec-release-23.05-new-services"> <section xml:id="sec-release-23.05-new-services">
@ -60,6 +68,13 @@
<link linkend="opt-programs.fzf.fuzzyCompletion">programs.fzf</link>. <link linkend="opt-programs.fzf.fuzzyCompletion">programs.fzf</link>.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
<link xlink:href="https://github.com/hzeller/gmrender-resurrect">gmediarender</link>,
a simple, headless UPnP/DLNA renderer. Available as
<link xlink:href="options.html#opt-services.gmediarender.enable">services.gmediarender</link>.
</para>
</listitem>
<listitem> <listitem>
<para> <para>
<link xlink:href="https://github.com/StevenBlack/hosts">stevenblack-blocklist</link>, <link xlink:href="https://github.com/StevenBlack/hosts">stevenblack-blocklist</link>,
@ -99,6 +114,14 @@
<link xlink:href="options.html#opt-services.ulogd.enable">services.ulogd</link>. <link xlink:href="options.html#opt-services.ulogd.enable">services.ulogd</link>.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
<link xlink:href="https://photoprism.app/">photoprism</link>,
a AI-Powered Photos App for the Decentralized Web. Available
as
<link xlink:href="options.html#opt-services.photoprism.enable">services.photoprism</link>.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>
<section xml:id="sec-release-23.05-incompatibilities"> <section xml:id="sec-release-23.05-incompatibilities">
@ -214,6 +237,17 @@
or configure your firewall. or configure your firewall.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
<literal>llvmPackages_rocm.llvm</literal> will not contain
<literal>clang</literal> or <literal>compiler-rt</literal>.
<literal>llvmPackages_rocm.clang</literal> will not contain
<literal>llvm</literal>.
<literal>llvmPackages_rocm.clangNoCompilerRt</literal> has
been removed in favor of using
<literal>llvmPackages_rocm.clang-unwrapped</literal>.
</para>
</listitem>
<listitem> <listitem>
<para> <para>
The Nginx module now validates the syntax of config files at The Nginx module now validates the syntax of config files at
@ -271,6 +305,14 @@
that it configures the NixOS boot process, not the Nix daemon. that it configures the NixOS boot process, not the Nix daemon.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
Deprecated <literal>xlibsWrapper</literal> transitional
package has been removed in favour of direct use of its
constitutents: <literal>xorg.libX11</literal>,
<literal>freetype</literal> and others.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>
<section xml:id="sec-release-23.05-notable-changes"> <section xml:id="sec-release-23.05-notable-changes">
@ -305,6 +347,24 @@
<link linkend="opt-services.usbmuxd.package">services.usbmuxd.package</link> <link linkend="opt-services.usbmuxd.package">services.usbmuxd.package</link>
</para> </para>
</listitem> </listitem>
<listitem>
<para>
A few openssh options have been moved from extraConfig to the
new freeform option <literal>settings</literal> and renamed as
follow:
<literal>services.openssh.kbdInteractiveAuthentication</literal>
to
<literal>services.openssh.settings.KbdInteractiveAuthentication</literal>,
<literal>services.openssh.passwordAuthentication</literal> to
<literal>services.openssh.settings.PasswordAuthentication</literal>,
<literal>services.openssh.useDns</literal> to
<literal>services.openssh.settings.UseDns</literal>,
<literal>services.openssh.permitRootLogin</literal> to
<literal>services.openssh.settings.PermitRootLogin</literal>,
<literal>services.openssh.logLevel</literal> to
<literal>services.openssh.settings.LogLevel</literal>.
</para>
</listitem>
<listitem> <listitem>
<para> <para>
<literal>services.mastodon</literal> gained a tootctl wrapped <literal>services.mastodon</literal> gained a tootctl wrapped
@ -337,6 +397,16 @@
which now also accepts structured settings. which now also accepts structured settings.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
The <literal>wordpress</literal> service now takes
configuration via the
<literal>services.wordpress.sites.&lt;name&gt;.settings</literal>
attribute set, <literal>extraConfig</literal> is still
available to append additional text to
<literal>wp-config.php</literal>.
</para>
</listitem>
<listitem> <listitem>
<para> <para>
To reduce closure size in To reduce closure size in
@ -444,6 +514,22 @@
dynamically. dynamically.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
The <literal>root</literal> package is now built with the
<literal>&quot;-Dgnuinstall=ON&quot;</literal> CMake flag,
making the output conform the <literal>bin</literal>
<literal>lib</literal> <literal>share</literal> layout. In
this layout, <literal>tutorials</literal> is under
<literal>share/doc/ROOT/</literal>; <literal>cmake</literal>,
<literal>font</literal>, <literal>icons</literal>,
<literal>js</literal> and <literal>macro</literal> under
<literal>share/root</literal>;
<literal>Makefile.comp</literal> and
<literal>Makefile.config</literal> under
<literal>etc/root</literal>.
</para>
</listitem>
<listitem> <listitem>
<para> <para>
Enabling global redirect in Enabling global redirect in
@ -475,6 +561,13 @@
<link xlink:href="options.html#opt-system.stateVersion">system.stateVersion</link>. <link xlink:href="options.html#opt-system.stateVersion">system.stateVersion</link>.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
<literal>hip</literal> has been separated into
<literal>hip</literal>, <literal>hip-common</literal> and
<literal>hipcc</literal>.
</para>
</listitem>
<listitem> <listitem>
<para> <para>
Resilio sync secret keys can now be provided using a secrets Resilio sync secret keys can now be provided using a secrets
@ -532,6 +625,13 @@
information about the current generation revision information about the current generation revision
</para> </para>
</listitem> </listitem>
<listitem>
<para>
The option
<literal>services.nomad.extraSettingsPlugins</literal> has
been fixed to allow more than one plugin in the path.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>
</section> </section>

View file

@ -113,6 +113,18 @@
</group> <replaceable>name</replaceable> </group> <replaceable>name</replaceable>
</arg> </arg>
<arg>
<group choice='req'>
<arg choice='plain'>
<option>--specialisation</option>
</arg>
<arg choice='plain'>
<option>-c</option>
</arg>
</group> <replaceable>name</replaceable>
</arg>
<sbr /> <sbr />
<arg> <arg>
@ -204,6 +216,20 @@
<command>nixos-rebuild switch</command> or <command>nixos-rebuild <command>nixos-rebuild switch</command> or <command>nixos-rebuild
boot</command> remain available in the GRUB menu. boot</command> remain available in the GRUB menu.
</para> </para>
<para>
Note that if you are using specializations, running just
<command>nixos-rebuild switch</command> will switch you back to the
unspecialized, base system - in that case, you might want to use this
instead:
<screen>
<prompt>$ </prompt>nixos-rebuild switch --specialisation your-specialisation-name
</screen>
This command will build all specialisations and make them bootable just
like regular <command>nixos-rebuild switch</command> does - the only
thing different is that it will switch to given specialisation instead
of the base system; it can be also used to switch from the base system
into a specialised one, or to switch between specialisations.
</para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -233,6 +259,16 @@
configuration resulting from the last call to <command>nixos-rebuild configuration resulting from the last call to <command>nixos-rebuild
switch</command> or <command>nixos-rebuild boot</command>). switch</command> or <command>nixos-rebuild boot</command>).
</para> </para>
<para>
Note that if you are using specialisations, running just
<command>nixos-rebuild test</command> will activate the unspecialised,
base system - in that case, you might want to use this instead:
<screen>
<prompt>$ </prompt>nixos-rebuild test --specialisation your-specialisation-name
</screen>
This command can be also used to switch from the base system into a
specialised one, or to switch between specialisations.
</para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -499,6 +535,21 @@
</listitem> </listitem>
</varlistentry> </varlistentry>
<varlistentry>
<term>
<option>--specialisation</option>
</term>
<term>
<option>-c</option>
</term>
<listitem>
<para>
Activates given specialisation; when not specified, switching and testing
will activate the base, unspecialised system.
</para>
</listitem>
</varlistentry>
<varlistentry> <varlistentry>
<term> <term>
<option>--build-host</option> <option>--build-host</option>

View file

@ -50,3 +50,21 @@ for mf in ${MD_FILES[*]}; do
done done
popd popd
# now handle module chapters. we'll need extra checks to ensure that we don't process
# markdown files we're not interested in, so we'll require an x.nix file for ever x.md
# that we'll convert to xml.
pushd "$DIR/../../modules"
mapfile -t MD_FILES < <(find . -type f -regex '.*\.md$')
for mf in ${MD_FILES[*]}; do
[ -f "${mf%.md}.nix" ] || continue
pandoc --top-level-division=chapter "$mf" "${pandoc_flags[@]}" -o "${mf%.md}.xml"
sed -i -e '1 i <!-- Do not edit this file directly, edit its companion .md instead\
and regenerate this file using nixos/doc/manual/md-to-db.sh -->' \
"${mf%.md}.xml"
done
popd

View file

@ -10,6 +10,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- Cinnamon has been updated to 5.6, see [the pull request](https://github.com/NixOS/nixpkgs/pull/201328#issue-1449910204) for what is changed. - Cinnamon has been updated to 5.6, see [the pull request](https://github.com/NixOS/nixpkgs/pull/201328#issue-1449910204) for what is changed.
- `nixos-rebuild` now supports an extra `--specialisation` option that can be used to change specialisation for `switch` and `test` commands.
## New Services {#sec-release-23.05-new-services} ## New Services {#sec-release-23.05-new-services}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. --> <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@ -24,6 +26,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- [fzf](https://github.com/junegunn/fzf), a command line fuzzyfinder. Available as [programs.fzf](#opt-programs.fzf.fuzzyCompletion). - [fzf](https://github.com/junegunn/fzf), a command line fuzzyfinder. Available as [programs.fzf](#opt-programs.fzf.fuzzyCompletion).
- [gmediarender](https://github.com/hzeller/gmrender-resurrect), a simple, headless UPnP/DLNA renderer. Available as [services.gmediarender](options.html#opt-services.gmediarender.enable).
- [stevenblack-blocklist](https://github.com/StevenBlack/hosts), A unified hosts file with base extensions for blocking unwanted websites. Available as [networking.stevenblack](options.html#opt-networking.stevenblack.enable). - [stevenblack-blocklist](https://github.com/StevenBlack/hosts), A unified hosts file with base extensions for blocking unwanted websites. Available as [networking.stevenblack](options.html#opt-networking.stevenblack.enable).
- [atuin](https://github.com/ellie/atuin), a sync server for shell history. Available as [services.atuin](#opt-services.atuin.enable). - [atuin](https://github.com/ellie/atuin), a sync server for shell history. Available as [services.atuin](#opt-services.atuin.enable).
@ -34,6 +38,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- [ulogd](https://www.netfilter.org/projects/ulogd/index.html), a userspace logging daemon for netfilter/iptables related logging. Available as [services.ulogd](options.html#opt-services.ulogd.enable). - [ulogd](https://www.netfilter.org/projects/ulogd/index.html), a userspace logging daemon for netfilter/iptables related logging. Available as [services.ulogd](options.html#opt-services.ulogd.enable).
- [photoprism](https://photoprism.app/), a AI-Powered Photos App for the Decentralized Web. Available as [services.photoprism](options.html#opt-services.photoprism.enable).
## Backward Incompatibilities {#sec-release-23.05-incompatibilities} ## Backward Incompatibilities {#sec-release-23.05-incompatibilities}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. --> <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@ -59,6 +65,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- The [services.unifi-video.openFirewall](#opt-services.unifi-video.openFirewall) module option default value has been changed from `true` to `false`. You will need to explicitly set this option to `true`, or configure your firewall. - The [services.unifi-video.openFirewall](#opt-services.unifi-video.openFirewall) module option default value has been changed from `true` to `false`. You will need to explicitly set this option to `true`, or configure your firewall.
- `llvmPackages_rocm.llvm` will not contain `clang` or `compiler-rt`. `llvmPackages_rocm.clang` will not contain `llvm`. `llvmPackages_rocm.clangNoCompilerRt` has been removed in favor of using `llvmPackages_rocm.clang-unwrapped`.
- The Nginx module now validates the syntax of config files at build time. For more complex configurations (using `include` with out-of-store files notably) you may need to disable this check by setting [services.nginx.validateConfig](#opt-services.nginx.validateConfig) to `false`. - The Nginx module now validates the syntax of config files at build time. For more complex configurations (using `include` with out-of-store files notably) you may need to disable this check by setting [services.nginx.validateConfig](#opt-services.nginx.validateConfig) to `false`.
- The EC2 image module previously detected and automatically mounted ext3-formatted instance store devices and partitions in stage-1 (initramfs), storing `/tmp` on the first discovered device. This behaviour, which only catered to very specific use cases and could not be disabled, has been removed. Users relying on this should provide their own implementation, and probably use ext4 and perform the mount in stage-2. - The EC2 image module previously detected and automatically mounted ext3-formatted instance store devices and partitions in stage-1 (initramfs), storing `/tmp` on the first discovered device. This behaviour, which only catered to very specific use cases and could not be disabled, has been removed. Users relying on this should provide their own implementation, and probably use ext4 and perform the mount in stage-2.
@ -71,6 +79,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- The `nix.readOnlyStore` option has been renamed to `boot.readOnlyNixStore` to clarify that it configures the NixOS boot process, not the Nix daemon. - The `nix.readOnlyStore` option has been renamed to `boot.readOnlyNixStore` to clarify that it configures the NixOS boot process, not the Nix daemon.
- Deprecated `xlibsWrapper` transitional package has been removed in favour of direct use of its constitutents: `xorg.libX11`, `freetype` and others.
## Other Notable Changes {#sec-release-23.05-notable-changes} ## Other Notable Changes {#sec-release-23.05-notable-changes}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. --> <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@ -81,6 +91,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- The module `usbmuxd` now has the ability to change the package used by the daemon. In case you're experiencing issues with `usbmuxd` you can try an alternative program like `usbmuxd2`. Available as [services.usbmuxd.package](#opt-services.usbmuxd.package) - The module `usbmuxd` now has the ability to change the package used by the daemon. In case you're experiencing issues with `usbmuxd` you can try an alternative program like `usbmuxd2`. Available as [services.usbmuxd.package](#opt-services.usbmuxd.package)
- A few openssh options have been moved from extraConfig to the new freeform option `settings` and renamed as follow: `services.openssh.kbdInteractiveAuthentication` to `services.openssh.settings.KbdInteractiveAuthentication`, `services.openssh.passwordAuthentication` to `services.openssh.settings.PasswordAuthentication`, `services.openssh.useDns` to `services.openssh.settings.UseDns`, `services.openssh.permitRootLogin` to `services.openssh.settings.PermitRootLogin`, `services.openssh.logLevel` to `services.openssh.settings.LogLevel`.
- `services.mastodon` gained a tootctl wrapped named `mastodon-tootctl` similar to `nextcloud-occ` which can be executed from any user and switches to the configured mastodon user with sudo and sources the environment variables. - `services.mastodon` gained a tootctl wrapped named `mastodon-tootctl` similar to `nextcloud-occ` which can be executed from any user and switches to the configured mastodon user with sudo and sources the environment variables.
- The `dnsmasq` service now takes configuration via the - The `dnsmasq` service now takes configuration via the
@ -92,6 +104,8 @@ In addition to numerous new and upgraded packages, this release has the followin
The `{aclUse,superUser,disableActions}` attributes have been renamed, `pluginsConfig` now also accepts an attribute set of booleans, passing plain PHP is deprecated. The `{aclUse,superUser,disableActions}` attributes have been renamed, `pluginsConfig` now also accepts an attribute set of booleans, passing plain PHP is deprecated.
Same applies to `acl` which now also accepts structured settings. Same applies to `acl` which now also accepts structured settings.
- The `wordpress` service now takes configuration via the `services.wordpress.sites.<name>.settings` attribute set, `extraConfig` is still available to append additional text to `wp-config.php`.
- To reduce closure size in `nixos/modules/profiles/minimal.nix` profile disabled installation documentations and manuals. Also disabled `logrotate` and `udisks2` services. - To reduce closure size in `nixos/modules/profiles/minimal.nix` profile disabled installation documentations and manuals. Also disabled `logrotate` and `udisks2` services.
- The minimal ISO image now uses the `nixos/modules/profiles/minimal.nix` profile. - The minimal ISO image now uses the `nixos/modules/profiles/minimal.nix` profile.
@ -121,12 +135,16 @@ In addition to numerous new and upgraded packages, this release has the followin
- The new option `users.motdFile` allows configuring a Message Of The Day that can be updated dynamically. - The new option `users.motdFile` allows configuring a Message Of The Day that can be updated dynamically.
- The `root` package is now built with the `"-Dgnuinstall=ON"` CMake flag, making the output conform the `bin` `lib` `share` layout. In this layout, `tutorials` is under `share/doc/ROOT/`; `cmake`, `font`, `icons`, `js` and `macro` under `share/root`; `Makefile.comp` and `Makefile.config` under `etc/root`.
- Enabling global redirect in `services.nginx.virtualHosts` now allows one to add exceptions with the `locations` option. - Enabling global redirect in `services.nginx.virtualHosts` now allows one to add exceptions with the `locations` option.
- A new option `recommendedBrotliSettings` has been added to `services.nginx`. Learn more about compression in Brotli format [here](https://github.com/google/ngx_brotli/blob/master/README.md). - A new option `recommendedBrotliSettings` has been added to `services.nginx`. Learn more about compression in Brotli format [here](https://github.com/google/ngx_brotli/blob/master/README.md).
- [Garage](https://garagehq.deuxfleurs.fr/) version is based on [system.stateVersion](options.html#opt-system.stateVersion), existing installations will keep using version 0.7. New installations will use version 0.8. In order to upgrade a Garage cluster, please follow [upstream instructions](https://garagehq.deuxfleurs.fr/documentation/cookbook/upgrading/) and force [services.garage.package](options.html#opt-services.garage.package) or upgrade accordingly [system.stateVersion](options.html#opt-system.stateVersion). - [Garage](https://garagehq.deuxfleurs.fr/) version is based on [system.stateVersion](options.html#opt-system.stateVersion), existing installations will keep using version 0.7. New installations will use version 0.8. In order to upgrade a Garage cluster, please follow [upstream instructions](https://garagehq.deuxfleurs.fr/documentation/cookbook/upgrading/) and force [services.garage.package](options.html#opt-services.garage.package) or upgrade accordingly [system.stateVersion](options.html#opt-system.stateVersion).
- `hip` has been separated into `hip`, `hip-common` and `hipcc`.
- Resilio sync secret keys can now be provided using a secrets file at runtime, preventing these secrets from ending up in the Nix store. - Resilio sync secret keys can now be provided using a secrets file at runtime, preventing these secrets from ending up in the Nix store.
- The `firewall` and `nat` module now has a nftables based implementation. Enable `networking.nftables` to use it. - The `firewall` and `nat` module now has a nftables based implementation. Enable `networking.nftables` to use it.
@ -140,3 +158,5 @@ In addition to numerous new and upgraded packages, this release has the followin
- [Xastir](https://xastir.org/index.php/Main_Page) can now access AX.25 interfaces via the `libax25` package. - [Xastir](https://xastir.org/index.php/Main_Page) can now access AX.25 interfaces via the `libax25` package.
- `nixos-version` now accepts `--configuration-revision` to display more information about the current generation revision - `nixos-version` now accepts `--configuration-revision` to display more information about the current generation revision
- The option `services.nomad.extraSettingsPlugins` has been fixed to allow more than one plugin in the path.

View file

@ -33,12 +33,16 @@ with lib;
ffmpeg_4 = super.ffmpeg_4-headless; ffmpeg_4 = super.ffmpeg_4-headless;
ffmpeg_5 = super.ffmpeg_5-headless; ffmpeg_5 = super.ffmpeg_5-headless;
gobject-introspection = super.gobject-introspection.override { x11Support = false; }; gobject-introspection = super.gobject-introspection.override { x11Support = false; };
gst_all_1 = super.gst_all_1 // {
gst-plugins-base = super.gst_all_1.gst-plugins-base.override { enableX11 = false; };
};
gpsd = super.gpsd.override { guiSupport = false; }; gpsd = super.gpsd.override { guiSupport = false; };
imagemagick = super.imagemagick.override { libX11Support = false; libXtSupport = false; }; imagemagick = super.imagemagick.override { libX11Support = false; libXtSupport = false; };
imagemagickBig = super.imagemagickBig.override { libX11Support = false; libXtSupport = false; }; imagemagickBig = super.imagemagickBig.override { libX11Support = false; libXtSupport = false; };
libextractor = super.libextractor.override { gstreamerSupport = false; gtkSupport = false; }; libextractor = super.libextractor.override { gtkSupport = false; };
libva = super.libva-minimal; libva = super.libva-minimal;
limesuite = super.limesuite.override { withGui = false; }; limesuite = super.limesuite.override { withGui = false; };
mpv-unwrapped = super.mpv-unwrapped.override { sdl2Support = false; x11Support = false; };
msmtp = super.msmtp.override { withKeyring = false; }; msmtp = super.msmtp.override { withKeyring = false; };
networkmanager-fortisslvpn = super.networkmanager-fortisslvpn.override { withGnome = false; }; networkmanager-fortisslvpn = super.networkmanager-fortisslvpn.override { withGnome = false; };
networkmanager-iodine = super.networkmanager-iodine.override { withGnome = false; }; networkmanager-iodine = super.networkmanager-iodine.override { withGnome = false; };

View file

@ -102,9 +102,17 @@ in
config = mkIf cfg.enable { config = mkIf cfg.enable {
environment.variables.QT_QPA_PLATFORMTHEME = cfg.platformTheme; environment.variables = {
QT_QPA_PLATFORMTHEME = cfg.platformTheme;
QT_STYLE_OVERRIDE = mkIf (! (isQt5ct || isLxqt || isKde)) cfg.style;
};
environment.variables.QT_STYLE_OVERRIDE = mkIf (! (isQt5ct || isLxqt || isKde)) cfg.style; environment.profileRelativeSessionVariables = let
qtVersions = with pkgs; [ qt5 qt6 ];
in {
QT_PLUGIN_PATH = map (qt: "/${qt.qtbase.qtPluginPrefix}") qtVersions;
QML2_IMPORT_PATH = map (qt: "/${qt.qtbase.qtQmlPrefix}") qtVersions;
};
environment.systemPackages = packages; environment.systemPackages = packages;

View file

@ -66,7 +66,7 @@ let
device = mkOption { device = mkOption {
example = "/dev/sda3"; example = "/dev/sda3";
type = types.str; type = types.nonEmptyStr;
description = lib.mdDoc "Path of the device or swap file."; description = lib.mdDoc "Path of the device or swap file.";
}; };
@ -197,6 +197,21 @@ in
}; };
config = mkIf ((length config.swapDevices) != 0) { config = mkIf ((length config.swapDevices) != 0) {
assertions = map (sw: {
assertion = sw.randomEncryption.enable -> builtins.match "/dev/disk/by-(uuid|label)/.*" sw.device == null;
message = ''
You cannot use swap device "${sw.device}" with randomEncryption enabled.
The UUIDs and labels will get erased on every boot when the partition is encrypted.
Use /dev/disk/by-partuuid/ instead.
'';
}) config.swapDevices;
warnings =
concatMap (sw:
if sw.size != null && hasPrefix "/dev/" sw.device
then [ "Setting the swap size of block device ${sw.device} has no effect" ]
else [ ])
config.swapDevices;
system.requiredKernelConfig = with config.lib.kernelConfig; [ system.requiredKernelConfig = with config.lib.kernelConfig; [
(isYes "SWAP") (isYes "SWAP")
@ -205,24 +220,27 @@ in
# Create missing swapfiles. # Create missing swapfiles.
systemd.services = systemd.services =
let let
createSwapDevice = sw: createSwapDevice = sw:
assert sw.device != "";
assert !(sw.randomEncryption.enable && lib.hasPrefix "/dev/disk/by-uuid" sw.device);
assert !(sw.randomEncryption.enable && lib.hasPrefix "/dev/disk/by-label" sw.device);
let realDevice' = escapeSystemdPath sw.realDevice; let realDevice' = escapeSystemdPath sw.realDevice;
in nameValuePair "mkswap-${sw.deviceName}" in nameValuePair "mkswap-${sw.deviceName}"
{ description = "Initialisation of swap device ${sw.device}"; { description = "Initialisation of swap device ${sw.device}";
wantedBy = [ "${realDevice'}.swap" ]; wantedBy = [ "${realDevice'}.swap" ];
before = [ "${realDevice'}.swap" ]; before = [ "${realDevice'}.swap" ];
path = [ pkgs.util-linux ] ++ optional sw.randomEncryption.enable pkgs.cryptsetup; path = [ pkgs.util-linux pkgs.e2fsprogs ]
++ optional sw.randomEncryption.enable pkgs.cryptsetup;
environment.DEVICE = sw.device;
script = script =
'' ''
${optionalString (sw.size != null) '' ${optionalString (sw.size != null) ''
currentSize=$(( $(stat -c "%s" "${sw.device}" 2>/dev/null || echo 0) / 1024 / 1024 )) currentSize=$(( $(stat -c "%s" "$DEVICE" 2>/dev/null || echo 0) / 1024 / 1024 ))
if [ "${toString sw.size}" != "$currentSize" ]; then if [[ ! -b "$DEVICE" && "${toString sw.size}" != "$currentSize" ]]; then
dd if=/dev/zero of="${sw.device}" bs=1M count=${toString sw.size} # Disable CoW for CoW based filesystems like BTRFS.
truncate --size 0 "$DEVICE"
chattr +C "$DEVICE" 2>/dev/null || true
dd if=/dev/zero of="$DEVICE" bs=1M count=${toString sw.size}
chmod 0600 ${sw.device} chmod 0600 ${sw.device}
${optionalString (!sw.randomEncryption.enable) "mkswap ${sw.realDevice}"} ${optionalString (!sw.randomEncryption.enable) "mkswap ${sw.realDevice}"}
fi fi

View file

@ -110,21 +110,26 @@ in {
}; };
config = mkIf (cfg.ensurePrinters != [] && config.services.printing.enable) { config = mkIf (cfg.ensurePrinters != [] && config.services.printing.enable) {
systemd.services.ensure-printers = let systemd.services.ensure-printers = {
cupsUnit = if config.services.printing.startWhenNeeded then "cups.socket" else "cups.service";
in {
description = "Ensure NixOS-configured CUPS printers"; description = "Ensure NixOS-configured CUPS printers";
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
requires = [ cupsUnit ]; wants = [ "cups.service" ];
after = [ cupsUnit ]; after = [ "cups.service" ];
serviceConfig = { serviceConfig = {
Type = "oneshot"; Type = "oneshot";
RemainAfterExit = true; RemainAfterExit = true;
}; };
script = concatMapStringsSep "\n" ensurePrinter cfg.ensurePrinters script = concatStringsSep "\n" [
+ optionalString (cfg.ensureDefaultPrinter != null) (ensureDefaultPrinter cfg.ensureDefaultPrinter); (concatMapStrings ensurePrinter cfg.ensurePrinters)
(optionalString (cfg.ensureDefaultPrinter != null)
(ensureDefaultPrinter cfg.ensureDefaultPrinter))
# Note: if cupsd is "stateless" the service can't be stopped,
# otherwise the configuration will be wiped on the next start.
(optionalString (with config.services.printing; startWhenNeeded && !stateless)
"systemctl stop cups.service")
];
}; };
}; };
} }

View file

@ -0,0 +1,158 @@
# Input Methods {#module-services-input-methods}
Input methods are an operating system component that allows any data, such as
keyboard strokes or mouse movements, to be received as input. In this way
users can enter characters and symbols not found on their input devices.
Using an input method is obligatory for any language that has more graphemes
than there are keys on the keyboard.
The following input methods are available in NixOS:
- IBus: The intelligent input bus.
- Fcitx: A customizable lightweight input method.
- Nabi: A Korean input method based on XIM.
- Uim: The universal input method, is a library with a XIM bridge.
- Hime: An extremely easy-to-use input method framework.
- Kime: Korean IME
## IBus {#module-services-input-methods-ibus}
IBus is an Intelligent Input Bus. It provides full featured and user
friendly input method user interface.
The following snippet can be used to configure IBus:
```
i18n.inputMethod = {
enabled = "ibus";
ibus.engines = with pkgs.ibus-engines; [ anthy hangul mozc ];
};
```
`i18n.inputMethod.ibus.engines` is optional and can be used
to add extra IBus engines.
Available extra IBus engines are:
- Anthy (`ibus-engines.anthy`): Anthy is a system for
Japanese input method. It converts Hiragana text to Kana Kanji mixed text.
- Hangul (`ibus-engines.hangul`): Korean input method.
- m17n (`ibus-engines.m17n`): m17n is an input method that
uses input methods and corresponding icons in the m17n database.
- mozc (`ibus-engines.mozc`): A Japanese input method from
Google.
- Table (`ibus-engines.table`): An input method that load
tables of input methods.
- table-others (`ibus-engines.table-others`): Various
table-based input methods. To use this, and any other table-based input
methods, it must appear in the list of engines along with
`table`. For example:
```
ibus.engines = with pkgs.ibus-engines; [ table table-others ];
```
To use any input method, the package must be added in the configuration, as
shown above, and also (after running `nixos-rebuild`) the
input method must be added from IBus' preference dialog.
### Troubleshooting {#module-services-input-methods-troubleshooting}
If IBus works in some applications but not others, a likely cause of this
is that IBus is depending on a different version of `glib`
to what the applications are depending on. This can be checked by running
`nix-store -q --requisites <path> | grep glib`,
where `<path>` is the path of either IBus or an
application in the Nix store. The `glib` packages must
match exactly. If they do not, uninstalling and reinstalling the
application is a likely fix.
## Fcitx {#module-services-input-methods-fcitx}
Fcitx is an input method framework with extension support. It has three
built-in Input Method Engine, Pinyin, QuWei and Table-based input methods.
The following snippet can be used to configure Fcitx:
```
i18n.inputMethod = {
enabled = "fcitx";
fcitx.engines = with pkgs.fcitx-engines; [ mozc hangul m17n ];
};
```
`i18n.inputMethod.fcitx.engines` is optional and can be
used to add extra Fcitx engines.
Available extra Fcitx engines are:
- Anthy (`fcitx-engines.anthy`): Anthy is a system for
Japanese input method. It converts Hiragana text to Kana Kanji mixed text.
- Chewing (`fcitx-engines.chewing`): Chewing is an
intelligent Zhuyin input method. It is one of the most popular input
methods among Traditional Chinese Unix users.
- Hangul (`fcitx-engines.hangul`): Korean input method.
- Unikey (`fcitx-engines.unikey`): Vietnamese input method.
- m17n (`fcitx-engines.m17n`): m17n is an input method that
uses input methods and corresponding icons in the m17n database.
- mozc (`fcitx-engines.mozc`): A Japanese input method from
Google.
- table-others (`fcitx-engines.table-others`): Various
table-based input methods.
## Nabi {#module-services-input-methods-nabi}
Nabi is an easy to use Korean X input method. It allows you to enter
phonetic Korean characters (hangul) and pictographic Korean characters
(hanja).
The following snippet can be used to configure Nabi:
```
i18n.inputMethod = {
enabled = "nabi";
};
```
## Uim {#module-services-input-methods-uim}
Uim (short for "universal input method") is a multilingual input method
framework. Applications can use it through so-called bridges.
The following snippet can be used to configure uim:
```
i18n.inputMethod = {
enabled = "uim";
};
```
Note: The [](#opt-i18n.inputMethod.uim.toolbar) option can be
used to choose uim toolbar.
## Hime {#module-services-input-methods-hime}
Hime is an extremely easy-to-use input method framework. It is lightweight,
stable, powerful and supports many commonly used input methods, including
Cangjie, Zhuyin, Dayi, Rank, Shrimp, Greek, Korean Pinyin, Latin Alphabet,
etc...
The following snippet can be used to configure Hime:
```
i18n.inputMethod = {
enabled = "hime";
};
```
## Kime {#module-services-input-methods-kime}
Kime is Korean IME. it's built with Rust language and let you get simple, safe, fast Korean typing
The following snippet can be used to configure Kime:
```
i18n.inputMethod = {
enabled = "kime";
};
```

View file

@ -1,291 +1,275 @@
<chapter xmlns="http://docbook.org/ns/docbook" <!-- Do not edit this file directly, edit its companion .md instead
xmlns:xlink="http://www.w3.org/1999/xlink" and regenerate this file using nixos/doc/manual/md-to-db.sh -->
xmlns:xi="http://www.w3.org/2001/XInclude" <chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-input-methods">
version="5.0" <title>Input Methods</title>
xml:id="module-services-input-methods"> <para>
<title>Input Methods</title> Input methods are an operating system component that allows any
<para> data, such as keyboard strokes or mouse movements, to be received as
Input methods are an operating system component that allows any data, such as input. In this way users can enter characters and symbols not found
keyboard strokes or mouse movements, to be received as input. In this way on their input devices. Using an input method is obligatory for any
users can enter characters and symbols not found on their input devices. language that has more graphemes than there are keys on the
Using an input method is obligatory for any language that has more graphemes keyboard.
than there are keys on the keyboard. </para>
</para> <para>
<para> The following input methods are available in NixOS:
The following input methods are available in NixOS: </para>
</para> <itemizedlist spacing="compact">
<itemizedlist> <listitem>
<listitem> <para>
<para> IBus: The intelligent input bus.
IBus: The intelligent input bus. </para>
</para> </listitem>
</listitem> <listitem>
<listitem> <para>
<para> Fcitx: A customizable lightweight input method.
Fcitx: A customizable lightweight input method. </para>
</para> </listitem>
</listitem> <listitem>
<listitem> <para>
<para> Nabi: A Korean input method based on XIM.
Nabi: A Korean input method based on XIM. </para>
</para> </listitem>
</listitem> <listitem>
<listitem> <para>
<para> Uim: The universal input method, is a library with a XIM bridge.
Uim: The universal input method, is a library with a XIM bridge. </para>
</para> </listitem>
</listitem> <listitem>
<listitem> <para>
<para> Hime: An extremely easy-to-use input method framework.
Hime: An extremely easy-to-use input method framework. </para>
</para> </listitem>
</listitem> <listitem>
<listitem> <para>
Kime: Korean IME
</para>
</listitem>
</itemizedlist>
<section xml:id="module-services-input-methods-ibus">
<title>IBus</title>
<para> <para>
Kime: Korean IME IBus is an Intelligent Input Bus. It provides full featured and
user friendly input method user interface.
</para> </para>
</listitem> <para>
</itemizedlist> The following snippet can be used to configure IBus:
<section xml:id="module-services-input-methods-ibus"> </para>
<title>IBus</title> <programlisting>
<para>
IBus is an Intelligent Input Bus. It provides full featured and user
friendly input method user interface.
</para>
<para>
The following snippet can be used to configure IBus:
</para>
<programlisting>
i18n.inputMethod = { i18n.inputMethod = {
<link linkend="opt-i18n.inputMethod.enabled">enabled</link> = "ibus"; enabled = &quot;ibus&quot;;
<link linkend="opt-i18n.inputMethod.ibus.engines">ibus.engines</link> = with pkgs.ibus-engines; [ anthy hangul mozc ]; ibus.engines = with pkgs.ibus-engines; [ anthy hangul mozc ];
}; };
</programlisting> </programlisting>
<para>
<literal>i18n.inputMethod.ibus.engines</literal> is optional and can be used
to add extra IBus engines.
</para>
<para>
Available extra IBus engines are:
</para>
<itemizedlist>
<listitem>
<para> <para>
Anthy (<literal>ibus-engines.anthy</literal>): Anthy is a system for <literal>i18n.inputMethod.ibus.engines</literal> is optional and
Japanese input method. It converts Hiragana text to Kana Kanji mixed text. can be used to add extra IBus engines.
</para> </para>
</listitem>
<listitem>
<para> <para>
Hangul (<literal>ibus-engines.hangul</literal>): Korean input method. Available extra IBus engines are:
</para> </para>
</listitem> <itemizedlist>
<listitem> <listitem>
<para> <para>
m17n (<literal>ibus-engines.m17n</literal>): m17n is an input method that Anthy (<literal>ibus-engines.anthy</literal>): Anthy is a
uses input methods and corresponding icons in the m17n database. system for Japanese input method. It converts Hiragana text to
</para> Kana Kanji mixed text.
</listitem> </para>
<listitem> </listitem>
<para> <listitem>
mozc (<literal>ibus-engines.mozc</literal>): A Japanese input method from <para>
Google. Hangul (<literal>ibus-engines.hangul</literal>): Korean input
</para> method.
</listitem> </para>
<listitem> </listitem>
<para> <listitem>
Table (<literal>ibus-engines.table</literal>): An input method that load <para>
tables of input methods. m17n (<literal>ibus-engines.m17n</literal>): m17n is an input
</para> method that uses input methods and corresponding icons in the
</listitem> m17n database.
<listitem> </para>
<para> </listitem>
table-others (<literal>ibus-engines.table-others</literal>): Various <listitem>
table-based input methods. To use this, and any other table-based input <para>
methods, it must appear in the list of engines along with mozc (<literal>ibus-engines.mozc</literal>): A Japanese input
<literal>table</literal>. For example: method from Google.
<programlisting> </para>
</listitem>
<listitem>
<para>
Table (<literal>ibus-engines.table</literal>): An input method
that load tables of input methods.
</para>
</listitem>
<listitem>
<para>
table-others (<literal>ibus-engines.table-others</literal>):
Various table-based input methods. To use this, and any other
table-based input methods, it must appear in the list of
engines along with <literal>table</literal>. For example:
</para>
<programlisting>
ibus.engines = with pkgs.ibus-engines; [ table table-others ]; ibus.engines = with pkgs.ibus-engines; [ table table-others ];
</programlisting> </programlisting>
</listitem>
</itemizedlist>
<para>
To use any input method, the package must be added in the
configuration, as shown above, and also (after running
<literal>nixos-rebuild</literal>) the input method must be added
from IBus preference dialog.
</para> </para>
</listitem> <section xml:id="module-services-input-methods-troubleshooting">
</itemizedlist> <title>Troubleshooting</title>
<para>
<para> If IBus works in some applications but not others, a likely
To use any input method, the package must be added in the configuration, as cause of this is that IBus is depending on a different version
shown above, and also (after running <literal>nixos-rebuild</literal>) the of <literal>glib</literal> to what the applications are
input method must be added from IBus' preference dialog. depending on. This can be checked by running
</para> <literal>nix-store -q --requisites &lt;path&gt; | grep glib</literal>,
where <literal>&lt;path&gt;</literal> is the path of either IBus
<simplesect xml:id="module-services-input-methods-troubleshooting"> or an application in the Nix store. The <literal>glib</literal>
<title>Troubleshooting</title> packages must match exactly. If they do not, uninstalling and
<para> reinstalling the application is a likely fix.
If IBus works in some applications but not others, a likely cause of this </para>
is that IBus is depending on a different version of <literal>glib</literal> </section>
to what the applications are depending on. This can be checked by running </section>
<literal>nix-store -q --requisites &lt;path&gt; | grep glib</literal>, <section xml:id="module-services-input-methods-fcitx">
where <literal>&lt;path&gt;</literal> is the path of either IBus or an <title>Fcitx</title>
application in the Nix store. The <literal>glib</literal> packages must <para>
match exactly. If they do not, uninstalling and reinstalling the Fcitx is an input method framework with extension support. It has
application is a likely fix. three built-in Input Method Engine, Pinyin, QuWei and Table-based
</para> input methods.
</simplesect> </para>
</section> <para>
<section xml:id="module-services-input-methods-fcitx"> The following snippet can be used to configure Fcitx:
<title>Fcitx</title> </para>
<programlisting>
<para>
Fcitx is an input method framework with extension support. It has three
built-in Input Method Engine, Pinyin, QuWei and Table-based input methods.
</para>
<para>
The following snippet can be used to configure Fcitx:
</para>
<programlisting>
i18n.inputMethod = { i18n.inputMethod = {
<link linkend="opt-i18n.inputMethod.enabled">enabled</link> = "fcitx"; enabled = &quot;fcitx&quot;;
<link linkend="opt-i18n.inputMethod.fcitx.engines">fcitx.engines</link> = with pkgs.fcitx-engines; [ mozc hangul m17n ]; fcitx.engines = with pkgs.fcitx-engines; [ mozc hangul m17n ];
}; };
</programlisting> </programlisting>
<para>
<literal>i18n.inputMethod.fcitx.engines</literal> is optional and can be
used to add extra Fcitx engines.
</para>
<para>
Available extra Fcitx engines are:
</para>
<itemizedlist>
<listitem>
<para> <para>
Anthy (<literal>fcitx-engines.anthy</literal>): Anthy is a system for <literal>i18n.inputMethod.fcitx.engines</literal> is optional and
Japanese input method. It converts Hiragana text to Kana Kanji mixed text. can be used to add extra Fcitx engines.
</para> </para>
</listitem>
<listitem>
<para> <para>
Chewing (<literal>fcitx-engines.chewing</literal>): Chewing is an Available extra Fcitx engines are:
intelligent Zhuyin input method. It is one of the most popular input
methods among Traditional Chinese Unix users.
</para> </para>
</listitem> <itemizedlist spacing="compact">
<listitem> <listitem>
<para>
Anthy (<literal>fcitx-engines.anthy</literal>): Anthy is a
system for Japanese input method. It converts Hiragana text to
Kana Kanji mixed text.
</para>
</listitem>
<listitem>
<para>
Chewing (<literal>fcitx-engines.chewing</literal>): Chewing is
an intelligent Zhuyin input method. It is one of the most
popular input methods among Traditional Chinese Unix users.
</para>
</listitem>
<listitem>
<para>
Hangul (<literal>fcitx-engines.hangul</literal>): Korean input
method.
</para>
</listitem>
<listitem>
<para>
Unikey (<literal>fcitx-engines.unikey</literal>): Vietnamese
input method.
</para>
</listitem>
<listitem>
<para>
m17n (<literal>fcitx-engines.m17n</literal>): m17n is an input
method that uses input methods and corresponding icons in the
m17n database.
</para>
</listitem>
<listitem>
<para>
mozc (<literal>fcitx-engines.mozc</literal>): A Japanese input
method from Google.
</para>
</listitem>
<listitem>
<para>
table-others (<literal>fcitx-engines.table-others</literal>):
Various table-based input methods.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="module-services-input-methods-nabi">
<title>Nabi</title>
<para> <para>
Hangul (<literal>fcitx-engines.hangul</literal>): Korean input method. Nabi is an easy to use Korean X input method. It allows you to
enter phonetic Korean characters (hangul) and pictographic Korean
characters (hanja).
</para> </para>
</listitem>
<listitem>
<para> <para>
Unikey (<literal>fcitx-engines.unikey</literal>): Vietnamese input method. The following snippet can be used to configure Nabi:
</para> </para>
</listitem> <programlisting>
<listitem>
<para>
m17n (<literal>fcitx-engines.m17n</literal>): m17n is an input method that
uses input methods and corresponding icons in the m17n database.
</para>
</listitem>
<listitem>
<para>
mozc (<literal>fcitx-engines.mozc</literal>): A Japanese input method from
Google.
</para>
</listitem>
<listitem>
<para>
table-others (<literal>fcitx-engines.table-others</literal>): Various
table-based input methods.
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="module-services-input-methods-nabi">
<title>Nabi</title>
<para>
Nabi is an easy to use Korean X input method. It allows you to enter
phonetic Korean characters (hangul) and pictographic Korean characters
(hanja).
</para>
<para>
The following snippet can be used to configure Nabi:
</para>
<programlisting>
i18n.inputMethod = { i18n.inputMethod = {
<link linkend="opt-i18n.inputMethod.enabled">enabled</link> = "nabi"; enabled = &quot;nabi&quot;;
}; };
</programlisting> </programlisting>
</section> </section>
<section xml:id="module-services-input-methods-uim"> <section xml:id="module-services-input-methods-uim">
<title>Uim</title> <title>Uim</title>
<para>
<para> Uim (short for <quote>universal input method</quote>) is a
Uim (short for "universal input method") is a multilingual input method multilingual input method framework. Applications can use it
framework. Applications can use it through so-called bridges. through so-called bridges.
</para> </para>
<para>
<para> The following snippet can be used to configure uim:
The following snippet can be used to configure uim: </para>
</para> <programlisting>
<programlisting>
i18n.inputMethod = { i18n.inputMethod = {
<link linkend="opt-i18n.inputMethod.enabled">enabled</link> = "uim"; enabled = &quot;uim&quot;;
}; };
</programlisting> </programlisting>
<para>
<para> Note: The <xref linkend="opt-i18n.inputMethod.uim.toolbar" />
Note: The <xref linkend="opt-i18n.inputMethod.uim.toolbar"/> option can be option can be used to choose uim toolbar.
used to choose uim toolbar. </para>
</para> </section>
</section> <section xml:id="module-services-input-methods-hime">
<section xml:id="module-services-input-methods-hime"> <title>Hime</title>
<title>Hime</title> <para>
Hime is an extremely easy-to-use input method framework. It is
<para> lightweight, stable, powerful and supports many commonly used
Hime is an extremely easy-to-use input method framework. It is lightweight, input methods, including Cangjie, Zhuyin, Dayi, Rank, Shrimp,
stable, powerful and supports many commonly used input methods, including Greek, Korean Pinyin, Latin Alphabet, etc…
Cangjie, Zhuyin, Dayi, Rank, Shrimp, Greek, Korean Pinyin, Latin Alphabet, </para>
etc... <para>
</para> The following snippet can be used to configure Hime:
</para>
<para> <programlisting>
The following snippet can be used to configure Hime:
</para>
<programlisting>
i18n.inputMethod = { i18n.inputMethod = {
<link linkend="opt-i18n.inputMethod.enabled">enabled</link> = "hime"; enabled = &quot;hime&quot;;
}; };
</programlisting> </programlisting>
</section> </section>
<section xml:id="module-services-input-methods-kime"> <section xml:id="module-services-input-methods-kime">
<title>Kime</title> <title>Kime</title>
<para>
<para> Kime is Korean IME. its built with Rust language and let you get
Kime is Korean IME. it's built with Rust language and let you get simple, safe, fast Korean typing simple, safe, fast Korean typing
</para> </para>
<para>
<para> The following snippet can be used to configure Kime:
The following snippet can be used to configure Kime: </para>
</para> <programlisting>
<programlisting>
i18n.inputMethod = { i18n.inputMethod = {
<link linkend="opt-i18n.inputMethod.enabled">enabled</link> = "kime"; enabled = &quot;kime&quot;;
}; };
</programlisting> </programlisting>
</section> </section>
</chapter> </chapter>

View file

@ -14,6 +14,10 @@
documentation.man.enable = lib.mkOverride 500 true; documentation.man.enable = lib.mkOverride 500 true;
# Although we don't really need HTML documentation in the minimal installer,
# not including it may cause annoying cache misses in the case of the NixOS manual.
documentation.doc.enable = lib.mkOverride 500 true;
fonts.fontconfig.enable = lib.mkForce false; fonts.fontconfig.enable = lib.mkForce false;
isoImage.edition = lib.mkForce "minimal"; isoImage.edition = lib.mkForce "minimal";

View file

@ -52,7 +52,7 @@ let
buildMenuAdditionalParamsGrub2 = additional: buildMenuAdditionalParamsGrub2 = additional:
let let
finalCfg = { finalCfg = {
name = "NixOS ${config.system.nixos.label}${config.isoImage.appendToMenuLabel}"; name = "${config.system.nixos.distroName} ${config.system.nixos.label}${config.isoImage.appendToMenuLabel}";
params = "init=${config.system.build.toplevel}/init ${additional} ${toString config.boot.kernelParams}"; params = "init=${config.system.build.toplevel}/init ${additional} ${toString config.boot.kernelParams}";
image = "/boot/${config.system.boot.loader.kernelFile}"; image = "/boot/${config.system.boot.loader.kernelFile}";
initrd = "/boot/initrd"; initrd = "/boot/initrd";
@ -109,35 +109,35 @@ let
DEFAULT boot DEFAULT boot
LABEL boot LABEL boot
MENU LABEL NixOS ${config.system.nixos.label}${config.isoImage.appendToMenuLabel} MENU LABEL ${config.system.nixos.distroName} ${config.system.nixos.label}${config.isoImage.appendToMenuLabel}
LINUX /boot/${config.system.boot.loader.kernelFile} LINUX /boot/${config.system.boot.loader.kernelFile}
APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
INITRD /boot/${config.system.boot.loader.initrdFile} INITRD /boot/${config.system.boot.loader.initrdFile}
# A variant to boot with 'nomodeset' # A variant to boot with 'nomodeset'
LABEL boot-nomodeset LABEL boot-nomodeset
MENU LABEL NixOS ${config.system.nixos.label}${config.isoImage.appendToMenuLabel} (nomodeset) MENU LABEL ${config.system.nixos.distroName} ${config.system.nixos.label}${config.isoImage.appendToMenuLabel} (nomodeset)
LINUX /boot/${config.system.boot.loader.kernelFile} LINUX /boot/${config.system.boot.loader.kernelFile}
APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} nomodeset APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} nomodeset
INITRD /boot/${config.system.boot.loader.initrdFile} INITRD /boot/${config.system.boot.loader.initrdFile}
# A variant to boot with 'copytoram' # A variant to boot with 'copytoram'
LABEL boot-copytoram LABEL boot-copytoram
MENU LABEL NixOS ${config.system.nixos.label}${config.isoImage.appendToMenuLabel} (copytoram) MENU LABEL ${config.system.nixos.distroName} ${config.system.nixos.label}${config.isoImage.appendToMenuLabel} (copytoram)
LINUX /boot/${config.system.boot.loader.kernelFile} LINUX /boot/${config.system.boot.loader.kernelFile}
APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} copytoram APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} copytoram
INITRD /boot/${config.system.boot.loader.initrdFile} INITRD /boot/${config.system.boot.loader.initrdFile}
# A variant to boot with verbose logging to the console # A variant to boot with verbose logging to the console
LABEL boot-debug LABEL boot-debug
MENU LABEL NixOS ${config.system.nixos.label}${config.isoImage.appendToMenuLabel} (debug) MENU LABEL ${config.system.nixos.distroName} ${config.system.nixos.label}${config.isoImage.appendToMenuLabel} (debug)
LINUX /boot/${config.system.boot.loader.kernelFile} LINUX /boot/${config.system.boot.loader.kernelFile}
APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} loglevel=7 APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} loglevel=7
INITRD /boot/${config.system.boot.loader.initrdFile} INITRD /boot/${config.system.boot.loader.initrdFile}
# A variant to boot with a serial console enabled # A variant to boot with a serial console enabled
LABEL boot-serial LABEL boot-serial
MENU LABEL NixOS ${config.system.nixos.label}${config.isoImage.appendToMenuLabel} (serial console=ttyS0,115200n8) MENU LABEL ${config.system.nixos.distroName} ${config.system.nixos.label}${config.isoImage.appendToMenuLabel} (serial console=ttyS0,115200n8)
LINUX /boot/${config.system.boot.loader.kernelFile} LINUX /boot/${config.system.boot.loader.kernelFile}
APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} console=ttyS0,115200n8 APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} console=ttyS0,115200n8
INITRD /boot/${config.system.boot.loader.initrdFile} INITRD /boot/${config.system.boot.loader.initrdFile}
@ -458,7 +458,7 @@ in
}; };
isoImage.isoBaseName = mkOption { isoImage.isoBaseName = mkOption {
default = "nixos"; default = config.system.nixos.distroId;
description = lib.mdDoc '' description = lib.mdDoc ''
Prefix of the name of the generated ISO image file. Prefix of the name of the generated ISO image file.
''; '';
@ -579,7 +579,7 @@ in
isoImage.syslinuxTheme = mkOption { isoImage.syslinuxTheme = mkOption {
default = '' default = ''
MENU TITLE NixOS MENU TITLE ${config.system.nixos.distroName}
MENU RESOLUTION 800 600 MENU RESOLUTION 800 600
MENU CLEAR MENU CLEAR
MENU ROWS 6 MENU ROWS 6

11
third_party/nixpkgs/nixos/modules/installer/tools/nixos-install.sh vendored Normal file → Executable file
View file

@ -188,6 +188,17 @@ nix-env --store "$mountPoint" "${extraBuildFlags[@]}" \
mkdir -m 0755 -p "$mountPoint/etc" mkdir -m 0755 -p "$mountPoint/etc"
touch "$mountPoint/etc/NIXOS" touch "$mountPoint/etc/NIXOS"
# Create a bind mount for each of the mount points inside the target file
# system. This preserves the validity of their absolute paths after changing
# the root with `nixos-enter`.
# Without this the bootloader installation may fail due to options that
# contain paths referenced during evaluation, like initrd.secrets.
if (( EUID == 0 )); then
mount --rbind --mkdir "$mountPoint" "$mountPoint$mountPoint"
mount --make-rslave "$mountPoint$mountPoint"
trap 'umount -R "$mountPoint$mountPoint" && rmdir "$mountPoint$mountPoint"' EXIT
fi
# Switch to the new system configuration. This will install Grub with # Switch to the new system configuration. This will install Grub with
# a menu default pointing at the kernel/initrd/etc of the new # a menu default pointing at the kernel/initrd/etc of the new
# configuration. # configuration.

View file

@ -235,6 +235,8 @@ in
nixos-enter nixos-enter
] ++ lib.optional (nixos-option != null) nixos-option; ] ++ lib.optional (nixos-option != null) nixos-option;
documentation.man.man-db.skipPackages = [ nixos-version ];
system.build = { system.build = {
inherit nixos-install nixos-generate-config nixos-option nixos-rebuild nixos-enter; inherit nixos-install nixos-generate-config nixos-option nixos-rebuild nixos-enter;
}; };

View file

@ -13,11 +13,21 @@ in
example = false; example = false;
}; };
skipPackages = lib.mkOption {
type = lib.types.listOf lib.types.package;
default = [];
internal = true;
description = lib.mdDoc ''
Packages to *not* include in the man-db.
This can be useful to avoid unnecessary rebuilds due to packages that change frequently, like nixos-version.
'';
};
manualPages = lib.mkOption { manualPages = lib.mkOption {
type = lib.types.path; type = lib.types.path;
default = pkgs.buildEnv { default = pkgs.buildEnv {
name = "man-paths"; name = "man-paths";
paths = config.environment.systemPackages; paths = lib.subtractLists cfg.skipPackages config.environment.systemPackages;
pathsToLink = [ "/share/man" ]; pathsToLink = [ "/share/man" ];
extraOutputsToInstall = [ "man" ] extraOutputsToInstall = [ "man" ]
++ lib.optionals config.documentation.dev.enable [ "devman" ]; ++ lib.optionals config.documentation.dev.enable [ "devman" ];

View file

@ -16,18 +16,18 @@ let
) + "\n"; ) + "\n";
osReleaseContents = { osReleaseContents = {
NAME = "NixOS"; NAME = "${cfg.distroName}";
ID = "nixos"; ID = "${cfg.distroId}";
VERSION = "${cfg.release} (${cfg.codeName})"; VERSION = "${cfg.release} (${cfg.codeName})";
VERSION_CODENAME = toLower cfg.codeName; VERSION_CODENAME = toLower cfg.codeName;
VERSION_ID = cfg.release; VERSION_ID = cfg.release;
BUILD_ID = cfg.version; BUILD_ID = cfg.version;
PRETTY_NAME = "NixOS ${cfg.release} (${cfg.codeName})"; PRETTY_NAME = "${cfg.distroName} ${cfg.release} (${cfg.codeName})";
LOGO = "nix-snowflake"; LOGO = "nix-snowflake";
HOME_URL = "https://nixos.org/"; HOME_URL = lib.optionalString (cfg.distroId == "nixos") "https://nixos.org/";
DOCUMENTATION_URL = "https://nixos.org/learn.html"; DOCUMENTATION_URL = lib.optionalString (cfg.distroId == "nixos") "https://nixos.org/learn.html";
SUPPORT_URL = "https://nixos.org/community.html"; SUPPORT_URL = lib.optionalString (cfg.distroId == "nixos") "https://nixos.org/community.html";
BUG_REPORT_URL = "https://github.com/NixOS/nixpkgs/issues"; BUG_REPORT_URL = lib.optionalString (cfg.distroId == "nixos") "https://github.com/NixOS/nixpkgs/issues";
} // lib.optionalAttrs (cfg.variant_id != null) { } // lib.optionalAttrs (cfg.variant_id != null) {
VARIANT_ID = cfg.variant_id; VARIANT_ID = cfg.variant_id;
}; };
@ -89,6 +89,20 @@ in
description = lib.mdDoc "The NixOS release code name (e.g. `Emu`)."; description = lib.mdDoc "The NixOS release code name (e.g. `Emu`).";
}; };
nixos.distroId = mkOption {
internal = true;
type = types.str;
default = "nixos";
description = lib.mdDoc "The id of the operating system";
};
nixos.distroName = mkOption {
internal = true;
type = types.str;
default = "NixOS";
description = lib.mdDoc "The name of the operating system";
};
nixos.variant_id = mkOption { nixos.variant_id = mkOption {
type = types.nullOr (types.strMatching "^[a-z0-9._-]+$"); type = types.nullOr (types.strMatching "^[a-z0-9._-]+$");
default = null; default = null;
@ -155,10 +169,10 @@ in
environment.etc = { environment.etc = {
"lsb-release".text = attrsToText { "lsb-release".text = attrsToText {
LSB_VERSION = "${cfg.release} (${cfg.codeName})"; LSB_VERSION = "${cfg.release} (${cfg.codeName})";
DISTRIB_ID = "nixos"; DISTRIB_ID = "${cfg.distroId}";
DISTRIB_RELEASE = cfg.release; DISTRIB_RELEASE = cfg.release;
DISTRIB_CODENAME = toLower cfg.codeName; DISTRIB_CODENAME = toLower cfg.codeName;
DISTRIB_DESCRIPTION = "NixOS ${cfg.release} (${cfg.codeName})"; DISTRIB_DESCRIPTION = "${cfg.distroName} ${cfg.release} (${cfg.codeName})";
}; };
"os-release".text = attrsToText osReleaseContents; "os-release".text = attrsToText osReleaseContents;

View file

@ -295,6 +295,7 @@
./services/amqp/rabbitmq.nix ./services/amqp/rabbitmq.nix
./services/audio/alsa.nix ./services/audio/alsa.nix
./services/audio/botamusique.nix ./services/audio/botamusique.nix
./services/audio/gmediarender.nix
./services/audio/hqplayerd.nix ./services/audio/hqplayerd.nix
./services/audio/icecast.nix ./services/audio/icecast.nix
./services/audio/jack.nix ./services/audio/jack.nix
@ -559,6 +560,7 @@
./services/matrix/mautrix-facebook.nix ./services/matrix/mautrix-facebook.nix
./services/matrix/mautrix-telegram.nix ./services/matrix/mautrix-telegram.nix
./services/matrix/mjolnir.nix ./services/matrix/mjolnir.nix
./services/matrix/mx-puppet-discord.nix
./services/matrix/pantalaimon.nix ./services/matrix/pantalaimon.nix
./services/matrix/synapse.nix ./services/matrix/synapse.nix
./services/misc/airsonic.nix ./services/misc/airsonic.nix
@ -626,7 +628,6 @@
./services/misc/mediatomb.nix ./services/misc/mediatomb.nix
./services/misc/metabase.nix ./services/misc/metabase.nix
./services/misc/moonraker.nix ./services/misc/moonraker.nix
./services/misc/mx-puppet-discord.nix
./services/misc/n8n.nix ./services/misc/n8n.nix
./services/misc/nitter.nix ./services/misc/nitter.nix
./services/misc/nix-daemon.nix ./services/misc/nix-daemon.nix
@ -1165,6 +1166,7 @@
./services/web-apps/peertube.nix ./services/web-apps/peertube.nix
./services/web-apps/pgpkeyserver-lite.nix ./services/web-apps/pgpkeyserver-lite.nix
./services/web-apps/phylactery.nix ./services/web-apps/phylactery.nix
./services/web-apps/photoprism.nix
./services/web-apps/pict-rs.nix ./services/web-apps/pict-rs.nix
./services/web-apps/plantuml-server.nix ./services/web-apps/plantuml-server.nix
./services/web-apps/plausible.nix ./services/web-apps/plausible.nix
@ -1302,6 +1304,8 @@
./system/boot/systemd/shutdown.nix ./system/boot/systemd/shutdown.nix
./system/boot/systemd/tmpfiles.nix ./system/boot/systemd/tmpfiles.nix
./system/boot/systemd/user.nix ./system/boot/systemd/user.nix
./system/boot/systemd/userdbd.nix
./system/boot/systemd/homed.nix
./system/boot/timesyncd.nix ./system/boot/timesyncd.nix
./system/boot/tmp.nix ./system/boot/tmp.nix
./system/boot/uvesafb.nix ./system/boot/uvesafb.nix

View file

@ -35,6 +35,7 @@
pkgs.rsync pkgs.rsync
pkgs.socat pkgs.socat
pkgs.screen pkgs.screen
pkgs.tcpdump
# Hardware-related tools. # Hardware-related tools.
pkgs.sdparm pkgs.sdparm

View file

@ -72,7 +72,7 @@ with lib;
# mounting the storage in a different system. # mounting the storage in a different system.
services.openssh = { services.openssh = {
enable = true; enable = true;
permitRootLogin = "yes"; settings.PermitRootLogin = "yes";
}; };
# Enable wpa_supplicant, but don't start it by default. # Enable wpa_supplicant, but don't start it by default.

View file

@ -0,0 +1,47 @@
# Digital Bitbox {#module-programs-digitalbitbox}
Digital Bitbox is a hardware wallet and second-factor authenticator.
The `digitalbitbox` programs module may be installed by setting
`programs.digitalbitbox` to `true` in a manner similar to
```
programs.digitalbitbox.enable = true;
```
and bundles the `digitalbitbox` package (see [](#sec-digitalbitbox-package)),
which contains the `dbb-app` and `dbb-cli` binaries, along with the hardware
module (see [](#sec-digitalbitbox-hardware-module)) which sets up the necessary
udev rules to access the device.
Enabling the digitalbitbox module is pretty much the easiest way to get a
Digital Bitbox device working on your system.
For more information, see <https://digitalbitbox.com/start_linux>.
## Package {#sec-digitalbitbox-package}
The binaries, `dbb-app` (a GUI tool) and `dbb-cli` (a CLI tool), are available
through the `digitalbitbox` package which could be installed as follows:
```
environment.systemPackages = [
pkgs.digitalbitbox
];
```
## Hardware {#sec-digitalbitbox-hardware-module}
The digitalbitbox hardware package enables the udev rules for Digital Bitbox
devices and may be installed as follows:
```
hardware.digitalbitbox.enable = true;
```
In order to alter the udev rules, one may provide different values for the
`udevRule51` and `udevRule52` attributes by means of overriding as follows:
```
programs.digitalbitbox = {
enable = true;
package = pkgs.digitalbitbox.override {
udevRule51 = "something else";
};
};
```

View file

@ -33,7 +33,7 @@ in
}; };
meta = { meta = {
doc = ./doc.xml; doc = ./default.xml;
maintainers = with lib.maintainers; [ vidbina ]; maintainers = with lib.maintainers; [ vidbina ];
}; };
} }

View file

@ -0,0 +1,70 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-programs-digitalbitbox">
<title>Digital Bitbox</title>
<para>
Digital Bitbox is a hardware wallet and second-factor authenticator.
</para>
<para>
The <literal>digitalbitbox</literal> programs module may be
installed by setting <literal>programs.digitalbitbox</literal> to
<literal>true</literal> in a manner similar to
</para>
<programlisting>
programs.digitalbitbox.enable = true;
</programlisting>
<para>
and bundles the <literal>digitalbitbox</literal> package (see
<xref linkend="sec-digitalbitbox-package" />), which contains the
<literal>dbb-app</literal> and <literal>dbb-cli</literal> binaries,
along with the hardware module (see
<xref linkend="sec-digitalbitbox-hardware-module" />) which sets up
the necessary udev rules to access the device.
</para>
<para>
Enabling the digitalbitbox module is pretty much the easiest way to
get a Digital Bitbox device working on your system.
</para>
<para>
For more information, see
<link xlink:href="https://digitalbitbox.com/start_linux">https://digitalbitbox.com/start_linux</link>.
</para>
<section xml:id="sec-digitalbitbox-package">
<title>Package</title>
<para>
The binaries, <literal>dbb-app</literal> (a GUI tool) and
<literal>dbb-cli</literal> (a CLI tool), are available through the
<literal>digitalbitbox</literal> package which could be installed
as follows:
</para>
<programlisting>
environment.systemPackages = [
pkgs.digitalbitbox
];
</programlisting>
</section>
<section xml:id="sec-digitalbitbox-hardware-module">
<title>Hardware</title>
<para>
The digitalbitbox hardware package enables the udev rules for
Digital Bitbox devices and may be installed as follows:
</para>
<programlisting>
hardware.digitalbitbox.enable = true;
</programlisting>
<para>
In order to alter the udev rules, one may provide different values
for the <literal>udevRule51</literal> and
<literal>udevRule52</literal> attributes by means of overriding as
follows:
</para>
<programlisting>
programs.digitalbitbox = {
enable = true;
package = pkgs.digitalbitbox.override {
udevRule51 = &quot;something else&quot;;
};
};
</programlisting>
</section>
</chapter>

View file

@ -1,74 +0,0 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="module-programs-digitalbitbox">
<title>Digital Bitbox</title>
<para>
Digital Bitbox is a hardware wallet and second-factor authenticator.
</para>
<para>
The <literal>digitalbitbox</literal> programs module may be installed by
setting <literal>programs.digitalbitbox</literal> to <literal>true</literal>
in a manner similar to
<programlisting>
<xref linkend="opt-programs.digitalbitbox.enable"/> = true;
</programlisting>
and bundles the <literal>digitalbitbox</literal> package (see
<xref
linkend="sec-digitalbitbox-package" />), which contains the
<literal>dbb-app</literal> and <literal>dbb-cli</literal> binaries, along
with the hardware module (see
<xref
linkend="sec-digitalbitbox-hardware-module" />) which sets up the
necessary udev rules to access the device.
</para>
<para>
Enabling the digitalbitbox module is pretty much the easiest way to get a
Digital Bitbox device working on your system.
</para>
<para>
For more information, see
<link xlink:href="https://digitalbitbox.com/start_linux" />.
</para>
<section xml:id="sec-digitalbitbox-package">
<title>Package</title>
<para>
The binaries, <literal>dbb-app</literal> (a GUI tool) and
<literal>dbb-cli</literal> (a CLI tool), are available through the
<literal>digitalbitbox</literal> package which could be installed as
follows:
<programlisting>
<xref linkend="opt-environment.systemPackages"/> = [
pkgs.digitalbitbox
];
</programlisting>
</para>
</section>
<section xml:id="sec-digitalbitbox-hardware-module">
<title>Hardware</title>
<para>
The digitalbitbox hardware package enables the udev rules for Digital Bitbox
devices and may be installed as follows:
<programlisting>
<xref linkend="opt-hardware.digitalbitbox.enable"/> = true;
</programlisting>
</para>
<para>
In order to alter the udev rules, one may provide different values for the
<literal>udevRule51</literal> and <literal>udevRule52</literal> attributes
by means of overriding as follows:
<programlisting>
programs.digitalbitbox = {
<link linkend="opt-programs.digitalbitbox.enable">enable</link> = true;
<link linkend="opt-programs.digitalbitbox.package">package</link> = pkgs.digitalbitbox.override {
udevRule51 = "something else";
};
};
</programlisting>
</para>
</section>
</chapter>

View file

@ -7,7 +7,7 @@ let
runtime' = filter (f: f.enable) (attrValues cfg.runtime); runtime' = filter (f: f.enable) (attrValues cfg.runtime);
runtime = pkgs.linkFarm "neovim-runtime" (map (x: { name = x.target; path = x.source; }) runtime'); runtime = pkgs.linkFarm "neovim-runtime" (map (x: { name = "etc/${x.target}"; path = x.source; }) runtime');
in { in {
options.programs.neovim = { options.programs.neovim = {

View file

@ -0,0 +1,17 @@
# Plotinus {#module-program-plotinus}
*Source:* {file}`modules/programs/plotinus.nix`
*Upstream documentation:* <https://github.com/p-e-w/plotinus>
Plotinus is a searchable command palette in every modern GTK application.
When in a GTK 3 application and Plotinus is enabled, you can press
`Ctrl+Shift+P` to open the command palette. The command
palette provides a searchable list of of all menu items in the application.
To enable Plotinus, add the following to your
{file}`configuration.nix`:
```
programs.plotinus.enable = true;
```

View file

@ -1,30 +1,30 @@
<chapter xmlns="http://docbook.org/ns/docbook" <!-- Do not edit this file directly, edit its companion .md instead
xmlns:xlink="http://www.w3.org/1999/xlink" and regenerate this file using nixos/doc/manual/md-to-db.sh -->
xmlns:xi="http://www.w3.org/2001/XInclude" <chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-program-plotinus">
version="5.0" <title>Plotinus</title>
xml:id="module-program-plotinus"> <para>
<title>Plotinus</title> <emphasis>Source:</emphasis>
<para> <filename>modules/programs/plotinus.nix</filename>
<emphasis>Source:</emphasis> </para>
<filename>modules/programs/plotinus.nix</filename> <para>
</para> <emphasis>Upstream documentation:</emphasis>
<para> <link xlink:href="https://github.com/p-e-w/plotinus">https://github.com/p-e-w/plotinus</link>
<emphasis>Upstream documentation:</emphasis> </para>
<link xlink:href="https://github.com/p-e-w/plotinus"/> <para>
</para> Plotinus is a searchable command palette in every modern GTK
<para> application.
Plotinus is a searchable command palette in every modern GTK application. </para>
</para> <para>
<para> When in a GTK 3 application and Plotinus is enabled, you can press
When in a GTK 3 application and Plotinus is enabled, you can press <literal>Ctrl+Shift+P</literal> to open the command palette. The
<literal>Ctrl+Shift+P</literal> to open the command palette. The command command palette provides a searchable list of of all menu items in
palette provides a searchable list of of all menu items in the application. the application.
</para> </para>
<para> <para>
To enable Plotinus, add the following to your To enable Plotinus, add the following to your
<filename>configuration.nix</filename>: <filename>configuration.nix</filename>:
<programlisting> </para>
<xref linkend="opt-programs.plotinus.enable"/> = true; <programlisting>
programs.plotinus.enable = true;
</programlisting> </programlisting>
</para>
</chapter> </chapter>

View file

@ -0,0 +1,109 @@
# Oh my ZSH {#module-programs-zsh-ohmyzsh}
[`oh-my-zsh`](https://ohmyz.sh/) is a framework to manage your [ZSH](https://www.zsh.org/)
configuration including completion scripts for several CLI tools or custom
prompt themes.
## Basic usage {#module-programs-oh-my-zsh-usage}
The module uses the `oh-my-zsh` package with all available
features. The initial setup using Nix expressions is fairly similar to the
configuration format of `oh-my-zsh`.
```
{
programs.zsh.ohMyZsh = {
enable = true;
plugins = [ "git" "python" "man" ];
theme = "agnoster";
};
}
```
For a detailed explanation of these arguments please refer to the
[`oh-my-zsh` docs](https://github.com/robbyrussell/oh-my-zsh/wiki).
The expression generates the needed configuration and writes it into your
`/etc/zshrc`.
## Custom additions {#module-programs-oh-my-zsh-additions}
Sometimes third-party or custom scripts such as a modified theme may be
needed. `oh-my-zsh` provides the
[`ZSH_CUSTOM`](https://github.com/robbyrussell/oh-my-zsh/wiki/Customization#overriding-internals)
environment variable for this which points to a directory with additional
scripts.
The module can do this as well:
```
{
programs.zsh.ohMyZsh.custom = "~/path/to/custom/scripts";
}
```
## Custom environments {#module-programs-oh-my-zsh-environments}
There are several extensions for `oh-my-zsh` packaged in
`nixpkgs`. One of them is
[nix-zsh-completions](https://github.com/spwhitt/nix-zsh-completions)
which bundles completion scripts and a plugin for `oh-my-zsh`.
Rather than using a single mutable path for `ZSH_CUSTOM`,
it's also possible to generate this path from a list of Nix packages:
```
{ pkgs, ... }:
{
programs.zsh.ohMyZsh.customPkgs = [
pkgs.nix-zsh-completions
# and even more...
];
}
```
Internally a single store path will be created using
`buildEnv`. Please refer to the docs of
[`buildEnv`](https://nixos.org/nixpkgs/manual/#sec-building-environment)
for further reference.
*Please keep in mind that this is not compatible with
`programs.zsh.ohMyZsh.custom` as it requires an immutable
store path while `custom` shall remain mutable! An
evaluation failure will be thrown if both `custom` and
`customPkgs` are set.*
## Package your own customizations {#module-programs-oh-my-zsh-packaging-customizations}
If third-party customizations (e.g. new themes) are supposed to be added to
`oh-my-zsh` there are several pitfalls to keep in mind:
- To comply with the default structure of `ZSH` the entire
output needs to be written to `$out/share/zsh`.
- Completion scripts are supposed to be stored at
`$out/share/zsh/site-functions`. This directory is part of the
[`fpath`](http://zsh.sourceforge.net/Doc/Release/Functions.html)
and the package should be compatible with pure `ZSH`
setups. The module will automatically link the contents of
`site-functions` to completions directory in the proper
store path.
- The `plugins` directory needs the structure
`pluginname/pluginname.plugin.zsh` as structured in the
[upstream repo](https://github.com/robbyrussell/oh-my-zsh/tree/91b771914bc7c43dd7c7a43b586c5de2c225ceb7/plugins).
A derivation for `oh-my-zsh` may look like this:
```
{ stdenv, fetchFromGitHub }:
stdenv.mkDerivation rec {
name = "exemplary-zsh-customization-${version}";
version = "1.0.0";
src = fetchFromGitHub {
# path to the upstream repository
};
dontBuild = true;
installPhase = ''
mkdir -p $out/share/zsh/site-functions
cp {themes,plugins} $out/share/zsh
cp completions $out/share/zsh/site-functions
'';
}
```

View file

@ -1,76 +1,74 @@
<chapter xmlns="http://docbook.org/ns/docbook" <!-- Do not edit this file directly, edit its companion .md instead
xmlns:xlink="http://www.w3.org/1999/xlink" and regenerate this file using nixos/doc/manual/md-to-db.sh -->
xmlns:xi="http://www.w3.org/2001/XInclude" <chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-programs-zsh-ohmyzsh">
version="5.0" <title>Oh my ZSH</title>
xml:id="module-programs-zsh-ohmyzsh">
<title>Oh my ZSH</title>
<para>
<literal><link xlink:href="https://ohmyz.sh/">oh-my-zsh</link></literal> is a
framework to manage your <link xlink:href="https://www.zsh.org/">ZSH</link>
configuration including completion scripts for several CLI tools or custom
prompt themes.
</para>
<section xml:id="module-programs-oh-my-zsh-usage">
<title>Basic usage</title>
<para> <para>
The module uses the <literal>oh-my-zsh</literal> package with all available <link xlink:href="https://ohmyz.sh/"><literal>oh-my-zsh</literal></link>
features. The initial setup using Nix expressions is fairly similar to the is a framework to manage your
configuration format of <literal>oh-my-zsh</literal>. <link xlink:href="https://www.zsh.org/">ZSH</link> configuration
<programlisting> including completion scripts for several CLI tools or custom prompt
themes.
</para>
<section xml:id="module-programs-oh-my-zsh-usage">
<title>Basic usage</title>
<para>
The module uses the <literal>oh-my-zsh</literal> package with all
available features. The initial setup using Nix expressions is
fairly similar to the configuration format of
<literal>oh-my-zsh</literal>.
</para>
<programlisting>
{ {
programs.zsh.ohMyZsh = { programs.zsh.ohMyZsh = {
enable = true; enable = true;
plugins = [ "git" "python" "man" ]; plugins = [ &quot;git&quot; &quot;python&quot; &quot;man&quot; ];
theme = "agnoster"; theme = &quot;agnoster&quot;;
}; };
} }
</programlisting> </programlisting>
For a detailed explanation of these arguments please refer to the <para>
<link xlink:href="https://github.com/robbyrussell/oh-my-zsh/wiki"><literal>oh-my-zsh</literal> For a detailed explanation of these arguments please refer to the
docs</link>. <link xlink:href="https://github.com/robbyrussell/oh-my-zsh/wiki"><literal>oh-my-zsh</literal>
</para> docs</link>.
</para>
<para> <para>
The expression generates the needed configuration and writes it into your The expression generates the needed configuration and writes it
<literal>/etc/zshrc</literal>. into your <literal>/etc/zshrc</literal>.
</para> </para>
</section> </section>
<section xml:id="module-programs-oh-my-zsh-additions"> <section xml:id="module-programs-oh-my-zsh-additions">
<title>Custom additions</title> <title>Custom additions</title>
<para>
<para> Sometimes third-party or custom scripts such as a modified theme
Sometimes third-party or custom scripts such as a modified theme may be may be needed. <literal>oh-my-zsh</literal> provides the
needed. <literal>oh-my-zsh</literal> provides the <link xlink:href="https://github.com/robbyrussell/oh-my-zsh/wiki/Customization#overriding-internals"><literal>ZSH_CUSTOM</literal></link>
<link xlink:href="https://github.com/robbyrussell/oh-my-zsh/wiki/Customization#overriding-internals"><literal>ZSH_CUSTOM</literal></link> environment variable for this which points to a directory with
environment variable for this which points to a directory with additional additional scripts.
scripts. </para>
</para> <para>
The module can do this as well:
<para> </para>
The module can do this as well: <programlisting>
<programlisting>
{ {
programs.zsh.ohMyZsh.custom = "~/path/to/custom/scripts"; programs.zsh.ohMyZsh.custom = &quot;~/path/to/custom/scripts&quot;;
} }
</programlisting> </programlisting>
</para> </section>
</section> <section xml:id="module-programs-oh-my-zsh-environments">
<section xml:id="module-programs-oh-my-zsh-environments"> <title>Custom environments</title>
<title>Custom environments</title> <para>
There are several extensions for <literal>oh-my-zsh</literal>
<para> packaged in <literal>nixpkgs</literal>. One of them is
There are several extensions for <literal>oh-my-zsh</literal> packaged in <link xlink:href="https://github.com/spwhitt/nix-zsh-completions">nix-zsh-completions</link>
<literal>nixpkgs</literal>. One of them is which bundles completion scripts and a plugin for
<link xlink:href="https://github.com/spwhitt/nix-zsh-completions">nix-zsh-completions</link> <literal>oh-my-zsh</literal>.
which bundles completion scripts and a plugin for </para>
<literal>oh-my-zsh</literal>. <para>
</para> Rather than using a single mutable path for
<literal>ZSH_CUSTOM</literal>, its also possible to generate this
<para> path from a list of Nix packages:
Rather than using a single mutable path for <literal>ZSH_CUSTOM</literal>, </para>
it's also possible to generate this path from a list of Nix packages: <programlisting>
<programlisting>
{ pkgs, ... }: { pkgs, ... }:
{ {
programs.zsh.ohMyZsh.customPkgs = [ programs.zsh.ohMyZsh.customPkgs = [
@ -79,65 +77,67 @@
]; ];
} }
</programlisting> </programlisting>
Internally a single store path will be created using
<literal>buildEnv</literal>. Please refer to the docs of
<link xlink:href="https://nixos.org/nixpkgs/manual/#sec-building-environment"><literal>buildEnv</literal></link>
for further reference.
</para>
<para>
<emphasis>Please keep in mind that this is not compatible with
<literal>programs.zsh.ohMyZsh.custom</literal> as it requires an immutable
store path while <literal>custom</literal> shall remain mutable! An
evaluation failure will be thrown if both <literal>custom</literal> and
<literal>customPkgs</literal> are set.</emphasis>
</para>
</section>
<section xml:id="module-programs-oh-my-zsh-packaging-customizations">
<title>Package your own customizations</title>
<para>
If third-party customizations (e.g. new themes) are supposed to be added to
<literal>oh-my-zsh</literal> there are several pitfalls to keep in mind:
</para>
<itemizedlist>
<listitem>
<para> <para>
To comply with the default structure of <literal>ZSH</literal> the entire Internally a single store path will be created using
output needs to be written to <literal>$out/share/zsh.</literal> <literal>buildEnv</literal>. Please refer to the docs of
<link xlink:href="https://nixos.org/nixpkgs/manual/#sec-building-environment"><literal>buildEnv</literal></link>
for further reference.
</para> </para>
</listitem>
<listitem>
<para> <para>
Completion scripts are supposed to be stored at <emphasis>Please keep in mind that this is not compatible with
<literal>$out/share/zsh/site-functions</literal>. This directory is part <literal>programs.zsh.ohMyZsh.custom</literal> as it requires an
of the immutable store path while <literal>custom</literal> shall remain
<literal><link xlink:href="http://zsh.sourceforge.net/Doc/Release/Functions.html">fpath</link></literal> mutable! An evaluation failure will be thrown if both
and the package should be compatible with pure <literal>ZSH</literal> <literal>custom</literal> and <literal>customPkgs</literal> are
setups. The module will automatically link the contents of set.</emphasis>
<literal>site-functions</literal> to completions directory in the proper
store path.
</para> </para>
</listitem> </section>
<listitem> <section xml:id="module-programs-oh-my-zsh-packaging-customizations">
<title>Package your own customizations</title>
<para> <para>
The <literal>plugins</literal> directory needs the structure If third-party customizations (e.g. new themes) are supposed to be
<literal>pluginname/pluginname.plugin.zsh</literal> as structured in the added to <literal>oh-my-zsh</literal> there are several pitfalls
<link xlink:href="https://github.com/robbyrussell/oh-my-zsh/tree/91b771914bc7c43dd7c7a43b586c5de2c225ceb7/plugins">upstream to keep in mind:
repo.</link>
</para> </para>
</listitem> <itemizedlist>
</itemizedlist> <listitem>
<para>
<para> To comply with the default structure of <literal>ZSH</literal>
A derivation for <literal>oh-my-zsh</literal> may look like this: the entire output needs to be written to
<programlisting> <literal>$out/share/zsh.</literal>
</para>
</listitem>
<listitem>
<para>
Completion scripts are supposed to be stored at
<literal>$out/share/zsh/site-functions</literal>. This
directory is part of the
<link xlink:href="http://zsh.sourceforge.net/Doc/Release/Functions.html"><literal>fpath</literal></link>
and the package should be compatible with pure
<literal>ZSH</literal> setups. The module will automatically
link the contents of <literal>site-functions</literal> to
completions directory in the proper store path.
</para>
</listitem>
<listitem>
<para>
The <literal>plugins</literal> directory needs the structure
<literal>pluginname/pluginname.plugin.zsh</literal> as
structured in the
<link xlink:href="https://github.com/robbyrussell/oh-my-zsh/tree/91b771914bc7c43dd7c7a43b586c5de2c225ceb7/plugins">upstream
repo.</link>
</para>
</listitem>
</itemizedlist>
<para>
A derivation for <literal>oh-my-zsh</literal> may look like this:
</para>
<programlisting>
{ stdenv, fetchFromGitHub }: { stdenv, fetchFromGitHub }:
stdenv.mkDerivation rec { stdenv.mkDerivation rec {
name = "exemplary-zsh-customization-${version}"; name = &quot;exemplary-zsh-customization-${version}&quot;;
version = "1.0.0"; version = &quot;1.0.0&quot;;
src = fetchFromGitHub { src = fetchFromGitHub {
# path to the upstream repository # path to the upstream repository
}; };
@ -150,6 +150,5 @@ stdenv.mkDerivation rec {
''; '';
} }
</programlisting> </programlisting>
</para> </section>
</section>
</chapter> </chapter>

View file

@ -26,6 +26,7 @@ in
"brackets" "brackets"
"pattern" "pattern"
"cursor" "cursor"
"regexp"
"root" "root"
"line" "line"
])); ]));

View file

@ -0,0 +1,354 @@
# SSL/TLS Certificates with ACME {#module-security-acme}
NixOS supports automatic domain validation & certificate retrieval and
renewal using the ACME protocol. Any provider can be used, but by default
NixOS uses Let's Encrypt. The alternative ACME client
[lego](https://go-acme.github.io/lego/) is used under
the hood.
Automatic cert validation and configuration for Apache and Nginx virtual
hosts is included in NixOS, however if you would like to generate a wildcard
cert or you are not using a web server you will have to configure DNS
based validation.
## Prerequisites {#module-security-acme-prerequisites}
To use the ACME module, you must accept the provider's terms of service
by setting [](#opt-security.acme.acceptTerms)
to `true`. The Let's Encrypt ToS can be found
[here](https://letsencrypt.org/repository/).
You must also set an email address to be used when creating accounts with
Let's Encrypt. You can set this for all certs with
[](#opt-security.acme.defaults.email)
and/or on a per-cert basis with
[](#opt-security.acme.certs._name_.email).
This address is only used for registration and renewal reminders,
and cannot be used to administer the certificates in any way.
Alternatively, you can use a different ACME server by changing the
[](#opt-security.acme.defaults.server) option
to a provider of your choosing, or just change the server for one cert with
[](#opt-security.acme.certs._name_.server).
You will need an HTTP server or DNS server for verification. For HTTP,
the server must have a webroot defined that can serve
{file}`.well-known/acme-challenge`. This directory must be
writeable by the user that will run the ACME client. For DNS, you must
set up credentials with your provider/server for use with lego.
## Using ACME certificates in Nginx {#module-security-acme-nginx}
NixOS supports fetching ACME certificates for you by setting
`enableACME = true;` in a virtualHost config. We first create self-signed
placeholder certificates in place of the real ACME certs. The placeholder
certs are overwritten when the ACME certs arrive. For
`foo.example.com` the config would look like this:
```
security.acme.acceptTerms = true;
security.acme.defaults.email = "admin+acme@example.com";
services.nginx = {
enable = true;
virtualHosts = {
"foo.example.com" = {
forceSSL = true;
enableACME = true;
# All serverAliases will be added as extra domain names on the certificate.
serverAliases = [ "bar.example.com" ];
locations."/" = {
root = "/var/www";
};
};
# We can also add a different vhost and reuse the same certificate
# but we have to append extraDomainNames manually beforehand:
# security.acme.certs."foo.example.com".extraDomainNames = [ "baz.example.com" ];
"baz.example.com" = {
forceSSL = true;
useACMEHost = "foo.example.com";
locations."/" = {
root = "/var/www";
};
};
};
}
```
## Using ACME certificates in Apache/httpd {#module-security-acme-httpd}
Using ACME certificates with Apache virtual hosts is identical
to using them with Nginx. The attribute names are all the same, just replace
"nginx" with "httpd" where appropriate.
## Manual configuration of HTTP-01 validation {#module-security-acme-configuring}
First off you will need to set up a virtual host to serve the challenges.
This example uses a vhost called `certs.example.com`, with
the intent that you will generate certs for all your vhosts and redirect
everyone to HTTPS.
```
security.acme.acceptTerms = true;
security.acme.defaults.email = "admin+acme@example.com";
# /var/lib/acme/.challenges must be writable by the ACME user
# and readable by the Nginx user. The easiest way to achieve
# this is to add the Nginx user to the ACME group.
users.users.nginx.extraGroups = [ "acme" ];
services.nginx = {
enable = true;
virtualHosts = {
"acmechallenge.example.com" = {
# Catchall vhost, will redirect users to HTTPS for all vhosts
serverAliases = [ "*.example.com" ];
locations."/.well-known/acme-challenge" = {
root = "/var/lib/acme/.challenges";
};
locations."/" = {
return = "301 https://$host$request_uri";
};
};
};
}
# Alternative config for Apache
users.users.wwwrun.extraGroups = [ "acme" ];
services.httpd = {
enable = true;
virtualHosts = {
"acmechallenge.example.com" = {
# Catchall vhost, will redirect users to HTTPS for all vhosts
serverAliases = [ "*.example.com" ];
# /var/lib/acme/.challenges must be writable by the ACME user and readable by the Apache user.
# By default, this is the case.
documentRoot = "/var/lib/acme/.challenges";
extraConfig = ''
RewriteEngine On
RewriteCond %{HTTPS} off
RewriteCond %{REQUEST_URI} !^/\.well-known/acme-challenge [NC]
RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} [R=301]
'';
};
};
}
```
Now you need to configure ACME to generate a certificate.
```
security.acme.certs."foo.example.com" = {
webroot = "/var/lib/acme/.challenges";
email = "foo@example.com";
# Ensure that the web server you use can read the generated certs
# Take a look at the group option for the web server you choose.
group = "nginx";
# Since we have a wildcard vhost to handle port 80,
# we can generate certs for anything!
# Just make sure your DNS resolves them.
extraDomainNames = [ "mail.example.com" ];
};
```
The private key {file}`key.pem` and certificate
{file}`fullchain.pem` will be put into
{file}`/var/lib/acme/foo.example.com`.
Refer to [](#ch-options) for all available configuration
options for the [security.acme](#opt-security.acme.certs)
module.
## Configuring ACME for DNS validation {#module-security-acme-config-dns}
This is useful if you want to generate a wildcard certificate, since
ACME servers will only hand out wildcard certs over DNS validation.
There are a number of supported DNS providers and servers you can utilise,
see the [lego docs](https://go-acme.github.io/lego/dns/)
for provider/server specific configuration values. For the sake of these
docs, we will provide a fully self-hosted example using bind.
```
services.bind = {
enable = true;
extraConfig = ''
include "/var/lib/secrets/dnskeys.conf";
'';
zones = [
rec {
name = "example.com";
file = "/var/db/bind/${name}";
master = true;
extraConfig = "allow-update { key rfc2136key.example.com.; };";
}
];
}
# Now we can configure ACME
security.acme.acceptTerms = true;
security.acme.defaults.email = "admin+acme@example.com";
security.acme.certs."example.com" = {
domain = "*.example.com";
dnsProvider = "rfc2136";
credentialsFile = "/var/lib/secrets/certs.secret";
# We don't need to wait for propagation since this is a local DNS server
dnsPropagationCheck = false;
};
```
The {file}`dnskeys.conf` and {file}`certs.secret`
must be kept secure and thus you should not keep their contents in your
Nix config. Instead, generate them one time with a systemd service:
```
systemd.services.dns-rfc2136-conf = {
requiredBy = ["acme-example.com.service" "bind.service"];
before = ["acme-example.com.service" "bind.service"];
unitConfig = {
ConditionPathExists = "!/var/lib/secrets/dnskeys.conf";
};
serviceConfig = {
Type = "oneshot";
UMask = 0077;
};
path = [ pkgs.bind ];
script = ''
mkdir -p /var/lib/secrets
chmod 755 /var/lib/secrets
tsig-keygen rfc2136key.example.com > /var/lib/secrets/dnskeys.conf
chown named:root /var/lib/secrets/dnskeys.conf
chmod 400 /var/lib/secrets/dnskeys.conf
# extract secret value from the dnskeys.conf
while read x y; do if [ "$x" = "secret" ]; then secret="''${y:1:''${#y}-3}"; fi; done < /var/lib/secrets/dnskeys.conf
cat > /var/lib/secrets/certs.secret << EOF
RFC2136_NAMESERVER='127.0.0.1:53'
RFC2136_TSIG_ALGORITHM='hmac-sha256.'
RFC2136_TSIG_KEY='rfc2136key.example.com'
RFC2136_TSIG_SECRET='$secret'
EOF
chmod 400 /var/lib/secrets/certs.secret
'';
};
```
Now you're all set to generate certs! You should monitor the first invocation
by running `systemctl start acme-example.com.service &
journalctl -fu acme-example.com.service` and watching its log output.
## Using DNS validation with web server virtual hosts {#module-security-acme-config-dns-with-vhosts}
It is possible to use DNS-01 validation with all certificates,
including those automatically configured via the Nginx/Apache
[`enableACME`](#opt-services.nginx.virtualHosts._name_.enableACME)
option. This configuration pattern is fully
supported and part of the module's test suite for Nginx + Apache.
You must follow the guide above on configuring DNS-01 validation
first, however instead of setting the options for one certificate
(e.g. [](#opt-security.acme.certs._name_.dnsProvider))
you will set them as defaults
(e.g. [](#opt-security.acme.defaults.dnsProvider)).
```
# Configure ACME appropriately
security.acme.acceptTerms = true;
security.acme.defaults.email = "admin+acme@example.com";
security.acme.defaults = {
dnsProvider = "rfc2136";
credentialsFile = "/var/lib/secrets/certs.secret";
# We don't need to wait for propagation since this is a local DNS server
dnsPropagationCheck = false;
};
# For each virtual host you would like to use DNS-01 validation with,
# set acmeRoot = null
services.nginx = {
enable = true;
virtualHosts = {
"foo.example.com" = {
enableACME = true;
acmeRoot = null;
};
};
}
```
And that's it! Next time your configuration is rebuilt, or when
you add a new virtualHost, it will be DNS-01 validated.
## Using ACME with services demanding root owned certificates {#module-security-acme-root-owned}
Some services refuse to start if the configured certificate files
are not owned by root. PostgreSQL and OpenSMTPD are examples of these.
There is no way to change the user the ACME module uses (it will always be
`acme`), however you can use systemd's
`LoadCredential` feature to resolve this elegantly.
Below is an example configuration for OpenSMTPD, but this pattern
can be applied to any service.
```
# Configure ACME however you like (DNS or HTTP validation), adding
# the following configuration for the relevant certificate.
# Note: You cannot use `systemctl reload` here as that would mean
# the LoadCredential configuration below would be skipped and
# the service would continue to use old certificates.
security.acme.certs."mail.example.com".postRun = ''
systemctl restart opensmtpd
'';
# Now you must augment OpenSMTPD's systemd service to load
# the certificate files.
systemd.services.opensmtpd.requires = ["acme-finished-mail.example.com.target"];
systemd.services.opensmtpd.serviceConfig.LoadCredential = let
certDir = config.security.acme.certs."mail.example.com".directory;
in [
"cert.pem:${certDir}/cert.pem"
"key.pem:${certDir}/key.pem"
];
# Finally, configure OpenSMTPD to use these certs.
services.opensmtpd = let
credsDir = "/run/credentials/opensmtpd.service";
in {
enable = true;
setSendmail = false;
serverConfiguration = ''
pki mail.example.com cert "${credsDir}/cert.pem"
pki mail.example.com key "${credsDir}/key.pem"
listen on localhost tls pki mail.example.com
action act1 relay host smtp://127.0.0.1:10027
match for local action act1
'';
};
```
## Regenerating certificates {#module-security-acme-regenerate}
Should you need to regenerate a particular certificate in a hurry, such
as when a vulnerability is found in Let's Encrypt, there is now a convenient
mechanism for doing so. Running
`systemctl clean --what=state acme-example.com.service`
will remove all certificate files and the account data for the given domain,
allowing you to then `systemctl start acme-example.com.service`
to generate fresh ones.
## Fixing JWS Verification error {#module-security-acme-fix-jws}
It is possible that your account credentials file may become corrupt and need
to be regenerated. In this scenario lego will produce the error `JWS verification error`.
The solution is to simply delete the associated accounts file and
re-run the affected service(s).
```
# Find the accounts folder for the certificate
systemctl cat acme-example.com.service | grep -Po 'accounts/[^:]*'
export accountdir="$(!!)"
# Move this folder to some place else
mv /var/lib/acme/.lego/$accountdir{,.bak}
# Recreate the folder using systemd-tmpfiles
systemd-tmpfiles --create
# Get a new account and reissue certificates
# Note: Do this for all certs that share the same account email address
systemctl start acme-example.com.service
```

View file

@ -916,6 +916,6 @@ in {
meta = { meta = {
maintainers = lib.teams.acme.members; maintainers = lib.teams.acme.members;
doc = ./doc.xml; doc = ./default.xml;
}; };
} }

View file

@ -0,0 +1,395 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-security-acme">
<title>SSL/TLS Certificates with ACME</title>
<para>
NixOS supports automatic domain validation &amp; certificate
retrieval and renewal using the ACME protocol. Any provider can be
used, but by default NixOS uses Lets Encrypt. The alternative ACME
client
<link xlink:href="https://go-acme.github.io/lego/">lego</link> is
used under the hood.
</para>
<para>
Automatic cert validation and configuration for Apache and Nginx
virtual hosts is included in NixOS, however if you would like to
generate a wildcard cert or you are not using a web server you will
have to configure DNS based validation.
</para>
<section xml:id="module-security-acme-prerequisites">
<title>Prerequisites</title>
<para>
To use the ACME module, you must accept the providers terms of
service by setting
<xref linkend="opt-security.acme.acceptTerms" /> to
<literal>true</literal>. The Lets Encrypt ToS can be found
<link xlink:href="https://letsencrypt.org/repository/">here</link>.
</para>
<para>
You must also set an email address to be used when creating
accounts with Lets Encrypt. You can set this for all certs with
<xref linkend="opt-security.acme.defaults.email" /> and/or on a
per-cert basis with
<xref linkend="opt-security.acme.certs._name_.email" />. This
address is only used for registration and renewal reminders, and
cannot be used to administer the certificates in any way.
</para>
<para>
Alternatively, you can use a different ACME server by changing the
<xref linkend="opt-security.acme.defaults.server" /> option to a
provider of your choosing, or just change the server for one cert
with <xref linkend="opt-security.acme.certs._name_.server" />.
</para>
<para>
You will need an HTTP server or DNS server for verification. For
HTTP, the server must have a webroot defined that can serve
<filename>.well-known/acme-challenge</filename>. This directory
must be writeable by the user that will run the ACME client. For
DNS, you must set up credentials with your provider/server for use
with lego.
</para>
</section>
<section xml:id="module-security-acme-nginx">
<title>Using ACME certificates in Nginx</title>
<para>
NixOS supports fetching ACME certificates for you by setting
<literal>enableACME = true;</literal> in a virtualHost config. We
first create self-signed placeholder certificates in place of the
real ACME certs. The placeholder certs are overwritten when the
ACME certs arrive. For <literal>foo.example.com</literal> the
config would look like this:
</para>
<programlisting>
security.acme.acceptTerms = true;
security.acme.defaults.email = &quot;admin+acme@example.com&quot;;
services.nginx = {
enable = true;
virtualHosts = {
&quot;foo.example.com&quot; = {
forceSSL = true;
enableACME = true;
# All serverAliases will be added as extra domain names on the certificate.
serverAliases = [ &quot;bar.example.com&quot; ];
locations.&quot;/&quot; = {
root = &quot;/var/www&quot;;
};
};
# We can also add a different vhost and reuse the same certificate
# but we have to append extraDomainNames manually beforehand:
# security.acme.certs.&quot;foo.example.com&quot;.extraDomainNames = [ &quot;baz.example.com&quot; ];
&quot;baz.example.com&quot; = {
forceSSL = true;
useACMEHost = &quot;foo.example.com&quot;;
locations.&quot;/&quot; = {
root = &quot;/var/www&quot;;
};
};
};
}
</programlisting>
</section>
<section xml:id="module-security-acme-httpd">
<title>Using ACME certificates in Apache/httpd</title>
<para>
Using ACME certificates with Apache virtual hosts is identical to
using them with Nginx. The attribute names are all the same, just
replace <quote>nginx</quote> with <quote>httpd</quote> where
appropriate.
</para>
</section>
<section xml:id="module-security-acme-configuring">
<title>Manual configuration of HTTP-01 validation</title>
<para>
First off you will need to set up a virtual host to serve the
challenges. This example uses a vhost called
<literal>certs.example.com</literal>, with the intent that you
will generate certs for all your vhosts and redirect everyone to
HTTPS.
</para>
<programlisting>
security.acme.acceptTerms = true;
security.acme.defaults.email = &quot;admin+acme@example.com&quot;;
# /var/lib/acme/.challenges must be writable by the ACME user
# and readable by the Nginx user. The easiest way to achieve
# this is to add the Nginx user to the ACME group.
users.users.nginx.extraGroups = [ &quot;acme&quot; ];
services.nginx = {
enable = true;
virtualHosts = {
&quot;acmechallenge.example.com&quot; = {
# Catchall vhost, will redirect users to HTTPS for all vhosts
serverAliases = [ &quot;*.example.com&quot; ];
locations.&quot;/.well-known/acme-challenge&quot; = {
root = &quot;/var/lib/acme/.challenges&quot;;
};
locations.&quot;/&quot; = {
return = &quot;301 https://$host$request_uri&quot;;
};
};
};
}
# Alternative config for Apache
users.users.wwwrun.extraGroups = [ &quot;acme&quot; ];
services.httpd = {
enable = true;
virtualHosts = {
&quot;acmechallenge.example.com&quot; = {
# Catchall vhost, will redirect users to HTTPS for all vhosts
serverAliases = [ &quot;*.example.com&quot; ];
# /var/lib/acme/.challenges must be writable by the ACME user and readable by the Apache user.
# By default, this is the case.
documentRoot = &quot;/var/lib/acme/.challenges&quot;;
extraConfig = ''
RewriteEngine On
RewriteCond %{HTTPS} off
RewriteCond %{REQUEST_URI} !^/\.well-known/acme-challenge [NC]
RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} [R=301]
'';
};
};
}
</programlisting>
<para>
Now you need to configure ACME to generate a certificate.
</para>
<programlisting>
security.acme.certs.&quot;foo.example.com&quot; = {
webroot = &quot;/var/lib/acme/.challenges&quot;;
email = &quot;foo@example.com&quot;;
# Ensure that the web server you use can read the generated certs
# Take a look at the group option for the web server you choose.
group = &quot;nginx&quot;;
# Since we have a wildcard vhost to handle port 80,
# we can generate certs for anything!
# Just make sure your DNS resolves them.
extraDomainNames = [ &quot;mail.example.com&quot; ];
};
</programlisting>
<para>
The private key <filename>key.pem</filename> and certificate
<filename>fullchain.pem</filename> will be put into
<filename>/var/lib/acme/foo.example.com</filename>.
</para>
<para>
Refer to <xref linkend="ch-options" /> for all available
configuration options for the
<link linkend="opt-security.acme.certs">security.acme</link>
module.
</para>
</section>
<section xml:id="module-security-acme-config-dns">
<title>Configuring ACME for DNS validation</title>
<para>
This is useful if you want to generate a wildcard certificate,
since ACME servers will only hand out wildcard certs over DNS
validation. There are a number of supported DNS providers and
servers you can utilise, see the
<link xlink:href="https://go-acme.github.io/lego/dns/">lego
docs</link> for provider/server specific configuration values. For
the sake of these docs, we will provide a fully self-hosted
example using bind.
</para>
<programlisting>
services.bind = {
enable = true;
extraConfig = ''
include &quot;/var/lib/secrets/dnskeys.conf&quot;;
'';
zones = [
rec {
name = &quot;example.com&quot;;
file = &quot;/var/db/bind/${name}&quot;;
master = true;
extraConfig = &quot;allow-update { key rfc2136key.example.com.; };&quot;;
}
];
}
# Now we can configure ACME
security.acme.acceptTerms = true;
security.acme.defaults.email = &quot;admin+acme@example.com&quot;;
security.acme.certs.&quot;example.com&quot; = {
domain = &quot;*.example.com&quot;;
dnsProvider = &quot;rfc2136&quot;;
credentialsFile = &quot;/var/lib/secrets/certs.secret&quot;;
# We don't need to wait for propagation since this is a local DNS server
dnsPropagationCheck = false;
};
</programlisting>
<para>
The <filename>dnskeys.conf</filename> and
<filename>certs.secret</filename> must be kept secure and thus you
should not keep their contents in your Nix config. Instead,
generate them one time with a systemd service:
</para>
<programlisting>
systemd.services.dns-rfc2136-conf = {
requiredBy = [&quot;acme-example.com.service&quot; &quot;bind.service&quot;];
before = [&quot;acme-example.com.service&quot; &quot;bind.service&quot;];
unitConfig = {
ConditionPathExists = &quot;!/var/lib/secrets/dnskeys.conf&quot;;
};
serviceConfig = {
Type = &quot;oneshot&quot;;
UMask = 0077;
};
path = [ pkgs.bind ];
script = ''
mkdir -p /var/lib/secrets
chmod 755 /var/lib/secrets
tsig-keygen rfc2136key.example.com &gt; /var/lib/secrets/dnskeys.conf
chown named:root /var/lib/secrets/dnskeys.conf
chmod 400 /var/lib/secrets/dnskeys.conf
# extract secret value from the dnskeys.conf
while read x y; do if [ &quot;$x&quot; = &quot;secret&quot; ]; then secret=&quot;''${y:1:''${#y}-3}&quot;; fi; done &lt; /var/lib/secrets/dnskeys.conf
cat &gt; /var/lib/secrets/certs.secret &lt;&lt; EOF
RFC2136_NAMESERVER='127.0.0.1:53'
RFC2136_TSIG_ALGORITHM='hmac-sha256.'
RFC2136_TSIG_KEY='rfc2136key.example.com'
RFC2136_TSIG_SECRET='$secret'
EOF
chmod 400 /var/lib/secrets/certs.secret
'';
};
</programlisting>
<para>
      Now you're all set to generate certs! You should monitor the first
invocation by running
<literal>systemctl start acme-example.com.service &amp; journalctl -fu acme-example.com.service</literal>
and watching its log output.
</para>
</section>
<section xml:id="module-security-acme-config-dns-with-vhosts">
<title>Using DNS validation with web server virtual hosts</title>
<para>
It is possible to use DNS-01 validation with all certificates,
including those automatically configured via the Nginx/Apache
<link linkend="opt-services.nginx.virtualHosts._name_.enableACME"><literal>enableACME</literal></link>
option. This configuration pattern is fully supported and part of
      the module's test suite for Nginx + Apache.
</para>
<para>
You must follow the guide above on configuring DNS-01 validation
first, however instead of setting the options for one certificate
(e.g.
<xref linkend="opt-security.acme.certs._name_.dnsProvider" />) you
will set them as defaults (e.g.
<xref linkend="opt-security.acme.defaults.dnsProvider" />).
</para>
<programlisting>
# Configure ACME appropriately
security.acme.acceptTerms = true;
security.acme.defaults.email = &quot;admin+acme@example.com&quot;;
security.acme.defaults = {
dnsProvider = &quot;rfc2136&quot;;
credentialsFile = &quot;/var/lib/secrets/certs.secret&quot;;
# We don't need to wait for propagation since this is a local DNS server
dnsPropagationCheck = false;
};
# For each virtual host you would like to use DNS-01 validation with,
# set acmeRoot = null
services.nginx = {
enable = true;
virtualHosts = {
&quot;foo.example.com&quot; = {
enableACME = true;
acmeRoot = null;
};
};
}
</programlisting>
<para>
      And that's it! Next time your configuration is rebuilt, or when
you add a new virtualHost, it will be DNS-01 validated.
</para>
</section>
<section xml:id="module-security-acme-root-owned">
<title>Using ACME with services demanding root owned
certificates</title>
<para>
Some services refuse to start if the configured certificate files
are not owned by root. PostgreSQL and OpenSMTPD are examples of
these. There is no way to change the user the ACME module uses (it
will always be <literal>acme</literal>), however you can use
      systemd's <literal>LoadCredential</literal> feature to resolve
this elegantly. Below is an example configuration for OpenSMTPD,
but this pattern can be applied to any service.
</para>
<programlisting>
# Configure ACME however you like (DNS or HTTP validation), adding
# the following configuration for the relevant certificate.
# Note: You cannot use `systemctl reload` here as that would mean
# the LoadCredential configuration below would be skipped and
# the service would continue to use old certificates.
security.acme.certs.&quot;mail.example.com&quot;.postRun = ''
systemctl restart opensmtpd
'';
# Now you must augment OpenSMTPD's systemd service to load
# the certificate files.
systemd.services.opensmtpd.requires = [&quot;acme-finished-mail.example.com.target&quot;];
systemd.services.opensmtpd.serviceConfig.LoadCredential = let
certDir = config.security.acme.certs.&quot;mail.example.com&quot;.directory;
in [
&quot;cert.pem:${certDir}/cert.pem&quot;
&quot;key.pem:${certDir}/key.pem&quot;
];
# Finally, configure OpenSMTPD to use these certs.
services.opensmtpd = let
credsDir = &quot;/run/credentials/opensmtpd.service&quot;;
in {
enable = true;
setSendmail = false;
serverConfiguration = ''
pki mail.example.com cert &quot;${credsDir}/cert.pem&quot;
pki mail.example.com key &quot;${credsDir}/key.pem&quot;
listen on localhost tls pki mail.example.com
action act1 relay host smtp://127.0.0.1:10027
match for local action act1
'';
};
</programlisting>
</section>
<section xml:id="module-security-acme-regenerate">
<title>Regenerating certificates</title>
<para>
Should you need to regenerate a particular certificate in a hurry,
      such as when a vulnerability is found in Let's Encrypt, there is
now a convenient mechanism for doing so. Running
<literal>systemctl clean --what=state acme-example.com.service</literal>
will remove all certificate files and the account data for the
given domain, allowing you to then
<literal>systemctl start acme-example.com.service</literal> to
generate fresh ones.
</para>
</section>
<section xml:id="module-security-acme-fix-jws">
<title>Fixing JWS Verification error</title>
<para>
It is possible that your account credentials file may become
corrupt and need to be regenerated. In this scenario lego will
produce the error <literal>JWS verification error</literal>. The
solution is to simply delete the associated accounts file and
re-run the affected service(s).
</para>
<programlisting>
# Find the accounts folder for the certificate
systemctl cat acme-example.com.service | grep -Po 'accounts/[^:]*'
export accountdir=&quot;$(!!)&quot;
# Move this folder to some place else
mv /var/lib/acme/.lego/$accountdir{,.bak}
# Recreate the folder using systemd-tmpfiles
systemd-tmpfiles --create
# Get a new account and reissue certificates
# Note: Do this for all certs that share the same account email address
systemctl start acme-example.com.service
</programlisting>
</section>
</chapter>

View file

@ -1,414 +0,0 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="module-security-acme">
<title>SSL/TLS Certificates with ACME</title>
<para>
NixOS supports automatic domain validation &amp; certificate retrieval and
renewal using the ACME protocol. Any provider can be used, but by default
NixOS uses Let's Encrypt. The alternative ACME client
<link xlink:href="https://go-acme.github.io/lego/">lego</link> is used under
the hood.
</para>
<para>
Automatic cert validation and configuration for Apache and Nginx virtual
hosts is included in NixOS, however if you would like to generate a wildcard
cert or you are not using a web server you will have to configure DNS
based validation.
</para>
<section xml:id="module-security-acme-prerequisites">
<title>Prerequisites</title>
<para>
To use the ACME module, you must accept the provider's terms of service
by setting <literal><xref linkend="opt-security.acme.acceptTerms" /></literal>
to <literal>true</literal>. The Let's Encrypt ToS can be found
<link xlink:href="https://letsencrypt.org/repository/">here</link>.
</para>
<para>
You must also set an email address to be used when creating accounts with
Let's Encrypt. You can set this for all certs with
<literal><xref linkend="opt-security.acme.defaults.email" /></literal>
and/or on a per-cert basis with
<literal><xref linkend="opt-security.acme.certs._name_.email" /></literal>.
This address is only used for registration and renewal reminders,
and cannot be used to administer the certificates in any way.
</para>
<para>
Alternatively, you can use a different ACME server by changing the
<literal><xref linkend="opt-security.acme.defaults.server" /></literal> option
to a provider of your choosing, or just change the server for one cert with
<literal><xref linkend="opt-security.acme.certs._name_.server" /></literal>.
</para>
<para>
You will need an HTTP server or DNS server for verification. For HTTP,
the server must have a webroot defined that can serve
<filename>.well-known/acme-challenge</filename>. This directory must be
writeable by the user that will run the ACME client. For DNS, you must
set up credentials with your provider/server for use with lego.
</para>
</section>
<section xml:id="module-security-acme-nginx">
<title>Using ACME certificates in Nginx</title>
<para>
NixOS supports fetching ACME certificates for you by setting
<literal><link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link>
= true;</literal> in a virtualHost config. We first create self-signed
placeholder certificates in place of the real ACME certs. The placeholder
certs are overwritten when the ACME certs arrive. For
<literal>foo.example.com</literal> the config would look like this:
</para>
<programlisting>
<xref linkend="opt-security.acme.acceptTerms" /> = true;
<xref linkend="opt-security.acme.defaults.email" /> = "admin+acme@example.com";
services.nginx = {
<link linkend="opt-services.nginx.enable">enable</link> = true;
<link linkend="opt-services.nginx.virtualHosts">virtualHosts</link> = {
"foo.example.com" = {
<link linkend="opt-services.nginx.virtualHosts._name_.forceSSL">forceSSL</link> = true;
<link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link> = true;
# All serverAliases will be added as <link linkend="opt-security.acme.certs._name_.extraDomainNames">extra domain names</link> on the certificate.
<link linkend="opt-services.nginx.virtualHosts._name_.serverAliases">serverAliases</link> = [ "bar.example.com" ];
locations."/" = {
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.root">root</link> = "/var/www";
};
};
# We can also add a different vhost and reuse the same certificate
# but we have to append extraDomainNames manually beforehand:
# <link linkend="opt-security.acme.certs._name_.extraDomainNames">security.acme.certs."foo.example.com".extraDomainNames</link> = [ "baz.example.com" ];
"baz.example.com" = {
<link linkend="opt-services.nginx.virtualHosts._name_.forceSSL">forceSSL</link> = true;
<link linkend="opt-services.nginx.virtualHosts._name_.useACMEHost">useACMEHost</link> = "foo.example.com";
locations."/" = {
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.root">root</link> = "/var/www";
};
};
};
}
</programlisting>
</section>
<section xml:id="module-security-acme-httpd">
<title>Using ACME certificates in Apache/httpd</title>
<para>
Using ACME certificates with Apache virtual hosts is identical
to using them with Nginx. The attribute names are all the same, just replace
"nginx" with "httpd" where appropriate.
</para>
</section>
<section xml:id="module-security-acme-configuring">
<title>Manual configuration of HTTP-01 validation</title>
<para>
First off you will need to set up a virtual host to serve the challenges.
This example uses a vhost called <literal>certs.example.com</literal>, with
the intent that you will generate certs for all your vhosts and redirect
everyone to HTTPS.
</para>
<programlisting>
<xref linkend="opt-security.acme.acceptTerms" /> = true;
<xref linkend="opt-security.acme.defaults.email" /> = "admin+acme@example.com";
# /var/lib/acme/.challenges must be writable by the ACME user
# and readable by the Nginx user. The easiest way to achieve
# this is to add the Nginx user to the ACME group.
<link linkend="opt-users.users._name_.extraGroups">users.users.nginx.extraGroups</link> = [ "acme" ];
services.nginx = {
<link linkend="opt-services.nginx.enable">enable</link> = true;
<link linkend="opt-services.nginx.virtualHosts">virtualHosts</link> = {
"acmechallenge.example.com" = {
# Catchall vhost, will redirect users to HTTPS for all vhosts
<link linkend="opt-services.nginx.virtualHosts._name_.serverAliases">serverAliases</link> = [ "*.example.com" ];
locations."/.well-known/acme-challenge" = {
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.root">root</link> = "/var/lib/acme/.challenges";
};
locations."/" = {
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.return">return</link> = "301 https://$host$request_uri";
};
};
};
}
# Alternative config for Apache
<link linkend="opt-users.users._name_.extraGroups">users.users.wwwrun.extraGroups</link> = [ "acme" ];
services.httpd = {
<link linkend="opt-services.httpd.enable">enable = true;</link>
<link linkend="opt-services.httpd.virtualHosts">virtualHosts</link> = {
"acmechallenge.example.com" = {
# Catchall vhost, will redirect users to HTTPS for all vhosts
<link linkend="opt-services.httpd.virtualHosts._name_.serverAliases">serverAliases</link> = [ "*.example.com" ];
# /var/lib/acme/.challenges must be writable by the ACME user and readable by the Apache user.
# By default, this is the case.
<link linkend="opt-services.httpd.virtualHosts._name_.documentRoot">documentRoot</link> = "/var/lib/acme/.challenges";
<link linkend="opt-services.httpd.virtualHosts._name_.extraConfig">extraConfig</link> = ''
RewriteEngine On
RewriteCond %{HTTPS} off
RewriteCond %{REQUEST_URI} !^/\.well-known/acme-challenge [NC]
RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} [R=301]
'';
};
};
}
</programlisting>
<para>
Now you need to configure ACME to generate a certificate.
</para>
<programlisting>
<xref linkend="opt-security.acme.certs"/>."foo.example.com" = {
<link linkend="opt-security.acme.certs._name_.webroot">webroot</link> = "/var/lib/acme/.challenges";
<link linkend="opt-security.acme.certs._name_.email">email</link> = "foo@example.com";
# Ensure that the web server you use can read the generated certs
# Take a look at the <link linkend="opt-services.nginx.group">group</link> option for the web server you choose.
<link linkend="opt-security.acme.certs._name_.group">group</link> = "nginx";
# Since we have a wildcard vhost to handle port 80,
# we can generate certs for anything!
# Just make sure your DNS resolves them.
<link linkend="opt-security.acme.certs._name_.extraDomainNames">extraDomainNames</link> = [ "mail.example.com" ];
};
</programlisting>
<para>
The private key <filename>key.pem</filename> and certificate
<filename>fullchain.pem</filename> will be put into
<filename>/var/lib/acme/foo.example.com</filename>.
</para>
<para>
Refer to <xref linkend="ch-options" /> for all available configuration
options for the <link linkend="opt-security.acme.certs">security.acme</link>
module.
</para>
</section>
<section xml:id="module-security-acme-config-dns">
<title>Configuring ACME for DNS validation</title>
<para>
This is useful if you want to generate a wildcard certificate, since
ACME servers will only hand out wildcard certs over DNS validation.
There are a number of supported DNS providers and servers you can utilise,
see the <link xlink:href="https://go-acme.github.io/lego/dns/">lego docs</link>
for provider/server specific configuration values. For the sake of these
docs, we will provide a fully self-hosted example using bind.
</para>
<programlisting>
services.bind = {
<link linkend="opt-services.bind.enable">enable</link> = true;
<link linkend="opt-services.bind.extraConfig">extraConfig</link> = ''
include "/var/lib/secrets/dnskeys.conf";
'';
<link linkend="opt-services.bind.zones">zones</link> = [
rec {
name = "example.com";
file = "/var/db/bind/${name}";
master = true;
extraConfig = "allow-update { key rfc2136key.example.com.; };";
}
];
}
# Now we can configure ACME
<xref linkend="opt-security.acme.acceptTerms" /> = true;
<xref linkend="opt-security.acme.defaults.email" /> = "admin+acme@example.com";
<xref linkend="opt-security.acme.certs" />."example.com" = {
<link linkend="opt-security.acme.certs._name_.domain">domain</link> = "*.example.com";
<link linkend="opt-security.acme.certs._name_.dnsProvider">dnsProvider</link> = "rfc2136";
<link linkend="opt-security.acme.certs._name_.credentialsFile">credentialsFile</link> = "/var/lib/secrets/certs.secret";
# We don't need to wait for propagation since this is a local DNS server
<link linkend="opt-security.acme.certs._name_.dnsPropagationCheck">dnsPropagationCheck</link> = false;
};
</programlisting>
<para>
The <filename>dnskeys.conf</filename> and <filename>certs.secret</filename>
must be kept secure and thus you should not keep their contents in your
Nix config. Instead, generate them one time with a systemd service:
</para>
<programlisting>
systemd.services.dns-rfc2136-conf = {
requiredBy = ["acme-example.com.service" "bind.service"];
before = ["acme-example.com.service" "bind.service"];
unitConfig = {
ConditionPathExists = "!/var/lib/secrets/dnskeys.conf";
};
serviceConfig = {
Type = "oneshot";
UMask = 0077;
};
path = [ pkgs.bind ];
script = ''
mkdir -p /var/lib/secrets
chmod 755 /var/lib/secrets
tsig-keygen rfc2136key.example.com &gt; /var/lib/secrets/dnskeys.conf
chown named:root /var/lib/secrets/dnskeys.conf
chmod 400 /var/lib/secrets/dnskeys.conf
# extract secret value from the dnskeys.conf
while read x y; do if [ "$x" = "secret" ]; then secret="''${y:1:''${#y}-3}"; fi; done &lt; /var/lib/secrets/dnskeys.conf
cat &gt; /var/lib/secrets/certs.secret &lt;&lt; EOF
RFC2136_NAMESERVER='127.0.0.1:53'
RFC2136_TSIG_ALGORITHM='hmac-sha256.'
RFC2136_TSIG_KEY='rfc2136key.example.com'
RFC2136_TSIG_SECRET='$secret'
EOF
chmod 400 /var/lib/secrets/certs.secret
'';
};
</programlisting>
<para>
Now you're all set to generate certs! You should monitor the first invocation
by running <literal>systemctl start acme-example.com.service &amp;
journalctl -fu acme-example.com.service</literal> and watching its log output.
</para>
</section>
<section xml:id="module-security-acme-config-dns-with-vhosts">
<title>Using DNS validation with web server virtual hosts</title>
<para>
It is possible to use DNS-01 validation with all certificates,
including those automatically configured via the Nginx/Apache
<literal><link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link></literal>
option. This configuration pattern is fully
supported and part of the module's test suite for Nginx + Apache.
</para>
<para>
You must follow the guide above on configuring DNS-01 validation
first, however instead of setting the options for one certificate
(e.g. <xref linkend="opt-security.acme.certs._name_.dnsProvider" />)
you will set them as defaults
(e.g. <xref linkend="opt-security.acme.defaults.dnsProvider" />).
</para>
<programlisting>
# Configure ACME appropriately
<xref linkend="opt-security.acme.acceptTerms" /> = true;
<xref linkend="opt-security.acme.defaults.email" /> = "admin+acme@example.com";
<xref linkend="opt-security.acme.defaults" /> = {
<link linkend="opt-security.acme.defaults.dnsProvider">dnsProvider</link> = "rfc2136";
<link linkend="opt-security.acme.defaults.credentialsFile">credentialsFile</link> = "/var/lib/secrets/certs.secret";
# We don't need to wait for propagation since this is a local DNS server
<link linkend="opt-security.acme.defaults.dnsPropagationCheck">dnsPropagationCheck</link> = false;
};
# For each virtual host you would like to use DNS-01 validation with,
# set acmeRoot = null
services.nginx = {
<link linkend="opt-services.nginx.enable">enable</link> = true;
<link linkend="opt-services.nginx.virtualHosts">virtualHosts</link> = {
"foo.example.com" = {
<link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link> = true;
<link linkend="opt-services.nginx.virtualHosts._name_.acmeRoot">acmeRoot</link> = null;
};
};
}
</programlisting>
<para>
And that's it! Next time your configuration is rebuilt, or when
you add a new virtualHost, it will be DNS-01 validated.
</para>
</section>
<section xml:id="module-security-acme-root-owned">
<title>Using ACME with services demanding root owned certificates</title>
<para>
Some services refuse to start if the configured certificate files
are not owned by root. PostgreSQL and OpenSMTPD are examples of these.
There is no way to change the user the ACME module uses (it will always be
<literal>acme</literal>), however you can use systemd's
<literal>LoadCredential</literal> feature to resolve this elegantly.
Below is an example configuration for OpenSMTPD, but this pattern
can be applied to any service.
</para>
<programlisting>
# Configure ACME however you like (DNS or HTTP validation), adding
# the following configuration for the relevant certificate.
# Note: You cannot use `systemctl reload` here as that would mean
# the LoadCredential configuration below would be skipped and
# the service would continue to use old certificates.
security.acme.certs."mail.example.com".postRun = ''
systemctl restart opensmtpd
'';
# Now you must augment OpenSMTPD's systemd service to load
# the certificate files.
<link linkend="opt-systemd.services._name_.requires">systemd.services.opensmtpd.requires</link> = ["acme-finished-mail.example.com.target"];
<link linkend="opt-systemd.services._name_.serviceConfig">systemd.services.opensmtpd.serviceConfig.LoadCredential</link> = let
certDir = config.security.acme.certs."mail.example.com".directory;
in [
"cert.pem:${certDir}/cert.pem"
"key.pem:${certDir}/key.pem"
];
# Finally, configure OpenSMTPD to use these certs.
services.opensmtpd = let
credsDir = "/run/credentials/opensmtpd.service";
in {
enable = true;
setSendmail = false;
serverConfiguration = ''
pki mail.example.com cert "${credsDir}/cert.pem"
pki mail.example.com key "${credsDir}/key.pem"
listen on localhost tls pki mail.example.com
action act1 relay host smtp://127.0.0.1:10027
match for local action act1
'';
};
</programlisting>
</section>
<section xml:id="module-security-acme-regenerate">
<title>Regenerating certificates</title>
<para>
Should you need to regenerate a particular certificate in a hurry, such
as when a vulnerability is found in Let's Encrypt, there is now a convenient
mechanism for doing so. Running
<literal>systemctl clean --what=state acme-example.com.service</literal>
will remove all certificate files and the account data for the given domain,
allowing you to then <literal>systemctl start acme-example.com.service</literal>
to generate fresh ones.
</para>
</section>
<section xml:id="module-security-acme-fix-jws">
<title>Fixing JWS Verification error</title>
<para>
It is possible that your account credentials file may become corrupt and need
to be regenerated. In this scenario lego will produce the error <literal>JWS verification error</literal>.
The solution is to simply delete the associated accounts file and
re-run the affected service(s).
</para>
<programlisting>
# Find the accounts folder for the certificate
systemctl cat acme-example.com.service | grep -Po 'accounts/[^:]*'
export accountdir="$(!!)"
# Move this folder to some place else
mv /var/lib/acme/.lego/$accountdir{,.bak}
# Recreate the folder using systemd-tmpfiles
systemd-tmpfiles --create
# Get a new account and reissue certificates
# Note: Do this for all certs that share the same account email address
systemctl start acme-example.com.service
</programlisting>
</section>
</chapter>

View file

@ -488,6 +488,9 @@ let
account [success=ok ignore=ignore default=die] ${pkgs.google-guest-oslogin}/lib/security/pam_oslogin_login.so account [success=ok ignore=ignore default=die] ${pkgs.google-guest-oslogin}/lib/security/pam_oslogin_login.so
account [success=ok default=ignore] ${pkgs.google-guest-oslogin}/lib/security/pam_oslogin_admin.so account [success=ok default=ignore] ${pkgs.google-guest-oslogin}/lib/security/pam_oslogin_admin.so
'' + '' +
optionalString config.services.homed.enable ''
account sufficient ${config.systemd.package}/lib/security/pam_systemd_home.so
'' +
# The required pam_unix.so module has to come after all the sufficient modules # The required pam_unix.so module has to come after all the sufficient modules
# because otherwise, the account lookup will fail if the user does not exist # because otherwise, the account lookup will fail if the user does not exist
# locally, for example with MySQL- or LDAP-auth. # locally, for example with MySQL- or LDAP-auth.
@ -541,8 +544,10 @@ let
# after it succeeds. Certain modules need to run after pam_unix # after it succeeds. Certain modules need to run after pam_unix
# prompts the user for password so we run it once with 'optional' at an # prompts the user for password so we run it once with 'optional' at an
# earlier point and it will run again with 'sufficient' further down. # earlier point and it will run again with 'sufficient' further down.
# We use try_first_pass the second time to avoid prompting password twice # We use try_first_pass the second time to avoid prompting password twice.
(optionalString (cfg.unixAuth && #
# The same principle applies to systemd-homed
(optionalString ((cfg.unixAuth || config.services.homed.enable) &&
(config.security.pam.enableEcryptfs (config.security.pam.enableEcryptfs
|| config.security.pam.enableFscrypt || config.security.pam.enableFscrypt
|| cfg.pamMount || cfg.pamMount
@ -553,7 +558,10 @@ let
|| cfg.failDelay.enable || cfg.failDelay.enable
|| cfg.duoSecurity.enable)) || cfg.duoSecurity.enable))
( (
'' optionalString config.services.homed.enable ''
auth optional ${config.systemd.package}/lib/security/pam_systemd_home.so
'' +
optionalString cfg.unixAuth ''
auth optional pam_unix.so ${optionalString cfg.allowNullPassword "nullok"} ${optionalString cfg.nodelay "nodelay"} likeauth auth optional pam_unix.so ${optionalString cfg.allowNullPassword "nullok"} ${optionalString cfg.nodelay "nodelay"} likeauth
'' + '' +
optionalString config.security.pam.enableEcryptfs '' optionalString config.security.pam.enableEcryptfs ''
@ -584,6 +592,9 @@ let
auth required ${pkgs.duo-unix}/lib/security/pam_duo.so auth required ${pkgs.duo-unix}/lib/security/pam_duo.so
'' ''
)) + )) +
optionalString config.services.homed.enable ''
auth sufficient ${config.systemd.package}/lib/security/pam_systemd_home.so
'' +
optionalString cfg.unixAuth '' optionalString cfg.unixAuth ''
auth sufficient pam_unix.so ${optionalString cfg.allowNullPassword "nullok"} ${optionalString cfg.nodelay "nodelay"} likeauth try_first_pass auth sufficient pam_unix.so ${optionalString cfg.allowNullPassword "nullok"} ${optionalString cfg.nodelay "nodelay"} likeauth try_first_pass
'' + '' +
@ -605,6 +616,10 @@ let
auth required pam_deny.so auth required pam_deny.so
# Password management. # Password management.
'' +
optionalString config.services.homed.enable ''
password sufficient ${config.systemd.package}/lib/security/pam_systemd_home.so
'' + ''
password sufficient pam_unix.so nullok sha512 password sufficient pam_unix.so nullok sha512
'' + '' +
optionalString config.security.pam.enableEcryptfs '' optionalString config.security.pam.enableEcryptfs ''
@ -650,6 +665,9 @@ let
++ optional (cfg.ttyAudit.enablePattern != null) "enable=${cfg.ttyAudit.enablePattern}" ++ optional (cfg.ttyAudit.enablePattern != null) "enable=${cfg.ttyAudit.enablePattern}"
++ optional (cfg.ttyAudit.disablePattern != null) "disable=${cfg.ttyAudit.disablePattern}" ++ optional (cfg.ttyAudit.disablePattern != null) "disable=${cfg.ttyAudit.disablePattern}"
)) + )) +
optionalString config.services.homed.enable ''
session required ${config.systemd.package}/lib/security/pam_systemd_home.so
'' +
optionalString cfg.makeHomeDir '' optionalString cfg.makeHomeDir ''
session required ${pkgs.pam}/lib/security/pam_mkhomedir.so silent skel=${config.security.pam.makeHomeDir.skelDirectory} umask=0077 session required ${pkgs.pam}/lib/security/pam_mkhomedir.so silent skel=${config.security.pam.makeHomeDir.skelDirectory} umask=0077
'' + '' +
@ -1361,6 +1379,9 @@ in
'' + '' +
optionalString config.virtualisation.lxc.lxcfs.enable '' optionalString config.virtualisation.lxc.lxcfs.enable ''
mr ${pkgs.lxc}/lib/security/pam_cgfs.so mr ${pkgs.lxc}/lib/security/pam_cgfs.so
'' +
optionalString config.services.homed.enable ''
mr ${config.systemd.package}/lib/security/pam_systemd_home.so
''; '';
}; };

View file

@ -0,0 +1,116 @@
# NixOS module for gmediarender (gmrender-resurrect): a headless
# GStreamer-based UPnP/DLNA media renderer that runs as a hardened
# systemd service under a dynamic user.
{ pkgs, lib, config, utils, ... }:
with lib;
let
  cfg = config.services.gmediarender;
in
{
  options.services.gmediarender = {
    enable = mkEnableOption (mdDoc "the gmediarender DLNA renderer");
    audioDevice = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = mdDoc ''
        The audio device to use.
      '';
    };
    audioSink = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = mdDoc ''
        The audio sink to use.
      '';
    };
    friendlyName = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = mdDoc ''
        A "friendly name" for identifying the endpoint.
      '';
    };
    initialVolume = mkOption {
      type = types.nullOr types.int;
      default = 0;
      description = mdDoc ''
        A default volume attenuation (in dB) for the endpoint.
      '';
    };
    package = mkPackageOptionMD pkgs "gmediarender" {
      default = "gmrender-resurrect";
    };
    port = mkOption {
      type = types.nullOr types.port;
      default = null;
      description = mdDoc "Port that will be used to accept client connections.";
    };
    uuid = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = mdDoc ''
        A UUID for uniquely identifying the endpoint. If you have
        multiple renderers on your network, you MUST set this.
      '';
    };
  };
  config = mkIf cfg.enable {
    systemd = {
      services.gmediarender = {
        # `after` alone only orders the unit; `wants` is needed so that
        # network-online.target is actually pulled in and waited for.
        wants = [ "network-online.target" ];
        after = [ "network-online.target" ];
        wantedBy = [ "multi-user.target" ];
        description = "gmediarender server daemon";
        environment = {
          # Point the XDG cache at the per-service runtime directory (%t)
          # so the DynamicUser sandbox has a writable cache location.
          XDG_CACHE_HOME = "%t/gmediarender";
        };
        serviceConfig = {
          DynamicUser = true;
          User = "gmediarender";
          Group = "gmediarender";
          # Needed for direct access to sound devices (e.g. ALSA).
          SupplementaryGroups = [ "audio" ];
          # Assemble the command line; every optional flag is emitted only
          # when its option is set, and user-supplied values are escaped
          # for systemd's ExecStart parsing.
          ExecStart =
            "${cfg.package}/bin/gmediarender " +
            optionalString (cfg.audioDevice != null) ("--gstout-audiodevice=${utils.escapeSystemdExecArg cfg.audioDevice} ") +
            optionalString (cfg.audioSink != null) ("--gstout-audiosink=${utils.escapeSystemdExecArg cfg.audioSink} ") +
            optionalString (cfg.friendlyName != null) ("--friendly-name=${utils.escapeSystemdExecArg cfg.friendlyName} ") +
            # Guard against null as well: the option type allows null, and
            # `toString null` would otherwise produce a broken empty flag.
            optionalString (cfg.initialVolume != null && cfg.initialVolume != 0) ("--initial-volume=${toString cfg.initialVolume} ") +
            optionalString (cfg.port != null) ("--port=${toString cfg.port} ") +
            optionalString (cfg.uuid != null) ("--uuid=${utils.escapeSystemdExecArg cfg.uuid} ");
          Restart = "always";
          RuntimeDirectory = "gmediarender";
          # Security options:
          CapabilityBoundingSet = "";
          LockPersonality = true;
          MemoryDenyWriteExecute = true;
          NoNewPrivileges = true;
          # PrivateDevices = true;  # left off — presumably the renderer needs real sound devices; confirm before enabling
          PrivateTmp = true;
          PrivateUsers = true;
          ProcSubset = "pid";
          ProtectClock = true;
          ProtectControlGroups = true;
          ProtectHome = true;
          ProtectHostname = true;
          ProtectKernelLogs = true;
          ProtectKernelModules = true;
          ProtectKernelTunables = true;
          ProtectProc = "invisible";
          RestrictNamespaces = true;
          RestrictRealtime = true;
          RestrictSUIDSGID = true;
          SystemCallArchitectures = "native";
          SystemCallFilter = [ "@system-service" "~@privileged" ];
          # systemd parses UMask as octal; write it as a string so the
          # rendered unit reads UMask=066 (the Nix integer 066 is just 66).
          UMask = "066";
        };
      };
    };
  };
}

View file

@ -0,0 +1,163 @@
# BorgBackup {#module-borgbase}
*Source:* {file}`modules/services/backup/borgbackup.nix`
*Upstream documentation:* <https://borgbackup.readthedocs.io/>
[BorgBackup](https://www.borgbackup.org/) (short: Borg)
is a deduplicating backup program. Optionally, it supports compression and
authenticated encryption.
The main goal of Borg is to provide an efficient and secure way to backup
data. The data deduplication technique used makes Borg suitable for daily
backups since only changes are stored. The authenticated encryption technique
makes it suitable for backups to not fully trusted targets.
## Configuring {#module-services-backup-borgbackup-configuring}
A complete list of options for the BorgBackup module may be found
[here](#opt-services.borgbackup.jobs).
## Basic usage for a local backup {#opt-services-backup-borgbackup-local-directory}
A very basic configuration for backing up to a locally accessible directory is:
```
{
  services.borgbackup.jobs = {
    rootBackup = {
      paths = "/";
      exclude = [ "/nix" "/path/to/local/repo" ];
      repo = "/path/to/local/repo";
      doInit = true;
      encryption = {
        mode = "repokey";
        passphrase = "secret";
      };
      compression = "auto,lzma";
      startAt = "weekly";
    };
  };
}
```
::: {.warning}
If you do not want the passphrase to be stored in the world-readable
Nix store, use passCommand. You find an example below.
:::
## Create a borg backup server {#opt-services-backup-create-server}
You should use a different SSH key for each repository you write to,
because the specified keys are restricted to running borg serve and can only
access this single repository. You need the output of the generate pub file.
```ShellSession
# sudo ssh-keygen -N '' -t ed25519 -f /run/keys/id_ed25519_my_borg_repo
# cat /run/keys/id_ed25519_my_borg_repo
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID78zmOyA+5uPG4Ot0hfAy+sLDPU1L4AiIoRYEIVbbQ/ root@nixos
```
Add the following snippet to your NixOS configuration:
```
{
services.borgbackup.repos = {
my_borg_repo = {
authorizedKeys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID78zmOyA+5uPG4Ot0hfAy+sLDPU1L4AiIoRYEIVbbQ/ root@nixos"
] ;
path = "/var/lib/my_borg_repo" ;
};
};
}
```
## Backup to the borg repository server {#opt-services-backup-borgbackup-remote-server}
The following NixOS snippet creates an hourly backup to the service
(on the host nixos) as created in the section above. We assume
that you have stored a secret passphrase in the file
{file}`/run/keys/borgbackup_passphrase`, which should be only
accessible by root
```
{
services.borgbackup.jobs = {
backupToLocalServer = {
paths = [ "/etc/nixos" ];
doInit = true;
repo = "borg@nixos:." ;
encryption = {
mode = "repokey-blake2";
passCommand = "cat /run/keys/borgbackup_passphrase";
};
environment = { BORG_RSH = "ssh -i /run/keys/id_ed25519_my_borg_repo"; };
compression = "auto,lzma";
startAt = "hourly";
};
};
};
```
The following few commands (run as root) let you test your backup.
```
> nixos-rebuild switch
...restarting the following units: polkit.service
> systemctl restart borgbackup-job-backupToLocalServer
> sleep 10
> systemctl restart borgbackup-job-backupToLocalServer
> export BORG_PASSPHRASE=topSecret
> borg list --rsh='ssh -i /run/keys/id_ed25519_my_borg_repo' borg@nixos:.
nixos-backupToLocalServer-2020-03-30T21:46:17 Mon, 2020-03-30 21:46:19 [84feb97710954931ca384182f5f3cb90665f35cef214760abd7350fb064786ac]
nixos-backupToLocalServer-2020-03-30T21:46:30 Mon, 2020-03-30 21:46:32 [e77321694ecd160ca2228611747c6ad1be177d6e0d894538898de7a2621b6e68]
```
## Backup to a hosting service {#opt-services-backup-borgbackup-borgbase}
Several companies offer [(paid) hosting services](https://www.borgbackup.org/support/commercial.html)
for Borg repositories.
To backup your home directory to borgbase you have to:
- Generate a SSH key without a password, to access the remote server. E.g.
sudo ssh-keygen -N '' -t ed25519 -f /run/keys/id_ed25519_borgbase
- Create the repository on the server by following the instructions for your
hosting server.
- Initialize the repository on the server. E.g.
sudo borg init --encryption=repokey-blake2 \
-rsh "ssh -i /run/keys/id_ed25519_borgbase" \
zzz2aaaaa@zzz2aaaaa.repo.borgbase.com:repo
- Add it to your NixOS configuration, e.g.
{
services.borgbackup.jobs = {
my_Remote_Backup = {
paths = [ "/" ];
exclude = [ "/nix" "'**/.cache'" ];
repo = "zzz2aaaaa@zzz2aaaaa.repo.borgbase.com:repo";
encryption = {
mode = "repokey-blake2";
passCommand = "cat /run/keys/borgbackup_passphrase";
};
environment = { BORG_RSH = "ssh -i /run/keys/id_ed25519_borgbase"; };
compression = "auto,lzma";
startAt = "daily";
};
};
}
## Vorta backup client for the desktop {#opt-services-backup-borgbackup-vorta}
Vorta is a backup client for macOS and Linux desktops. It integrates the
mighty BorgBackup with your desktop environment to protect your data from
disk failure, ransomware and theft.
It can be installed in NixOS e.g. by adding `pkgs.vorta`
to [](#opt-environment.systemPackages).
Details about using Vorta can be found under
[https://vorta.borgbase.com](https://vorta.borgbase.com/usage) .

View file

@ -1,209 +1,215 @@
<chapter xmlns="http://docbook.org/ns/docbook" <!-- Do not edit this file directly, edit its companion .md instead
xmlns:xlink="http://www.w3.org/1999/xlink" and regenerate this file using nixos/doc/manual/md-to-db.sh -->
xmlns:xi="http://www.w3.org/2001/XInclude" <chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-borgbase">
version="5.0" <title>BorgBackup</title>
xml:id="module-borgbase">
<title>BorgBackup</title>
<para> <para>
<emphasis>Source:</emphasis> <emphasis>Source:</emphasis>
<filename>modules/services/backup/borgbackup.nix</filename> <filename>modules/services/backup/borgbackup.nix</filename>
</para>
<para>
<emphasis>Upstream documentation:</emphasis>
<link xlink:href="https://borgbackup.readthedocs.io/"/>
</para>
<para>
<link xlink:href="https://www.borgbackup.org/">BorgBackup</link> (short: Borg)
is a deduplicating backup program. Optionally, it supports compression and
authenticated encryption.
</para> </para>
<para> <para>
The main goal of Borg is to provide an efficient and secure way to backup <emphasis>Upstream documentation:</emphasis>
data. The data deduplication technique used makes Borg suitable for daily <link xlink:href="https://borgbackup.readthedocs.io/">https://borgbackup.readthedocs.io/</link>
backups since only changes are stored. The authenticated encryption technique </para>
makes it suitable for backups to not fully trusted targets. <para>
</para> <link xlink:href="https://www.borgbackup.org/">BorgBackup</link>
(short: Borg) is a deduplicating backup program. Optionally, it
supports compression and authenticated encryption.
</para>
<para>
The main goal of Borg is to provide an efficient and secure way to
backup data. The data deduplication technique used makes Borg
suitable for daily backups since only changes are stored. The
authenticated encryption technique makes it suitable for backups to
not fully trusted targets.
</para>
<section xml:id="module-services-backup-borgbackup-configuring"> <section xml:id="module-services-backup-borgbackup-configuring">
<title>Configuring</title> <title>Configuring</title>
<para> <para>
A complete list of options for the Borgbase module may be found A complete list of options for the Borgbase module may be found
<link linkend="opt-services.borgbackup.jobs">here</link>. <link linkend="opt-services.borgbackup.jobs">here</link>.
</para> </para>
</section> </section>
<section xml:id="opt-services-backup-borgbackup-local-directory"> <section xml:id="opt-services-backup-borgbackup-local-directory">
<title>Basic usage for a local backup</title> <title>Basic usage for a local backup</title>
<para>
<para> A very basic configuration for backing up to a locally accessible
A very basic configuration for backing up to a locally accessible directory directory is:
is: </para>
<programlisting> <programlisting>
{ {
opt.services.borgbackup.jobs = { opt.services.borgbackup.jobs = {
{ rootBackup = { { rootBackup = {
paths = "/"; paths = &quot;/&quot;;
exclude = [ "/nix" "/path/to/local/repo" ]; exclude = [ &quot;/nix&quot; &quot;/path/to/local/repo&quot; ];
repo = "/path/to/local/repo"; repo = &quot;/path/to/local/repo&quot;;
doInit = true; doInit = true;
encryption = { encryption = {
mode = "repokey"; mode = &quot;repokey&quot;;
passphrase = "secret"; passphrase = &quot;secret&quot;;
}; };
compression = "auto,lzma"; compression = &quot;auto,lzma&quot;;
startAt = "weekly"; startAt = &quot;weekly&quot;;
}; };
} }
}; };
}</programlisting> }
</para> </programlisting>
<warning> <warning>
<para>
If you do not want the passphrase to be stored in the
world-readable Nix store, use passCommand. You find an example
below.
</para>
</warning>
</section>
<section xml:id="opt-services-backup-create-server">
<title>Create a borg backup server</title>
<para> <para>
If you do not want the passphrase to be stored in the world-readable You should use a different SSH key for each repository you write
Nix store, use passCommand. You find an example below. to, because the specified keys are restricted to running borg
</para> serve and can only access this single repository. You need the
</warning> output of the generate pub file.
</section>
<section xml:id="opt-services-backup-create-server">
<title>Create a borg backup server</title>
<para>You should use a different SSH key for each repository you write to,
because the specified keys are restricted to running borg serve and can only
access this single repository. You need the output of the generate pub file.
</para>
<para>
<screen>
<prompt># </prompt>sudo ssh-keygen -N '' -t ed25519 -f /run/keys/id_ed25519_my_borg_repo
<prompt># </prompt>cat /run/keys/id_ed25519_my_borg_repo
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID78zmOyA+5uPG4Ot0hfAy+sLDPU1L4AiIoRYEIVbbQ/ root@nixos</screen>
</para> </para>
<programlisting>
# sudo ssh-keygen -N '' -t ed25519 -f /run/keys/id_ed25519_my_borg_repo
# cat /run/keys/id_ed25519_my_borg_repo
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID78zmOyA+5uPG4Ot0hfAy+sLDPU1L4AiIoRYEIVbbQ/ root@nixos
</programlisting>
<para> <para>
Add the following snippet to your NixOS configuration: Add the following snippet to your NixOS configuration:
<programlisting> </para>
<programlisting>
{ {
services.borgbackup.repos = { services.borgbackup.repos = {
my_borg_repo = { my_borg_repo = {
authorizedKeys = [ authorizedKeys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID78zmOyA+5uPG4Ot0hfAy+sLDPU1L4AiIoRYEIVbbQ/ root@nixos" &quot;ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID78zmOyA+5uPG4Ot0hfAy+sLDPU1L4AiIoRYEIVbbQ/ root@nixos&quot;
] ; ] ;
path = "/var/lib/my_borg_repo" ; path = &quot;/var/lib/my_borg_repo&quot; ;
}; };
}; };
}</programlisting> }
</programlisting>
</section>
<section xml:id="opt-services-backup-borgbackup-remote-server">
<title>Backup to the borg repository server</title>
<para>
The following NixOS snippet creates an hourly backup to the
service (on the host nixos) as created in the section above. We
assume that you have stored a secret passphrasse in the file
<filename>/run/keys/borgbackup_passphrase</filename>, which should
be only accessible by root
</para> </para>
</section> <programlisting>
<section xml:id="opt-services-backup-borgbackup-remote-server">
<title>Backup to the borg repository server</title>
<para>The following NixOS snippet creates an hourly backup to the service
(on the host nixos) as created in the section above. We assume
that you have stored a secret passphrasse in the file
<code>/run/keys/borgbackup_passphrase</code>, which should be only
accessible by root
</para>
<para>
<programlisting>
{ {
services.borgbackup.jobs = { services.borgbackup.jobs = {
backupToLocalServer = { backupToLocalServer = {
paths = [ "/etc/nixos" ]; paths = [ &quot;/etc/nixos&quot; ];
doInit = true; doInit = true;
repo = "borg@nixos:." ; repo = &quot;borg@nixos:.&quot; ;
encryption = { encryption = {
mode = "repokey-blake2"; mode = &quot;repokey-blake2&quot;;
passCommand = "cat /run/keys/borgbackup_passphrase"; passCommand = &quot;cat /run/keys/borgbackup_passphrase&quot;;
}; };
environment = { BORG_RSH = "ssh -i /run/keys/id_ed25519_my_borg_repo"; }; environment = { BORG_RSH = &quot;ssh -i /run/keys/id_ed25519_my_borg_repo&quot;; };
compression = "auto,lzma"; compression = &quot;auto,lzma&quot;;
startAt = "hourly"; startAt = &quot;hourly&quot;;
}; };
}; };
};</programlisting> };
</para> </programlisting>
<para>The following few commands (run as root) let you test your backup. <para>
<programlisting> The following few commands (run as root) let you test your backup.
> nixos-rebuild switch </para>
<programlisting>
&gt; nixos-rebuild switch
...restarting the following units: polkit.service ...restarting the following units: polkit.service
> systemctl restart borgbackup-job-backupToLocalServer &gt; systemctl restart borgbackup-job-backupToLocalServer
> sleep 10 &gt; sleep 10
> systemctl restart borgbackup-job-backupToLocalServer &gt; systemctl restart borgbackup-job-backupToLocalServer
> export BORG_PASSPHRASE=topSecrect &gt; export BORG_PASSPHRASE=topSecrect
> borg list --rsh='ssh -i /run/keys/id_ed25519_my_borg_repo' borg@nixos:. &gt; borg list --rsh='ssh -i /run/keys/id_ed25519_my_borg_repo' borg@nixos:.
nixos-backupToLocalServer-2020-03-30T21:46:17 Mon, 2020-03-30 21:46:19 [84feb97710954931ca384182f5f3cb90665f35cef214760abd7350fb064786ac] nixos-backupToLocalServer-2020-03-30T21:46:17 Mon, 2020-03-30 21:46:19 [84feb97710954931ca384182f5f3cb90665f35cef214760abd7350fb064786ac]
nixos-backupToLocalServer-2020-03-30T21:46:30 Mon, 2020-03-30 21:46:32 [e77321694ecd160ca2228611747c6ad1be177d6e0d894538898de7a2621b6e68]</programlisting> nixos-backupToLocalServer-2020-03-30T21:46:30 Mon, 2020-03-30 21:46:32 [e77321694ecd160ca2228611747c6ad1be177d6e0d894538898de7a2621b6e68]
</para> </programlisting>
</section> </section>
<section xml:id="opt-services-backup-borgbackup-borgbase">
<section xml:id="opt-services-backup-borgbackup-borgbase"> <title>Backup to a hosting service</title>
<title>Backup to a hosting service</title> <para>
Several companies offer
<para> <link xlink:href="https://www.borgbackup.org/support/commercial.html">(paid)
Several companies offer <link
xlink:href="https://www.borgbackup.org/support/commercial.html">(paid)
hosting services</link> for Borg repositories. hosting services</link> for Borg repositories.
</para>
<para>
To backup your home directory to borgbase you have to:
</para>
<itemizedlist>
<listitem>
<para>
Generate a SSH key without a password, to access the remote server. E.g.
</para> </para>
<para> <para>
<programlisting>sudo ssh-keygen -N '' -t ed25519 -f /run/keys/id_ed25519_borgbase</programlisting> To backup your home directory to borgbase you have to:
</para> </para>
</listitem> <itemizedlist>
<listitem> <listitem>
<para> <para>
Create the repository on the server by following the instructions for your Generate a SSH key without a password, to access the remote
hosting server. server. E.g.
</para> </para>
</listitem> <programlisting>
<listitem> sudo ssh-keygen -N '' -t ed25519 -f /run/keys/id_ed25519_borgbase
<para> </programlisting>
Initialize the repository on the server. Eg. </listitem>
<programlisting> <listitem>
<para>
Create the repository on the server by following the
instructions for your hosting server.
</para>
</listitem>
<listitem>
<para>
Initialize the repository on the server. Eg.
</para>
<programlisting>
sudo borg init --encryption=repokey-blake2 \ sudo borg init --encryption=repokey-blake2 \
-rsh "ssh -i /run/keys/id_ed25519_borgbase" \ -rsh &quot;ssh -i /run/keys/id_ed25519_borgbase&quot; \
zzz2aaaaa@zzz2aaaaa.repo.borgbase.com:repo</programlisting> zzz2aaaaa@zzz2aaaaa.repo.borgbase.com:repo
</para> </programlisting>
</listitem> </listitem>
<listitem> <listitem>
<para>Add it to your NixOS configuration, e.g. <para>
<programlisting> Add it to your NixOS configuration, e.g.
</para>
<programlisting>
{ {
services.borgbackup.jobs = { services.borgbackup.jobs = {
my_Remote_Backup = { my_Remote_Backup = {
paths = [ "/" ]; paths = [ &quot;/&quot; ];
exclude = [ "/nix" "'**/.cache'" ]; exclude = [ &quot;/nix&quot; &quot;'**/.cache'&quot; ];
repo = "zzz2aaaaa@zzz2aaaaa.repo.borgbase.com:repo"; repo = &quot;zzz2aaaaa@zzz2aaaaa.repo.borgbase.com:repo&quot;;
encryption = { encryption = {
mode = "repokey-blake2"; mode = &quot;repokey-blake2&quot;;
passCommand = "cat /run/keys/borgbackup_passphrase"; passCommand = &quot;cat /run/keys/borgbackup_passphrase&quot;;
}; };
environment = { BORG_RSH = "ssh -i /run/keys/id_ed25519_borgbase"; }; environment = { BORG_RSH = &quot;ssh -i /run/keys/id_ed25519_borgbase&quot;; };
compression = "auto,lzma"; compression = &quot;auto,lzma&quot;;
startAt = "daily"; startAt = &quot;daily&quot;;
}; };
}; };
}}</programlisting> }}
</para> </programlisting>
</listitem> </listitem>
</itemizedlist> </itemizedlist>
</section> </section>
<section xml:id="opt-services-backup-borgbackup-vorta"> <section xml:id="opt-services-backup-borgbackup-vorta">
<title>Vorta backup client for the desktop</title> <title>Vorta backup client for the desktop</title>
<para> <para>
Vorta is a backup client for macOS and Linux desktops. It integrates the Vorta is a backup client for macOS and Linux desktops. It
mighty BorgBackup with your desktop environment to protect your data from integrates the mighty BorgBackup with your desktop environment to
disk failure, ransomware and theft. protect your data from disk failure, ransomware and theft.
</para> </para>
<para> <para>
It can be installed in NixOS e.g. by adding <package>pkgs.vorta</package> It can be installed in NixOS e.g. by adding
to <xref linkend="opt-environment.systemPackages" />. <literal>pkgs.vorta</literal> to
</para> <xref linkend="opt-environment.systemPackages" />.
<para> </para>
Details about using Vorta can be found under <link <para>
xlink:href="https://vorta.borgbase.com/usage">https://vorta.borgbase.com Details about using Vorta can be found under
</link>. <link xlink:href="https://vorta.borgbase.com/usage">https://vorta.borgbase.com</link>
</para> .
</section> </para>
</section>
</chapter> </chapter>

View file

@ -20,4 +20,6 @@ in
config = mkIf cfg.enable { config = mkIf cfg.enable {
services.github-runners.${cfg.name} = cfg; services.github-runners.${cfg.name} = cfg;
}; };
meta.maintainers = with maintainers; [ veehaitch newam ];
} }

View file

@ -127,10 +127,11 @@ with lib;
serviceOverrides = mkOption { serviceOverrides = mkOption {
type = types.attrs; type = types.attrs;
description = lib.mdDoc '' description = lib.mdDoc ''
Overrides for the systemd service. Can be used to adjust the sandboxing options. Modify the systemd service. Can be used to, e.g., adjust the sandboxing options.
''; '';
example = { example = {
ProtectHome = false; ProtectHome = false;
RestrictAddressFamilies = [ "AF_PACKET" ];
}; };
default = {}; default = {};
}; };

View file

@ -45,222 +45,224 @@ in
config.nix.package config.nix.package
] ++ cfg.extraPackages; ] ++ cfg.extraPackages;
serviceConfig = { serviceConfig = mkMerge [
ExecStart = "${cfg.package}/bin/Runner.Listener run --startuptype service"; {
ExecStart = "${cfg.package}/bin/Runner.Listener run --startuptype service";
# Does the following, sequentially: # Does the following, sequentially:
# - If the module configuration or the token has changed, purge the state directory, # - If the module configuration or the token has changed, purge the state directory,
# and create the current and the new token file with the contents of the configured # and create the current and the new token file with the contents of the configured
# token. While both files have the same content, only the later is accessible by # token. While both files have the same content, only the later is accessible by
# the service user. # the service user.
# - Configure the runner using the new token file. When finished, delete it. # - Configure the runner using the new token file. When finished, delete it.
# - Set up the directory structure by creating the necessary symlinks. # - Set up the directory structure by creating the necessary symlinks.
ExecStartPre = ExecStartPre =
let let
# Wrapper script which expects the full path of the state, working and logs # Wrapper script which expects the full path of the state, working and logs
# directory as arguments. Overrides the respective systemd variables to provide # directory as arguments. Overrides the respective systemd variables to provide
# unambiguous directory names. This becomes relevant, for example, if the # unambiguous directory names. This becomes relevant, for example, if the
# caller overrides any of the StateDirectory=, RuntimeDirectory= or LogDirectory= # caller overrides any of the StateDirectory=, RuntimeDirectory= or LogDirectory=
# to contain more than one directory. This causes systemd to set the respective # to contain more than one directory. This causes systemd to set the respective
# environment variables with the path of all of the given directories, separated # environment variables with the path of all of the given directories, separated
# by a colon. # by a colon.
writeScript = name: lines: pkgs.writeShellScript "${svcName}-${name}.sh" '' writeScript = name: lines: pkgs.writeShellScript "${svcName}-${name}.sh" ''
set -euo pipefail set -euo pipefail
STATE_DIRECTORY="$1" STATE_DIRECTORY="$1"
WORK_DIRECTORY="$2" WORK_DIRECTORY="$2"
LOGS_DIRECTORY="$3" LOGS_DIRECTORY="$3"
${lines} ${lines}
''; '';
runnerRegistrationConfig = getAttrs [ "name" "tokenFile" "url" "runnerGroup" "extraLabels" "ephemeral" "workDir" ] cfg; runnerRegistrationConfig = getAttrs [ "name" "tokenFile" "url" "runnerGroup" "extraLabels" "ephemeral" "workDir" ] cfg;
newConfigPath = builtins.toFile "${svcName}-config.json" (builtins.toJSON runnerRegistrationConfig); newConfigPath = builtins.toFile "${svcName}-config.json" (builtins.toJSON runnerRegistrationConfig);
currentConfigPath = "$STATE_DIRECTORY/.nixos-current-config.json"; currentConfigPath = "$STATE_DIRECTORY/.nixos-current-config.json";
newConfigTokenPath= "$STATE_DIRECTORY/.new-token"; newConfigTokenPath = "$STATE_DIRECTORY/.new-token";
currentConfigTokenPath = "$STATE_DIRECTORY/${currentConfigTokenFilename}"; currentConfigTokenPath = "$STATE_DIRECTORY/${currentConfigTokenFilename}";
runnerCredFiles = [ runnerCredFiles = [
".credentials" ".credentials"
".credentials_rsaparams" ".credentials_rsaparams"
".runner" ".runner"
]; ];
unconfigureRunner = writeScript "unconfigure" '' unconfigureRunner = writeScript "unconfigure" ''
copy_tokens() { copy_tokens() {
# Copy the configured token file to the state dir and allow the service user to read the file # Copy the configured token file to the state dir and allow the service user to read the file
install --mode=666 ${escapeShellArg cfg.tokenFile} "${newConfigTokenPath}" install --mode=666 ${escapeShellArg cfg.tokenFile} "${newConfigTokenPath}"
# Also copy current file to allow for a diff on the next start # Also copy current file to allow for a diff on the next start
install --mode=600 ${escapeShellArg cfg.tokenFile} "${currentConfigTokenPath}" install --mode=600 ${escapeShellArg cfg.tokenFile} "${currentConfigTokenPath}"
} }
clean_state() { clean_state() {
find "$STATE_DIRECTORY/" -mindepth 1 -delete find "$STATE_DIRECTORY/" -mindepth 1 -delete
copy_tokens copy_tokens
} }
diff_config() { diff_config() {
changed=0 changed=0
# Check for module config changes # Check for module config changes
[[ -f "${currentConfigPath}" ]] \ [[ -f "${currentConfigPath}" ]] \
&& ${pkgs.diffutils}/bin/diff -q '${newConfigPath}' "${currentConfigPath}" >/dev/null 2>&1 \ && ${pkgs.diffutils}/bin/diff -q '${newConfigPath}' "${currentConfigPath}" >/dev/null 2>&1 \
|| changed=1 || changed=1
# Also check the content of the token file # Also check the content of the token file
[[ -f "${currentConfigTokenPath}" ]] \ [[ -f "${currentConfigTokenPath}" ]] \
&& ${pkgs.diffutils}/bin/diff -q "${currentConfigTokenPath}" ${escapeShellArg cfg.tokenFile} >/dev/null 2>&1 \ && ${pkgs.diffutils}/bin/diff -q "${currentConfigTokenPath}" ${escapeShellArg cfg.tokenFile} >/dev/null 2>&1 \
|| changed=1 || changed=1
# If the config has changed, remove old state and copy tokens # If the config has changed, remove old state and copy tokens
if [[ "$changed" -eq 1 ]]; then if [[ "$changed" -eq 1 ]]; then
echo "Config has changed, removing old runner state." echo "Config has changed, removing old runner state."
echo "The old runner will still appear in the GitHub Actions UI." \ echo "The old runner will still appear in the GitHub Actions UI." \
"You have to remove it manually." "You have to remove it manually."
clean_state
fi
}
if [[ "${optionalString cfg.ephemeral "1"}" ]]; then
# In ephemeral mode, we always want to start with a clean state
clean_state clean_state
fi elif [[ "$(ls -A "$STATE_DIRECTORY")" ]]; then
} # There are state files from a previous run; diff them to decide if we need a new registration
if [[ "${optionalString cfg.ephemeral "1"}" ]]; then diff_config
# In ephemeral mode, we always want to start with a clean state
clean_state
elif [[ "$(ls -A "$STATE_DIRECTORY")" ]]; then
# There are state files from a previous run; diff them to decide if we need a new registration
diff_config
else
# The state directory is entirely empty which indicates a first start
copy_tokens
fi
'';
configureRunner = writeScript "configure" ''
if [[ -e "${newConfigTokenPath}" ]]; then
echo "Configuring GitHub Actions Runner"
args=(
--unattended
--disableupdate
--work "$WORK_DIRECTORY"
--url ${escapeShellArg cfg.url}
--labels ${escapeShellArg (concatStringsSep "," cfg.extraLabels)}
--name ${escapeShellArg cfg.name}
${optionalString cfg.replace "--replace"}
${optionalString (cfg.runnerGroup != null) "--runnergroup ${escapeShellArg cfg.runnerGroup}"}
${optionalString cfg.ephemeral "--ephemeral"}
)
# If the token file contains a PAT (i.e., it starts with "ghp_" or "github_pat_"), we have to use the --pat option,
# if it is not a PAT, we assume it contains a registration token and use the --token option
token=$(<"${newConfigTokenPath}")
if [[ "$token" =~ ^ghp_* ]] || [[ "$token" =~ ^github_pat_* ]]; then
args+=(--pat "$token")
else else
args+=(--token "$token") # The state directory is entirely empty which indicates a first start
copy_tokens
fi fi
${cfg.package}/bin/config.sh "''${args[@]}" '';
# Move the automatically created _diag dir to the logs dir configureRunner = writeScript "configure" ''
mkdir -p "$STATE_DIRECTORY/_diag" if [[ -e "${newConfigTokenPath}" ]]; then
cp -r "$STATE_DIRECTORY/_diag/." "$LOGS_DIRECTORY/" echo "Configuring GitHub Actions Runner"
rm -rf "$STATE_DIRECTORY/_diag/" args=(
# Cleanup token from config --unattended
rm "${newConfigTokenPath}" --disableupdate
# Symlink to new config --work "$WORK_DIRECTORY"
ln -s '${newConfigPath}' "${currentConfigPath}" --url ${escapeShellArg cfg.url}
fi --labels ${escapeShellArg (concatStringsSep "," cfg.extraLabels)}
''; --name ${escapeShellArg cfg.name}
setupWorkDir = writeScript "setup-work-dirs" '' ${optionalString cfg.replace "--replace"}
# Cleanup previous service ${optionalString (cfg.runnerGroup != null) "--runnergroup ${escapeShellArg cfg.runnerGroup}"}
${pkgs.findutils}/bin/find -H "$WORK_DIRECTORY" -mindepth 1 -delete ${optionalString cfg.ephemeral "--ephemeral"}
)
# If the token file contains a PAT (i.e., it starts with "ghp_" or "github_pat_"), we have to use the --pat option,
# if it is not a PAT, we assume it contains a registration token and use the --token option
token=$(<"${newConfigTokenPath}")
if [[ "$token" =~ ^ghp_* ]] || [[ "$token" =~ ^github_pat_* ]]; then
args+=(--pat "$token")
else
args+=(--token "$token")
fi
${cfg.package}/bin/config.sh "''${args[@]}"
# Move the automatically created _diag dir to the logs dir
mkdir -p "$STATE_DIRECTORY/_diag"
cp -r "$STATE_DIRECTORY/_diag/." "$LOGS_DIRECTORY/"
rm -rf "$STATE_DIRECTORY/_diag/"
# Cleanup token from config
rm "${newConfigTokenPath}"
# Symlink to new config
ln -s '${newConfigPath}' "${currentConfigPath}"
fi
'';
setupWorkDir = writeScript "setup-work-dirs" ''
# Cleanup previous service
${pkgs.findutils}/bin/find -H "$WORK_DIRECTORY" -mindepth 1 -delete
# Link _diag dir # Link _diag dir
ln -s "$LOGS_DIRECTORY" "$WORK_DIRECTORY/_diag" ln -s "$LOGS_DIRECTORY" "$WORK_DIRECTORY/_diag"
# Link the runner credentials to the work dir # Link the runner credentials to the work dir
ln -s "$STATE_DIRECTORY"/{${lib.concatStringsSep "," runnerCredFiles}} "$WORK_DIRECTORY/" ln -s "$STATE_DIRECTORY"/{${lib.concatStringsSep "," runnerCredFiles}} "$WORK_DIRECTORY/"
''; '';
in in
map (x: "${x} ${escapeShellArgs [ stateDir workDir logsDir ]}") [ map (x: "${x} ${escapeShellArgs [ stateDir workDir logsDir ]}") [
"+${unconfigureRunner}" # runs as root "+${unconfigureRunner}" # runs as root
configureRunner configureRunner
setupWorkDir setupWorkDir
]; ];
# If running in ephemeral mode, restart the service on-exit (i.e., successful de-registration of the runner) # If running in ephemeral mode, restart the service on-exit (i.e., successful de-registration of the runner)
# to trigger a fresh registration. # to trigger a fresh registration.
Restart = if cfg.ephemeral then "on-success" else "no"; Restart = if cfg.ephemeral then "on-success" else "no";
# If the runner exits with `ReturnCode.RetryableError = 2`, always restart the service: # If the runner exits with `ReturnCode.RetryableError = 2`, always restart the service:
# https://github.com/actions/runner/blob/40ed7f8/src/Runner.Common/Constants.cs#L146 # https://github.com/actions/runner/blob/40ed7f8/src/Runner.Common/Constants.cs#L146
RestartForceExitStatus = [ 2 ]; RestartForceExitStatus = [ 2 ];
# Contains _diag # Contains _diag
LogsDirectory = [ systemdDir ]; LogsDirectory = [ systemdDir ];
# Default RUNNER_ROOT which contains ephemeral Runner data # Default RUNNER_ROOT which contains ephemeral Runner data
RuntimeDirectory = [ systemdDir ]; RuntimeDirectory = [ systemdDir ];
# Home of persistent runner data, e.g., credentials # Home of persistent runner data, e.g., credentials
StateDirectory = [ systemdDir ]; StateDirectory = [ systemdDir ];
StateDirectoryMode = "0700"; StateDirectoryMode = "0700";
WorkingDirectory = workDir; WorkingDirectory = workDir;
InaccessiblePaths = [ InaccessiblePaths = [
# Token file path given in the configuration, if visible to the service # Token file path given in the configuration, if visible to the service
"-${cfg.tokenFile}" "-${cfg.tokenFile}"
# Token file in the state directory # Token file in the state directory
"${stateDir}/${currentConfigTokenFilename}" "${stateDir}/${currentConfigTokenFilename}"
]; ];
KillSignal = "SIGINT"; KillSignal = "SIGINT";
# Hardening (may overlap with DynamicUser=) # Hardening (may overlap with DynamicUser=)
# The following options are only for optimizing: # The following options are only for optimizing:
# systemd-analyze security github-runner # systemd-analyze security github-runner
AmbientCapabilities = ""; AmbientCapabilities = mkBefore [ "" ];
CapabilityBoundingSet = ""; CapabilityBoundingSet = mkBefore [ "" ];
# ProtectClock= adds DeviceAllow=char-rtc r # ProtectClock= adds DeviceAllow=char-rtc r
DeviceAllow = ""; DeviceAllow = mkBefore [ "" ];
NoNewPrivileges = true; NoNewPrivileges = mkDefault true;
PrivateDevices = true; PrivateDevices = mkDefault true;
PrivateMounts = true; PrivateMounts = mkDefault true;
PrivateTmp = true; PrivateTmp = mkDefault true;
PrivateUsers = true; PrivateUsers = mkDefault true;
ProtectClock = true; ProtectClock = mkDefault true;
ProtectControlGroups = true; ProtectControlGroups = mkDefault true;
ProtectHome = true; ProtectHome = mkDefault true;
ProtectHostname = true; ProtectHostname = mkDefault true;
ProtectKernelLogs = true; ProtectKernelLogs = mkDefault true;
ProtectKernelModules = true; ProtectKernelModules = mkDefault true;
ProtectKernelTunables = true; ProtectKernelTunables = mkDefault true;
ProtectSystem = "strict"; ProtectSystem = mkDefault "strict";
RemoveIPC = true; RemoveIPC = mkDefault true;
RestrictNamespaces = true; RestrictNamespaces = mkDefault true;
RestrictRealtime = true; RestrictRealtime = mkDefault true;
RestrictSUIDSGID = true; RestrictSUIDSGID = mkDefault true;
UMask = "0066"; UMask = mkDefault "0066";
ProtectProc = "invisible"; ProtectProc = mkDefault "invisible";
SystemCallFilter = [ SystemCallFilter = mkBefore [
"~@clock" "~@clock"
"~@cpu-emulation" "~@cpu-emulation"
"~@module" "~@module"
"~@mount" "~@mount"
"~@obsolete" "~@obsolete"
"~@raw-io" "~@raw-io"
"~@reboot" "~@reboot"
"~capset" "~capset"
"~setdomainname" "~setdomainname"
"~sethostname" "~sethostname"
]; ];
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" "AF_NETLINK" ]; RestrictAddressFamilies = mkBefore [ "AF_INET" "AF_INET6" "AF_UNIX" "AF_NETLINK" ];
BindPaths = lib.optionals (cfg.workDir != null) [ cfg.workDir ]; BindPaths = lib.optionals (cfg.workDir != null) [ cfg.workDir ];
# Needs network access # Needs network access
PrivateNetwork = false; PrivateNetwork = mkDefault false;
# Cannot be true due to Node # Cannot be true due to Node
MemoryDenyWriteExecute = false; MemoryDenyWriteExecute = mkDefault false;
# The more restrictive "pid" option makes `nix` commands in CI emit # The more restrictive "pid" option makes `nix` commands in CI emit
# "GC Warning: Couldn't read /proc/stat" # "GC Warning: Couldn't read /proc/stat"
# You may want to set this to "pid" if not using `nix` commands # You may want to set this to "pid" if not using `nix` commands
ProcSubset = "all"; ProcSubset = mkDefault "all";
# Coverage programs for compiled code such as `cargo-tarpaulin` disable # Coverage programs for compiled code such as `cargo-tarpaulin` disable
# ASLR (address space layout randomization) which requires the # ASLR (address space layout randomization) which requires the
# `personality` syscall # `personality` syscall
# You may want to set this to `true` if not using coverage tooling on # You may want to set this to `true` if not using coverage tooling on
# compiled code # compiled code
LockPersonality = false; LockPersonality = mkDefault false;
# Note that this has some interactions with the User setting; so you may # Note that this has some interactions with the User setting; so you may
# want to consult the systemd docs if using both. # want to consult the systemd docs if using both.
DynamicUser = true; DynamicUser = mkDefault true;
} // ( }
lib.optionalAttrs (cfg.user != null) { User = cfg.user; } (mkIf (cfg.user != null) { User = cfg.user; })
) // cfg.serviceOverrides; cfg.serviceOverrides
];
} }

View file

@ -53,4 +53,6 @@ in
})) }))
); );
}; };
meta.maintainers = with maintainers; [ veehaitch newam ];
} }

View file

@ -0,0 +1,309 @@
# FoundationDB {#module-services-foundationdb}
*Source:* {file}`modules/services/databases/foundationdb.nix`
*Upstream documentation:* <https://apple.github.io/foundationdb/>
*Maintainer:* Austin Seipp
*Available version(s):* 5.1.x, 5.2.x, 6.0.x
FoundationDB (or "FDB") is an open source, distributed, transactional
key-value store.
## Configuring and basic setup {#module-services-foundationdb-configuring}
To enable FoundationDB, add the following to your
{file}`configuration.nix`:
```
services.foundationdb.enable = true;
services.foundationdb.package = pkgs.foundationdb52; # FoundationDB 5.2.x
```
The {option}`services.foundationdb.package` option is required, and
must always be specified. Due to the fact FoundationDB network protocols and
on-disk storage formats may change between (major) versions, and upgrades
must be explicitly handled by the user, you must always manually specify
this yourself so that the NixOS module will use the proper version. Note
that minor, bugfix releases are always compatible.
After running {command}`nixos-rebuild`, you can verify whether
FoundationDB is running by executing {command}`fdbcli` (which is
added to {option}`environment.systemPackages`):
```ShellSession
$ sudo -u foundationdb fdbcli
Using cluster file `/etc/foundationdb/fdb.cluster'.
The database is available.
Welcome to the fdbcli. For help, type `help'.
fdb> status
Using cluster file `/etc/foundationdb/fdb.cluster'.
Configuration:
Redundancy mode - single
Storage engine - memory
Coordinators - 1
Cluster:
FoundationDB processes - 1
Machines - 1
Memory availability - 5.4 GB per process on machine with least available
Fault Tolerance - 0 machines
Server time - 04/20/18 15:21:14
...
fdb>
```
You can also write programs using the available client libraries. For
example, the following Python program can be run in order to grab the
cluster status, as a quick example. (This example uses
{command}`nix-shell` shebang support to automatically supply the
necessary Python modules).
```ShellSession
a@link> cat fdb-status.py
#! /usr/bin/env nix-shell
#! nix-shell -i python -p python pythonPackages.foundationdb52
import fdb
import json
def main():
fdb.api_version(520)
db = fdb.open()
@fdb.transactional
def get_status(tr):
return str(tr['\xff\xff/status/json'])
obj = json.loads(get_status(db))
print('FoundationDB available: %s' % obj['client']['database_status']['available'])
if __name__ == "__main__":
main()
a@link> chmod +x fdb-status.py
a@link> ./fdb-status.py
FoundationDB available: True
a@link>
```
FoundationDB is run under the {command}`foundationdb` user and group
by default, but this may be changed in the NixOS configuration. The systemd
unit {command}`foundationdb.service` controls the
{command}`fdbmonitor` process.
By default, the NixOS module for FoundationDB creates a single SSD-storage
based database for development and basic usage. This storage engine is
designed for SSDs and will perform poorly on HDDs; however it can handle far
more data than the alternative "memory" engine and is a better default
choice for most deployments. (Note that you can change the storage backend
on-the-fly for a given FoundationDB cluster using
{command}`fdbcli`.)
Furthermore, only 1 server process and 1 backup agent are started in the
default configuration. See below for more on scaling to increase this.
FoundationDB stores all data for all server processes under
{file}`/var/lib/foundationdb`. You can override this using
{option}`services.foundationdb.dataDir`, e.g.
```
services.foundationdb.dataDir = "/data/fdb";
```
Similarly, logs are stored under {file}`/var/log/foundationdb`
by default, and there is a corresponding
{option}`services.foundationdb.logDir` as well.
## Scaling processes and backup agents {#module-services-foundationdb-scaling}
Scaling the number of server processes is quite easy; simply specify
{option}`services.foundationdb.serverProcesses` to be the number of
FoundationDB worker processes that should be started on the machine.
FoundationDB worker processes typically require 4GB of RAM per-process at
minimum for good performance, so this option is set to 1 by default since
the maximum amount of RAM is unknown. You're advised to abide by this
restriction, so pick a number of processes so that each has 4GB or more.
A similar option exists in order to scale backup agent processes,
{option}`services.foundationdb.backupProcesses`. Backup agents are
not as performance/RAM sensitive, so feel free to experiment with the number
of available backup processes.
## Clustering {#module-services-foundationdb-clustering}
FoundationDB on NixOS works similarly to other Linux systems, so this
section will be brief. Please refer to the full FoundationDB documentation
for more on clustering.
FoundationDB organizes clusters using a set of
*coordinators*, which are just specially-designated
worker processes. By default, every installation of FoundationDB on NixOS
will start as its own individual cluster, with a single coordinator: the
first worker process on {command}`localhost`.
Coordinators are specified globally using the
{command}`/etc/foundationdb/fdb.cluster` file, which all servers and
client applications will use to find and join coordinators. Note that this
file *can not* be managed by NixOS so easily:
FoundationDB is designed so that it will rewrite the file at runtime for all
clients and nodes when cluster coordinators change, with clients
transparently handling this without intervention. It is fundamentally a
mutable file, and you should not try to manage it in any way in NixOS.
When dealing with a cluster, there are two main things you want to do:
- Add a node to the cluster for storage/compute.
- Promote an ordinary worker to a coordinator.
A node must already be a member of the cluster in order to properly be
promoted to a coordinator, so you must always add it first if you wish to
promote it.
To add a machine to a FoundationDB cluster:
- Choose one of the servers to start as the initial coordinator.
- Copy the {command}`/etc/foundationdb/fdb.cluster` file from this
server to all the other servers. Restart FoundationDB on all of these
other servers, so they join the cluster.
- All of these servers are now connected and working together in the
cluster, under the chosen coordinator.
At this point, you can add as many nodes as you want by just repeating the
above steps. By default there will still be a single coordinator: you can
use {command}`fdbcli` to change this and add new coordinators.
As a convenience, FoundationDB can automatically assign coordinators based
on the redundancy mode you wish to achieve for the cluster. Once all the
nodes have been joined, simply set the replication policy, and then issue
the {command}`coordinators auto` command.
For example, assuming we have 3 nodes available, we can enable double
redundancy mode, then auto-select coordinators. For double redundancy, 3
coordinators is ideal: therefore FoundationDB will make
*every* node a coordinator automatically:
```ShellSession
fdbcli> configure double ssd
fdbcli> coordinators auto
```
This will transparently update all the servers within seconds, and
appropriately rewrite the {command}`fdb.cluster` file, as well as
informing all client processes to do the same.
## Client connectivity {#module-services-foundationdb-connectivity}
By default, all clients must use the current {command}`fdb.cluster`
file to access a given FoundationDB cluster. This file is located by default
in {command}`/etc/foundationdb/fdb.cluster` on all machines with the
FoundationDB service enabled, so you may copy the active one from your
cluster to a new node in order to connect, if it is not part of the cluster.
## Client authorization and TLS {#module-services-foundationdb-authorization}
By default, any user who can connect to a FoundationDB process with the
correct cluster configuration can access anything. FoundationDB uses a
pluggable design to transport security, and out of the box it supports a
LibreSSL-based plugin for TLS support. This plugin not only does in-flight
encryption, but also performs client authorization based on the given
endpoint's certificate chain. For example, a FoundationDB server may be
configured to only accept client connections over TLS, where the client TLS
certificate is from organization *Acme Co* in the
*Research and Development* unit.
Configuring TLS with FoundationDB is done using the
{option}`services.foundationdb.tls` options in order to control the
peer verification string, as well as the certificate and its private key.
Note that the certificate and its private key must be accessible to the
FoundationDB user account that the server runs under. These files are also
NOT managed by NixOS, as putting them into the store may reveal private
information.
After you have a key and certificate file in place, it is not enough to
simply set the NixOS module options -- you must also configure the
{command}`fdb.cluster` file to specify that a given set of
coordinators use TLS. This is as simple as adding the suffix
{command}`:tls` to your cluster coordinator configuration, after the
port number. For example, assuming you have a coordinator on localhost with
the default configuration, simply specifying:
```
XXXXXX:XXXXXX@127.0.0.1:4500:tls
```
will configure all clients and server processes to use TLS from now on.
## Backups and Disaster Recovery {#module-services-foundationdb-disaster-recovery}
The usual rules for doing FoundationDB backups apply on NixOS as written in
the FoundationDB manual. However, one important difference is the security
profile for NixOS: by default, the {command}`foundationdb` systemd
unit uses *Linux namespaces* to restrict write access to
the system, except for the log directory, data directory, and the
{command}`/etc/foundationdb/` directory. This is enforced by default
and cannot be disabled.
However, a side effect of this is that the {command}`fdbbackup`
command doesn't work properly for local filesystem backups: FoundationDB
uses a server process alongside the database processes to perform backups
and copy the backups to the filesystem. As a result, this process is put
under the restricted namespaces above: the backup process can only write to
a limited number of paths.
In order to allow flexible backup locations on local disks, the FoundationDB
NixOS module supports a
{option}`services.foundationdb.extraReadWritePaths` option. This
option takes a list of paths, and adds them to the systemd unit, allowing
the processes inside the service to write (and read) the specified
directories.
For example, to create backups in {command}`/opt/fdb-backups`, first
set up the paths in the module options:
```
services.foundationdb.extraReadWritePaths = [ "/opt/fdb-backups" ];
```
Restart the FoundationDB service, and it will now be able to write to this
directory (even if it did not exist before). Note: this path
*must* exist before restarting the unit. Otherwise,
systemd will not include it in the private FoundationDB namespace (and it
will not add it dynamically at runtime).
You can now perform a backup:
```ShellSession
$ sudo -u foundationdb fdbbackup start -t default -d file:///opt/fdb-backups
$ sudo -u foundationdb fdbbackup status -t default
```
## Known limitations {#module-services-foundationdb-limitations}
The FoundationDB setup for NixOS should currently be considered beta.
FoundationDB is not new software, but the NixOS compilation and integration
has only undergone fairly basic testing of all the available functionality.
- There is no way to specify individual parameters for individual
{command}`fdbserver` processes. Currently, all server processes
inherit all the global {command}`fdbmonitor` settings.
- Ruby bindings are not currently installed.
- Go bindings are not currently installed.
## Options {#module-services-foundationdb-options}
NixOS's FoundationDB module allows you to configure all of the most relevant
configuration options for {command}`fdbmonitor`, matching it quite
closely. A complete list of options for the FoundationDB module may be found
[here](#opt-services.foundationdb.enable). You should
also read the FoundationDB documentation as well.
## Full documentation {#module-services-foundationdb-full-docs}
FoundationDB is a complex piece of software, and requires careful
administration to properly use. Full documentation for administration can be
found here: <https://apple.github.io/foundationdb/>.

View file

@ -1,60 +1,58 @@
<chapter xmlns="http://docbook.org/ns/docbook" <!-- Do not edit this file directly, edit its companion .md instead
xmlns:xlink="http://www.w3.org/1999/xlink" and regenerate this file using nixos/doc/manual/md-to-db.sh -->
xmlns:xi="http://www.w3.org/2001/XInclude" <chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-foundationdb">
version="5.0" <title>FoundationDB</title>
xml:id="module-services-foundationdb">
<title>FoundationDB</title>
<para>
<emphasis>Source:</emphasis>
<filename>modules/services/databases/foundationdb.nix</filename>
</para>
<para>
<emphasis>Upstream documentation:</emphasis>
<link xlink:href="https://apple.github.io/foundationdb/"/>
</para>
<para>
<emphasis>Maintainer:</emphasis> Austin Seipp
</para>
<para>
<emphasis>Available version(s):</emphasis> 5.1.x, 5.2.x, 6.0.x
</para>
<para>
FoundationDB (or "FDB") is an open source, distributed, transactional
key-value store.
</para>
<section xml:id="module-services-foundationdb-configuring">
<title>Configuring and basic setup</title>
<para> <para>
To enable FoundationDB, add the following to your <emphasis>Source:</emphasis>
<filename>configuration.nix</filename>: <filename>modules/services/databases/foundationdb.nix</filename>
<programlisting> </para>
<para>
<emphasis>Upstream documentation:</emphasis>
<link xlink:href="https://apple.github.io/foundationdb/">https://apple.github.io/foundationdb/</link>
</para>
<para>
<emphasis>Maintainer:</emphasis> Austin Seipp
</para>
<para>
<emphasis>Available version(s):</emphasis> 5.1.x, 5.2.x, 6.0.x
</para>
<para>
FoundationDB (or <quote>FDB</quote>) is an open source, distributed,
transactional key-value store.
</para>
<section xml:id="module-services-foundationdb-configuring">
<title>Configuring and basic setup</title>
<para>
To enable FoundationDB, add the following to your
<filename>configuration.nix</filename>:
</para>
<programlisting>
services.foundationdb.enable = true; services.foundationdb.enable = true;
services.foundationdb.package = pkgs.foundationdb52; # FoundationDB 5.2.x services.foundationdb.package = pkgs.foundationdb52; # FoundationDB 5.2.x
</programlisting> </programlisting>
</para> <para>
The <option>services.foundationdb.package</option> option is
<para> required, and must always be specified. Due to the fact
The <option>services.foundationdb.package</option> option is required, and FoundationDB network protocols and on-disk storage formats may
must always be specified. Due to the fact FoundationDB network protocols and change between (major) versions, and upgrades must be explicitly
on-disk storage formats may change between (major) versions, and upgrades handled by the user, you must always manually specify this
must be explicitly handled by the user, you must always manually specify yourself so that the NixOS module will use the proper version.
this yourself so that the NixOS module will use the proper version. Note Note that minor, bugfix releases are always compatible.
that minor, bugfix releases are always compatible. </para>
</para> <para>
After running <command>nixos-rebuild</command>, you can verify
<para> whether FoundationDB is running by executing
After running <command>nixos-rebuild</command>, you can verify whether <command>fdbcli</command> (which is added to
FoundationDB is running by executing <command>fdbcli</command> (which is <option>environment.systemPackages</option>):
added to <option>environment.systemPackages</option>): </para>
<screen> <programlisting>
<prompt>$ </prompt>sudo -u foundationdb fdbcli $ sudo -u foundationdb fdbcli
Using cluster file `/etc/foundationdb/fdb.cluster'. Using cluster file `/etc/foundationdb/fdb.cluster'.
The database is available. The database is available.
Welcome to the fdbcli. For help, type `help'. Welcome to the fdbcli. For help, type `help'.
<prompt>fdb> </prompt>status fdb&gt; status
Using cluster file `/etc/foundationdb/fdb.cluster'. Using cluster file `/etc/foundationdb/fdb.cluster'.
@ -72,18 +70,17 @@ Cluster:
... ...
<prompt>fdb></prompt> fdb&gt;
</screen> </programlisting>
</para> <para>
You can also write programs using the available client libraries.
<para> For example, the following Python program can be run in order to
You can also write programs using the available client libraries. For grab the cluster status, as a quick example. (This example uses
example, the following Python program can be run in order to grab the <command>nix-shell</command> shebang support to automatically
cluster status, as a quick example. (This example uses supply the necessary Python modules).
<command>nix-shell</command> shebang support to automatically supply the </para>
necessary Python modules). <programlisting>
<screen> a@link&gt; cat fdb-status.py
<prompt>a@link> </prompt>cat fdb-status.py
#! /usr/bin/env nix-shell #! /usr/bin/env nix-shell
#! nix-shell -i python -p python pythonPackages.foundationdb52 #! nix-shell -i python -p python pythonPackages.foundationdb52
@ -101,343 +98,328 @@ def main():
obj = json.loads(get_status(db)) obj = json.loads(get_status(db))
print('FoundationDB available: %s' % obj['client']['database_status']['available']) print('FoundationDB available: %s' % obj['client']['database_status']['available'])
if __name__ == "__main__": if __name__ == &quot;__main__&quot;:
main() main()
<prompt>a@link> </prompt>chmod +x fdb-status.py a@link&gt; chmod +x fdb-status.py
<prompt>a@link> </prompt>./fdb-status.py a@link&gt; ./fdb-status.py
FoundationDB available: True FoundationDB available: True
<prompt>a@link></prompt> a@link&gt;
</screen>
</para>
<para>
FoundationDB is run under the <command>foundationdb</command> user and group
by default, but this may be changed in the NixOS configuration. The systemd
unit <command>foundationdb.service</command> controls the
<command>fdbmonitor</command> process.
</para>
<para>
By default, the NixOS module for FoundationDB creates a single SSD-storage
based database for development and basic usage. This storage engine is
designed for SSDs and will perform poorly on HDDs; however it can handle far
more data than the alternative "memory" engine and is a better default
choice for most deployments. (Note that you can change the storage backend
on-the-fly for a given FoundationDB cluster using
<command>fdbcli</command>.)
</para>
<para>
Furthermore, only 1 server process and 1 backup agent are started in the
default configuration. See below for more on scaling to increase this.
</para>
<para>
FoundationDB stores all data for all server processes under
<filename>/var/lib/foundationdb</filename>. You can override this using
<option>services.foundationdb.dataDir</option>, e.g.
<programlisting>
services.foundationdb.dataDir = "/data/fdb";
</programlisting> </programlisting>
</para>
<para>
Similarly, logs are stored under <filename>/var/log/foundationdb</filename>
by default, and there is a corresponding
<option>services.foundationdb.logDir</option> as well.
</para>
</section>
<section xml:id="module-services-foundationdb-scaling">
<title>Scaling processes and backup agents</title>
<para>
Scaling the number of server processes is quite easy; simply specify
<option>services.foundationdb.serverProcesses</option> to be the number of
FoundationDB worker processes that should be started on the machine.
</para>
<para>
FoundationDB worker processes typically require 4GB of RAM per-process at
minimum for good performance, so this option is set to 1 by default since
the maximum amount of RAM is unknown. You're advised to abide by this
restriction, so pick a number of processes so that each has 4GB or more.
</para>
<para>
A similar option exists in order to scale backup agent processes,
<option>services.foundationdb.backupProcesses</option>. Backup agents are
not as performance/RAM sensitive, so feel free to experiment with the number
of available backup processes.
</para>
</section>
<section xml:id="module-services-foundationdb-clustering">
<title>Clustering</title>
<para>
FoundationDB on NixOS works similarly to other Linux systems, so this
section will be brief. Please refer to the full FoundationDB documentation
for more on clustering.
</para>
<para>
FoundationDB organizes clusters using a set of
<emphasis>coordinators</emphasis>, which are just specially-designated
worker processes. By default, every installation of FoundationDB on NixOS
will start as its own individual cluster, with a single coordinator: the
first worker process on <command>localhost</command>.
</para>
<para>
Coordinators are specified globally using the
<command>/etc/foundationdb/fdb.cluster</command> file, which all servers and
client applications will use to find and join coordinators. Note that this
file <emphasis>can not</emphasis> be managed by NixOS so easily:
FoundationDB is designed so that it will rewrite the file at runtime for all
clients and nodes when cluster coordinators change, with clients
transparently handling this without intervention. It is fundamentally a
mutable file, and you should not try to manage it in any way in NixOS.
</para>
<para>
When dealing with a cluster, there are two main things you want to do:
</para>
<itemizedlist>
<listitem>
<para> <para>
Add a node to the cluster for storage/compute. FoundationDB is run under the <command>foundationdb</command> user
and group by default, but this may be changed in the NixOS
configuration. The systemd unit
<command>foundationdb.service</command> controls the
<command>fdbmonitor</command> process.
</para> </para>
</listitem>
<listitem>
<para> <para>
Promote an ordinary worker to a coordinator. By default, the NixOS module for FoundationDB creates a single
SSD-storage based database for development and basic usage. This
storage engine is designed for SSDs and will perform poorly on
HDDs; however it can handle far more data than the alternative
<quote>memory</quote> engine and is a better default choice for
most deployments. (Note that you can change the storage backend
on-the-fly for a given FoundationDB cluster using
<command>fdbcli</command>.)
</para> </para>
</listitem>
</itemizedlist>
<para>
A node must already be a member of the cluster in order to properly be
promoted to a coordinator, so you must always add it first if you wish to
promote it.
</para>
<para>
To add a machine to a FoundationDB cluster:
</para>
<itemizedlist>
<listitem>
<para> <para>
Choose one of the servers to start as the initial coordinator. Furthermore, only 1 server process and 1 backup agent are started
in the default configuration. See below for more on scaling to
increase this.
</para> </para>
</listitem>
<listitem>
<para> <para>
Copy the <command>/etc/foundationdb/fdb.cluster</command> file from this FoundationDB stores all data for all server processes under
server to all the other servers. Restart FoundationDB on all of these <filename>/var/lib/foundationdb</filename>. You can override this
other servers, so they join the cluster. using <option>services.foundationdb.dataDir</option>, e.g.
</para> </para>
</listitem> <programlisting>
<listitem> services.foundationdb.dataDir = &quot;/data/fdb&quot;;
</programlisting>
<para> <para>
All of these servers are now connected and working together in the Similarly, logs are stored under
cluster, under the chosen coordinator. <filename>/var/log/foundationdb</filename> by default, and there
is a corresponding <option>services.foundationdb.logDir</option>
as well.
</para> </para>
</listitem> </section>
</itemizedlist> <section xml:id="module-services-foundationdb-scaling">
<title>Scaling processes and backup agents</title>
<para> <para>
At this point, you can add as many nodes as you want by just repeating the Scaling the number of server processes is quite easy; simply
above steps. By default there will still be a single coordinator: you can specify <option>services.foundationdb.serverProcesses</option> to
use <command>fdbcli</command> to change this and add new coordinators. be the number of FoundationDB worker processes that should be
</para> started on the machine.
</para>
<para> <para>
As a convenience, FoundationDB can automatically assign coordinators based FoundationDB worker processes typically require 4GB of RAM
on the redundancy mode you wish to achieve for the cluster. Once all the per-process at minimum for good performance, so this option is set
nodes have been joined, simply set the replication policy, and then issue to 1 by default since the maximum amount of RAM is unknown. Youre
the <command>coordinators auto</command> command advised to abide by this restriction, so pick a number of
</para> processes so that each has 4GB or more.
</para>
<para> <para>
For example, assuming we have 3 nodes available, we can enable double A similar option exists in order to scale backup agent processes,
redundancy mode, then auto-select coordinators. For double redundancy, 3 <option>services.foundationdb.backupProcesses</option>. Backup
coordinators is ideal: therefore FoundationDB will make agents are not as performance/RAM sensitive, so feel free to
<emphasis>every</emphasis> node a coordinator automatically: experiment with the number of available backup processes.
</para> </para>
</section>
<screen> <section xml:id="module-services-foundationdb-clustering">
<prompt>fdbcli> </prompt>configure double ssd <title>Clustering</title>
<prompt>fdbcli> </prompt>coordinators auto <para>
</screen> FoundationDB on NixOS works similarly to other Linux systems, so
this section will be brief. Please refer to the full FoundationDB
<para> documentation for more on clustering.
This will transparently update all the servers within seconds, and </para>
appropriately rewrite the <command>fdb.cluster</command> file, as well as <para>
informing all client processes to do the same. FoundationDB organizes clusters using a set of
</para> <emphasis>coordinators</emphasis>, which are just
</section> specially-designated worker processes. By default, every
<section xml:id="module-services-foundationdb-connectivity"> installation of FoundationDB on NixOS will start as its own
<title>Client connectivity</title> individual cluster, with a single coordinator: the first worker
process on <command>localhost</command>.
<para> </para>
By default, all clients must use the current <command>fdb.cluster</command> <para>
file to access a given FoundationDB cluster. This file is located by default Coordinators are specified globally using the
in <command>/etc/foundationdb/fdb.cluster</command> on all machines with the <command>/etc/foundationdb/fdb.cluster</command> file, which all
FoundationDB service enabled, so you may copy the active one from your servers and client applications will use to find and join
cluster to a new node in order to connect, if it is not part of the cluster. coordinators. Note that this file <emphasis>can not</emphasis> be
</para> managed by NixOS so easily: FoundationDB is designed so that it
</section> will rewrite the file at runtime for all clients and nodes when
<section xml:id="module-services-foundationdb-authorization"> cluster coordinators change, with clients transparently handling
<title>Client authorization and TLS</title> this without intervention. It is fundamentally a mutable file, and
you should not try to manage it in any way in NixOS.
<para> </para>
By default, any user who can connect to a FoundationDB process with the <para>
correct cluster configuration can access anything. FoundationDB uses a When dealing with a cluster, there are two main things you want to
pluggable design to transport security, and out of the box it supports a do:
LibreSSL-based plugin for TLS support. This plugin not only does in-flight </para>
encryption, but also performs client authorization based on the given <itemizedlist spacing="compact">
endpoint's certificate chain. For example, a FoundationDB server may be <listitem>
configured to only accept client connections over TLS, where the client TLS <para>
certificate is from organization <emphasis>Acme Co</emphasis> in the Add a node to the cluster for storage/compute.
<emphasis>Research and Development</emphasis> unit. </para>
</para> </listitem>
<listitem>
<para> <para>
Configuring TLS with FoundationDB is done using the Promote an ordinary worker to a coordinator.
<option>services.foundationdb.tls</option> options in order to control the </para>
peer verification string, as well as the certificate and its private key. </listitem>
</para> </itemizedlist>
<para>
<para> A node must already be a member of the cluster in order to
Note that the certificate and its private key must be accessible to the properly be promoted to a coordinator, so you must always add it
FoundationDB user account that the server runs under. These files are also first if you wish to promote it.
NOT managed by NixOS, as putting them into the store may reveal private </para>
information. <para>
</para> To add a machine to a FoundationDB cluster:
</para>
<para> <itemizedlist spacing="compact">
After you have a key and certificate file in place, it is not enough to <listitem>
simply set the NixOS module options -- you must also configure the <para>
<command>fdb.cluster</command> file to specify that a given set of Choose one of the servers to start as the initial coordinator.
coordinators use TLS. This is as simple as adding the suffix </para>
<command>:tls</command> to your cluster coordinator configuration, after the </listitem>
port number. For example, assuming you have a coordinator on localhost with <listitem>
the default configuration, simply specifying: <para>
</para> Copy the <command>/etc/foundationdb/fdb.cluster</command> file
from this server to all the other servers. Restart
<programlisting> FoundationDB on all of these other servers, so they join the
cluster.
</para>
</listitem>
<listitem>
<para>
All of these servers are now connected and working together in
the cluster, under the chosen coordinator.
</para>
</listitem>
</itemizedlist>
<para>
At this point, you can add as many nodes as you want by just
repeating the above steps. By default there will still be a single
coordinator: you can use <command>fdbcli</command> to change this
and add new coordinators.
</para>
<para>
As a convenience, FoundationDB can automatically assign
coordinators based on the redundancy mode you wish to achieve for
the cluster. Once all the nodes have been joined, simply set the
replication policy, and then issue the
<command>coordinators auto</command> command
</para>
<para>
For example, assuming we have 3 nodes available, we can enable
double redundancy mode, then auto-select coordinators. For double
redundancy, 3 coordinators is ideal: therefore FoundationDB will
make <emphasis>every</emphasis> node a coordinator automatically:
</para>
<programlisting>
fdbcli&gt; configure double ssd
fdbcli&gt; coordinators auto
</programlisting>
<para>
This will transparently update all the servers within seconds, and
appropriately rewrite the <command>fdb.cluster</command> file, as
well as informing all client processes to do the same.
</para>
</section>
<section xml:id="module-services-foundationdb-connectivity">
<title>Client connectivity</title>
<para>
By default, all clients must use the current
<command>fdb.cluster</command> file to access a given FoundationDB
cluster. This file is located by default in
<command>/etc/foundationdb/fdb.cluster</command> on all machines
with the FoundationDB service enabled, so you may copy the active
one from your cluster to a new node in order to connect, if it is
not part of the cluster.
</para>
</section>
<section xml:id="module-services-foundationdb-authorization">
<title>Client authorization and TLS</title>
<para>
By default, any user who can connect to a FoundationDB process
with the correct cluster configuration can access anything.
FoundationDB uses a pluggable design to transport security, and
out of the box it supports a LibreSSL-based plugin for TLS
support. This plugin not only does in-flight encryption, but also
performs client authorization based on the given endpoints
certificate chain. For example, a FoundationDB server may be
configured to only accept client connections over TLS, where the
client TLS certificate is from organization <emphasis>Acme
Co</emphasis> in the <emphasis>Research and Development</emphasis>
unit.
</para>
<para>
Configuring TLS with FoundationDB is done using the
<option>services.foundationdb.tls</option> options in order to
control the peer verification string, as well as the certificate
and its private key.
</para>
<para>
Note that the certificate and its private key must be accessible
to the FoundationDB user account that the server runs under. These
files are also NOT managed by NixOS, as putting them into the
store may reveal private information.
</para>
<para>
After you have a key and certificate file in place, it is not
enough to simply set the NixOS module options you must also
configure the <command>fdb.cluster</command> file to specify that
a given set of coordinators use TLS. This is as simple as adding
the suffix <command>:tls</command> to your cluster coordinator
configuration, after the port number. For example, assuming you
have a coordinator on localhost with the default configuration,
simply specifying:
</para>
<programlisting>
XXXXXX:XXXXXX@127.0.0.1:4500:tls XXXXXX:XXXXXX@127.0.0.1:4500:tls
</programlisting> </programlisting>
<para>
<para> will configure all clients and server processes to use TLS from
will configure all clients and server processes to use TLS from now on. now on.
</para> </para>
</section> </section>
<section xml:id="module-services-foundationdb-disaster-recovery"> <section xml:id="module-services-foundationdb-disaster-recovery">
<title>Backups and Disaster Recovery</title> <title>Backups and Disaster Recovery</title>
<para>
<para> The usual rules for doing FoundationDB backups apply on NixOS as
The usual rules for doing FoundationDB backups apply on NixOS as written in written in the FoundationDB manual. However, one important
the FoundationDB manual. However, one important difference is the security difference is the security profile for NixOS: by default, the
profile for NixOS: by default, the <command>foundationdb</command> systemd <command>foundationdb</command> systemd unit uses <emphasis>Linux
unit uses <emphasis>Linux namespaces</emphasis> to restrict write access to namespaces</emphasis> to restrict write access to the system,
the system, except for the log directory, data directory, and the except for the log directory, data directory, and the
<command>/etc/foundationdb/</command> directory. This is enforced by default <command>/etc/foundationdb/</command> directory. This is enforced
and cannot be disabled. by default and cannot be disabled.
</para> </para>
<para>
<para> However, a side effect of this is that the
However, a side effect of this is that the <command>fdbbackup</command> <command>fdbbackup</command> command doesnt work properly for
command doesn't work properly for local filesystem backups: FoundationDB local filesystem backups: FoundationDB uses a server process
uses a server process alongside the database processes to perform backups alongside the database processes to perform backups and copy the
and copy the backups to the filesystem. As a result, this process is put backups to the filesystem. As a result, this process is put under
under the restricted namespaces above: the backup process can only write to the restricted namespaces above: the backup process can only write
a limited number of paths. to a limited number of paths.
</para> </para>
<para>
<para> In order to allow flexible backup locations on local disks, the
In order to allow flexible backup locations on local disks, the FoundationDB FoundationDB NixOS module supports a
NixOS module supports a <option>services.foundationdb.extraReadWritePaths</option> option.
<option>services.foundationdb.extraReadWritePaths</option> option. This This option takes a list of paths, and adds them to the systemd
option takes a list of paths, and adds them to the systemd unit, allowing unit, allowing the processes inside the service to write (and
the processes inside the service to write (and read) the specified read) the specified directories.
directories. </para>
</para> <para>
For example, to create backups in
<para> <command>/opt/fdb-backups</command>, first set up the paths in the
For example, to create backups in <command>/opt/fdb-backups</command>, first module options:
set up the paths in the module options: </para>
</para> <programlisting>
services.foundationdb.extraReadWritePaths = [ &quot;/opt/fdb-backups&quot; ];
<programlisting>
services.foundationdb.extraReadWritePaths = [ "/opt/fdb-backups" ];
</programlisting> </programlisting>
<para>
Restart the FoundationDB service, and it will now be able to write to this
directory (even if it does not yet exist.) Note: this path
<emphasis>must</emphasis> exist before restarting the unit. Otherwise,
systemd will not include it in the private FoundationDB namespace (and it
will not add it dynamically at runtime).
</para>
<para>
You can now perform a backup:
</para>
<screen>
<prompt>$ </prompt>sudo -u foundationdb fdbbackup start -t default -d file:///opt/fdb-backups
<prompt>$ </prompt>sudo -u foundationdb fdbbackup status -t default
</screen>
</section>
<section xml:id="module-services-foundationdb-limitations">
<title>Known limitations</title>
<para>
The FoundationDB setup for NixOS should currently be considered beta.
FoundationDB is not new software, but the NixOS compilation and integration
has only undergone fairly basic testing of all the available functionality.
</para>
<itemizedlist>
<listitem>
<para> <para>
There is no way to specify individual parameters for individual Restart the FoundationDB service, and it will now be able to write
<command>fdbserver</command> processes. Currently, all server processes to this directory (even if it does not yet exist.) Note: this path
inherit all the global <command>fdbmonitor</command> settings. <emphasis>must</emphasis> exist before restarting the unit.
Otherwise, systemd will not include it in the private FoundationDB
namespace (and it will not add it dynamically at runtime).
</para> </para>
</listitem>
<listitem>
<para> <para>
Ruby bindings are not currently installed. You can now perform a backup:
</para> </para>
</listitem> <programlisting>
<listitem> $ sudo -u foundationdb fdbbackup start -t default -d file:///opt/fdb-backups
$ sudo -u foundationdb fdbbackup status -t default
</programlisting>
</section>
<section xml:id="module-services-foundationdb-limitations">
<title>Known limitations</title>
<para> <para>
Go bindings are not currently installed. The FoundationDB setup for NixOS should currently be considered
beta. FoundationDB is not new software, but the NixOS compilation
and integration has only undergone fairly basic testing of all the
available functionality.
</para> </para>
</listitem> <itemizedlist spacing="compact">
</itemizedlist> <listitem>
</section> <para>
<section xml:id="module-services-foundationdb-options"> There is no way to specify individual parameters for
<title>Options</title> individual <command>fdbserver</command> processes. Currently,
all server processes inherit all the global
<para> <command>fdbmonitor</command> settings.
NixOS's FoundationDB module allows you to configure all of the most relevant </para>
configuration options for <command>fdbmonitor</command>, matching it quite </listitem>
closely. A complete list of options for the FoundationDB module may be found <listitem>
<link linkend="opt-services.foundationdb.enable">here</link>. You should <para>
also read the FoundationDB documentation as well. Ruby bindings are not currently installed.
</para> </para>
</section> </listitem>
<section xml:id="module-services-foundationdb-full-docs"> <listitem>
<title>Full documentation</title> <para>
Go bindings are not currently installed.
<para> </para>
FoundationDB is a complex piece of software, and requires careful </listitem>
administration to properly use. Full documentation for administration can be </itemizedlist>
found here: <link xlink:href="https://apple.github.io/foundationdb/"/>. </section>
</para> <section xml:id="module-services-foundationdb-options">
</section> <title>Options</title>
<para>
NixOSs FoundationDB module allows you to configure all of the
most relevant configuration options for
<command>fdbmonitor</command>, matching it quite closely. A
complete list of options for the FoundationDB module may be found
<link linkend="opt-services.foundationdb.enable">here</link>. You
should also read the FoundationDB documentation as well.
</para>
</section>
<section xml:id="module-services-foundationdb-full-docs">
<title>Full documentation</title>
<para>
FoundationDB is a complex piece of software, and requires careful
administration to properly use. Full documentation for
administration can be found here:
<link xlink:href="https://apple.github.io/foundationdb/">https://apple.github.io/foundationdb/</link>.
</para>
</section>
</chapter> </chapter>

View file

@ -0,0 +1,173 @@
# PostgreSQL {#module-postgresql}
<!-- FIXME: render nicely -->
<!-- FIXME: source can be added automatically -->
*Source:* {file}`modules/services/databases/postgresql.nix`
*Upstream documentation:* <http://www.postgresql.org/docs/>
<!-- FIXME: more stuff, like maintainer? -->
PostgreSQL is an advanced, free relational database.
<!-- MORE -->
## Configuring {#module-services-postgres-configuring}
To enable PostgreSQL, add the following to your {file}`configuration.nix`:
```
services.postgresql.enable = true;
services.postgresql.package = pkgs.postgresql_11;
```
Note that you are required to specify the desired version of PostgreSQL (e.g. `pkgs.postgresql_11`). Since upgrading your PostgreSQL version requires a database dump and reload (see below), NixOS cannot provide a default value for [](#opt-services.postgresql.package) such as the most recent release of PostgreSQL.
<!--
After running {command}`nixos-rebuild`, you can verify
whether PostgreSQL works by running {command}`psql`:
```ShellSession
$ psql
psql (9.2.9)
Type "help" for help.
alice=>
```
-->
By default, PostgreSQL stores its databases in {file}`/var/lib/postgresql/$psqlSchema`. You can override this using [](#opt-services.postgresql.dataDir), e.g.
```
services.postgresql.dataDir = "/data/postgresql";
```
## Upgrading {#module-services-postgres-upgrading}
::: {.note}
The steps below demonstrate how to upgrade from an older version to `pkgs.postgresql_13`.
These instructions are also applicable to other versions.
:::
Major PostgreSQL upgrades require downtime and a few imperative steps to be run. This is the case because
the internal on-disk state of the databases changes between major versions. Because of that,
NixOS places the state into {file}`/var/lib/postgresql/<version>` where each `version`
can be obtained like this:
```
$ nix-instantiate --eval -A postgresql_13.psqlSchema
"13"
```
For an upgrade, a script like this can be used to simplify the process:
```
{ config, pkgs, ... }:
{
environment.systemPackages = [
(let
# XXX specify the postgresql package you'd like to upgrade to.
# Do not forget to list the extensions you need.
newPostgres = pkgs.postgresql_13.withPackages (pp: [
# pp.plv8
]);
in pkgs.writeScriptBin "upgrade-pg-cluster" ''
set -eux
# XXX it's perhaps advisable to stop all services that depend on postgresql
systemctl stop postgresql
export NEWDATA="/var/lib/postgresql/${newPostgres.psqlSchema}"
export NEWBIN="${newPostgres}/bin"
export OLDDATA="${config.services.postgresql.dataDir}"
export OLDBIN="${config.services.postgresql.package}/bin"
install -d -m 0700 -o postgres -g postgres "$NEWDATA"
cd "$NEWDATA"
sudo -u postgres $NEWBIN/initdb -D "$NEWDATA"
sudo -u postgres $NEWBIN/pg_upgrade \
--old-datadir "$OLDDATA" --new-datadir "$NEWDATA" \
--old-bindir $OLDBIN --new-bindir $NEWBIN \
"$@"
'')
];
}
```
The upgrade process is:
1. Rebuild nixos configuration with the configuration above added to your {file}`configuration.nix`. Alternatively, add that into separate file and reference it in `imports` list.
2. Login as root (`sudo su -`)
3. Run `upgrade-pg-cluster`. It will stop the old postgresql, initialize a new one, and migrate the old one to the new one. You may supply arguments like `--jobs 4` and `--link` to speed up the migration process. See <https://www.postgresql.org/docs/current/pgupgrade.html> for details.
4. Change postgresql package in NixOS configuration to the one you were upgrading to via [](#opt-services.postgresql.package). Rebuild NixOS. This should start new postgres using upgraded data directory and all services you stopped during the upgrade.
5. After the upgrade it's advisable to analyze the new cluster.
- For PostgreSQL ≥ 14, use the `vacuumdb` command printed by the upgrades script.
- For PostgreSQL < 14, run (as `su -l postgres` in the [](#opt-services.postgresql.dataDir), in this example {file}`/var/lib/postgresql/13`):
```
$ ./analyze_new_cluster.sh
```
::: {.warning}
The next step removes the old state-directory!
:::
```
$ ./delete_old_cluster.sh
```
## Options {#module-services-postgres-options}
A complete list of options for the PostgreSQL module may be found [here](#opt-services.postgresql.enable).
## Plugins {#module-services-postgres-plugins}
Plugins collection for each PostgreSQL version can be accessed with `.pkgs`. For example, for `pkgs.postgresql_11` package, its plugin collection is accessed by `pkgs.postgresql_11.pkgs`:
```ShellSession
$ nix repl '<nixpkgs>'
Loading '<nixpkgs>'...
Added 10574 variables.
nix-repl> postgresql_11.pkgs.<TAB><TAB>
postgresql_11.pkgs.cstore_fdw postgresql_11.pkgs.pg_repack
postgresql_11.pkgs.pg_auto_failover postgresql_11.pkgs.pg_safeupdate
postgresql_11.pkgs.pg_bigm postgresql_11.pkgs.pg_similarity
postgresql_11.pkgs.pg_cron postgresql_11.pkgs.pg_topn
postgresql_11.pkgs.pg_hll postgresql_11.pkgs.pgjwt
postgresql_11.pkgs.pg_partman postgresql_11.pkgs.pgroonga
...
```
To add plugins via NixOS configuration, set `services.postgresql.extraPlugins`:
```
services.postgresql.package = pkgs.postgresql_11;
services.postgresql.extraPlugins = with pkgs.postgresql_11.pkgs; [
pg_repack
postgis
];
```
You can build custom PostgreSQL-with-plugins (to be used outside of NixOS) using function `.withPackages`. For example, creating a custom PostgreSQL package in an overlay can look like:
```
self: super: {
postgresql_custom = self.postgresql_11.withPackages (ps: [
ps.pg_repack
ps.postgis
]);
}
```
Here's a recipe on how to override a particular plugin through an overlay:
```
self: super: {
postgresql_11 = super.postgresql_11.override { this = self.postgresql_11; } // {
pkgs = super.postgresql_11.pkgs // {
pg_repack = super.postgresql_11.pkgs.pg_repack.overrideAttrs (_: {
name = "pg_repack-v20181024";
src = self.fetchzip {
url = "https://github.com/reorg/pg_repack/archive/923fa2f3c709a506e111cc963034bf2fd127aa00.tar.gz";
sha256 = "17k6hq9xaax87yz79j773qyigm4fwk8z4zh5cyp6z0sxnwfqxxw5";
};
});
};
};
}
```

View file

@ -1,181 +1,199 @@
<chapter xmlns="http://docbook.org/ns/docbook" <!-- Do not edit this file directly, edit its companion .md instead
xmlns:xlink="http://www.w3.org/1999/xlink" and regenerate this file using nixos/doc/manual/md-to-db.sh -->
xmlns:xi="http://www.w3.org/2001/XInclude" <chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-postgresql">
version="5.0" <title>PostgreSQL</title>
xml:id="module-postgresql">
<title>PostgreSQL</title>
<!-- FIXME: render nicely -->
<!-- FIXME: source can be added automatically -->
<para>
<emphasis>Source:</emphasis> <filename>modules/services/databases/postgresql.nix</filename>
</para>
<para>
<emphasis>Upstream documentation:</emphasis> <link xlink:href="http://www.postgresql.org/docs/"/>
</para>
<!-- FIXME: more stuff, like maintainer? -->
<para>
PostgreSQL is an advanced, free relational database.
<!-- MORE -->
</para>
<section xml:id="module-services-postgres-configuring">
<title>Configuring</title>
<para> <para>
To enable PostgreSQL, add the following to your <filename>configuration.nix</filename>: <emphasis>Source:</emphasis>
<programlisting> <filename>modules/services/databases/postgresql.nix</filename>
<xref linkend="opt-services.postgresql.enable"/> = true;
<xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql_11;
</programlisting>
Note that you are required to specify the desired version of PostgreSQL (e.g. <literal>pkgs.postgresql_11</literal>). Since upgrading your PostgreSQL version requires a database dump and reload (see below), NixOS cannot provide a default value for <xref linkend="opt-services.postgresql.package"/> such as the most recent release of PostgreSQL.
</para> </para>
<!--
<para>After running <command>nixos-rebuild</command>, you can verify
whether PostgreSQL works by running <command>psql</command>:
<screen>
<prompt>$ </prompt>psql
psql (9.2.9)
Type "help" for help.
<prompt>alice=></prompt>
</screen>
-->
<para> <para>
By default, PostgreSQL stores its databases in <filename>/var/lib/postgresql/$psqlSchema</filename>. You can override this using <xref linkend="opt-services.postgresql.dataDir"/>, e.g. <emphasis>Upstream documentation:</emphasis>
<programlisting> <link xlink:href="http://www.postgresql.org/docs/">http://www.postgresql.org/docs/</link>
<xref linkend="opt-services.postgresql.dataDir"/> = "/data/postgresql";
</programlisting>
</para> </para>
</section>
<section xml:id="module-services-postgres-upgrading">
<title>Upgrading</title>
<note>
<para>
The steps below demonstrate how to upgrade from an older version to <package>pkgs.postgresql_13</package>.
These instructions are also applicable to other versions.
</para>
</note>
<para> <para>
Major PostgreSQL upgrades require a downtime and a few imperative steps to be called. This is the case because PostgreSQL is an advanced, free relational database.
each major version has some internal changes in the databases' state during major releases. Because of that, </para>
NixOS places the state into <filename>/var/lib/postgresql/&lt;version&gt;</filename> where each <literal>version</literal> <section xml:id="module-services-postgres-configuring">
can be obtained like this: <title>Configuring</title>
<programlisting> <para>
<prompt>$ </prompt>nix-instantiate --eval -A postgresql_13.psqlSchema To enable PostgreSQL, add the following to your
"13" <filename>configuration.nix</filename>:
</para>
<programlisting>
services.postgresql.enable = true;
services.postgresql.package = pkgs.postgresql_11;
</programlisting> </programlisting>
For an upgrade, a script like this can be used to simplify the process: <para>
<programlisting> Note that you are required to specify the desired version of
PostgreSQL (e.g. <literal>pkgs.postgresql_11</literal>). Since
upgrading your PostgreSQL version requires a database dump and
reload (see below), NixOS cannot provide a default value for
<xref linkend="opt-services.postgresql.package" /> such as the
most recent release of PostgreSQL.
</para>
<para>
By default, PostgreSQL stores its databases in
<filename>/var/lib/postgresql/$psqlSchema</filename>. You can
override this using
<xref linkend="opt-services.postgresql.dataDir" />, e.g.
</para>
<programlisting>
services.postgresql.dataDir = &quot;/data/postgresql&quot;;
</programlisting>
</section>
<section xml:id="module-services-postgres-upgrading">
<title>Upgrading</title>
<note>
<para>
The steps below demonstrate how to upgrade from an older version
to <literal>pkgs.postgresql_13</literal>. These instructions are
also applicable to other versions.
</para>
</note>
<para>
Major PostgreSQL upgrades require a downtime and a few imperative
steps to be called. This is the case because each major version
has some internal changes in the databases state during major
releases. Because of that, NixOS places the state into
<filename>/var/lib/postgresql/&lt;version&gt;</filename> where
each <literal>version</literal> can be obtained like this:
</para>
<programlisting>
$ nix-instantiate --eval -A postgresql_13.psqlSchema
&quot;13&quot;
</programlisting>
<para>
For an upgrade, a script like this can be used to simplify the
process:
</para>
<programlisting>
{ config, pkgs, ... }: { config, pkgs, ... }:
{ {
<xref linkend="opt-environment.systemPackages" /> = [ environment.systemPackages = [
(let (let
# XXX specify the postgresql package you'd like to upgrade to. # XXX specify the postgresql package you'd like to upgrade to.
# Do not forget to list the extensions you need. # Do not forget to list the extensions you need.
newPostgres = pkgs.postgresql_13.withPackages (pp: [ newPostgres = pkgs.postgresql_13.withPackages (pp: [
# pp.plv8 # pp.plv8
]); ]);
in pkgs.writeScriptBin "upgrade-pg-cluster" '' in pkgs.writeScriptBin &quot;upgrade-pg-cluster&quot; ''
set -eux set -eux
# XXX it's perhaps advisable to stop all services that depend on postgresql # XXX it's perhaps advisable to stop all services that depend on postgresql
systemctl stop postgresql systemctl stop postgresql
export NEWDATA="/var/lib/postgresql/${newPostgres.psqlSchema}" export NEWDATA=&quot;/var/lib/postgresql/${newPostgres.psqlSchema}&quot;
export NEWBIN="${newPostgres}/bin" export NEWBIN=&quot;${newPostgres}/bin&quot;
export OLDDATA="${config.<xref linkend="opt-services.postgresql.dataDir"/>}" export OLDDATA=&quot;${config.services.postgresql.dataDir}&quot;
export OLDBIN="${config.<xref linkend="opt-services.postgresql.package"/>}/bin" export OLDBIN=&quot;${config.services.postgresql.package}/bin&quot;
install -d -m 0700 -o postgres -g postgres "$NEWDATA" install -d -m 0700 -o postgres -g postgres &quot;$NEWDATA&quot;
cd "$NEWDATA" cd &quot;$NEWDATA&quot;
sudo -u postgres $NEWBIN/initdb -D "$NEWDATA" sudo -u postgres $NEWBIN/initdb -D &quot;$NEWDATA&quot;
sudo -u postgres $NEWBIN/pg_upgrade \ sudo -u postgres $NEWBIN/pg_upgrade \
--old-datadir "$OLDDATA" --new-datadir "$NEWDATA" \ --old-datadir &quot;$OLDDATA&quot; --new-datadir &quot;$NEWDATA&quot; \
--old-bindir $OLDBIN --new-bindir $NEWBIN \ --old-bindir $OLDBIN --new-bindir $NEWBIN \
"$@" &quot;$@&quot;
'') '')
]; ];
} }
</programlisting> </programlisting>
</para>
<para>
The upgrade process is:
</para>
<orderedlist>
<listitem>
<para> <para>
Rebuild nixos configuration with the configuration above added to your <filename>configuration.nix</filename>. Alternatively, add that into separate file and reference it in <literal>imports</literal> list. The upgrade process is:
</para> </para>
</listitem> <orderedlist numeration="arabic">
<listitem> <listitem>
<para> <para>
Login as root (<literal>sudo su -</literal>) Rebuild nixos configuration with the configuration above added
</para> to your <filename>configuration.nix</filename>. Alternatively,
</listitem> add that into separate file and reference it in
<listitem> <literal>imports</literal> list.
<para> </para>
Run <literal>upgrade-pg-cluster</literal>. It will stop old postgresql, initialize a new one and migrate the old one to the new one. You may supply arguments like <literal>--jobs 4</literal> and <literal>--link</literal> to speedup migration process. See <link xlink:href="https://www.postgresql.org/docs/current/pgupgrade.html" /> for details. </listitem>
</para> <listitem>
</listitem> <para>
<listitem> Login as root (<literal>sudo su -</literal>)
<para> </para>
Change postgresql package in NixOS configuration to the one you were upgrading to via <xref linkend="opt-services.postgresql.package" />. Rebuild NixOS. This should start new postgres using upgraded data directory and all services you stopped during the upgrade. </listitem>
</para> <listitem>
</listitem> <para>
<listitem> Run <literal>upgrade-pg-cluster</literal>. It will stop old
<para> postgresql, initialize a new one and migrate the old one to
After the upgrade it's advisable to analyze the new cluster. the new one. You may supply arguments like
</para> <literal>--jobs 4</literal> and <literal>--link</literal> to
<itemizedlist> speedup migration process. See
<listitem> <link xlink:href="https://www.postgresql.org/docs/current/pgupgrade.html">https://www.postgresql.org/docs/current/pgupgrade.html</link>
<para> for details.
For PostgreSQL ≥ 14, use the <literal>vacuumdb</literal> command printed by the upgrades script. </para>
</para> </listitem>
</listitem> <listitem>
<listitem> <para>
<para> Change postgresql package in NixOS configuration to the one
For PostgreSQL &lt; 14, run (as <literal>su -l postgres</literal> in the <xref linkend="opt-services.postgresql.dataDir" />, in this example <filename>/var/lib/postgresql/13</filename>): you were upgrading to via
<programlisting> <xref linkend="opt-services.postgresql.package" />. Rebuild
<prompt>$ </prompt>./analyze_new_cluster.sh NixOS. This should start new postgres using upgraded data
directory and all services you stopped during the upgrade.
</para>
</listitem>
<listitem>
<para>
After the upgrade its advisable to analyze the new cluster.
</para>
<itemizedlist>
<listitem>
<para>
For PostgreSQL ≥ 14, use the <literal>vacuumdb</literal>
command printed by the upgrades script.
</para>
</listitem>
<listitem>
<para>
For PostgreSQL &lt; 14, run (as
<literal>su -l postgres</literal> in the
<xref linkend="opt-services.postgresql.dataDir" />, in
this example <filename>/var/lib/postgresql/13</filename>):
</para>
<programlisting>
$ ./analyze_new_cluster.sh
</programlisting> </programlisting>
</para> </listitem>
</listitem> </itemizedlist>
</itemizedlist> <warning>
<para> <para>
<warning><para>The next step removes the old state-directory!</para></warning> The next step removes the old state-directory!
<programlisting> </para>
<prompt>$ </prompt>./delete_old_cluster.sh </warning>
<programlisting>
$ ./delete_old_cluster.sh
</programlisting> </programlisting>
</listitem>
</orderedlist>
</section>
<section xml:id="module-services-postgres-options">
<title>Options</title>
<para>
A complete list of options for the PostgreSQL module may be found
<link linkend="opt-services.postgresql.enable">here</link>.
</para> </para>
</listitem> </section>
</orderedlist> <section xml:id="module-services-postgres-plugins">
</section> <title>Plugins</title>
<section xml:id="module-services-postgres-options"> <para>
<title>Options</title> Plugins collection for each PostgreSQL version can be accessed
with <literal>.pkgs</literal>. For example, for
<para> <literal>pkgs.postgresql_11</literal> package, its plugin
A complete list of options for the PostgreSQL module may be found <link linkend="opt-services.postgresql.enable">here</link>. collection is accessed by
</para> <literal>pkgs.postgresql_11.pkgs</literal>:
</section> </para>
<section xml:id="module-services-postgres-plugins"> <programlisting>
<title>Plugins</title> $ nix repl '&lt;nixpkgs&gt;'
<para>
Plugins collection for each PostgreSQL version can be accessed with <literal>.pkgs</literal>. For example, for <literal>pkgs.postgresql_11</literal> package, its plugin collection is accessed by <literal>pkgs.postgresql_11.pkgs</literal>:
<screen>
<prompt>$ </prompt>nix repl '&lt;nixpkgs&gt;'
Loading '&lt;nixpkgs&gt;'... Loading '&lt;nixpkgs&gt;'...
Added 10574 variables. Added 10574 variables.
<prompt>nix-repl&gt; </prompt>postgresql_11.pkgs.&lt;TAB&gt;&lt;TAB&gt; nix-repl&gt; postgresql_11.pkgs.&lt;TAB&gt;&lt;TAB&gt;
postgresql_11.pkgs.cstore_fdw postgresql_11.pkgs.pg_repack postgresql_11.pkgs.cstore_fdw postgresql_11.pkgs.pg_repack
postgresql_11.pkgs.pg_auto_failover postgresql_11.pkgs.pg_safeupdate postgresql_11.pkgs.pg_auto_failover postgresql_11.pkgs.pg_safeupdate
postgresql_11.pkgs.pg_bigm postgresql_11.pkgs.pg_similarity postgresql_11.pkgs.pg_bigm postgresql_11.pkgs.pg_similarity
@ -183,23 +201,25 @@ postgresql_11.pkgs.pg_cron postgresql_11.pkgs.pg_topn
postgresql_11.pkgs.pg_hll postgresql_11.pkgs.pgjwt postgresql_11.pkgs.pg_hll postgresql_11.pkgs.pgjwt
postgresql_11.pkgs.pg_partman postgresql_11.pkgs.pgroonga postgresql_11.pkgs.pg_partman postgresql_11.pkgs.pgroonga
... ...
</screen> </programlisting>
</para> <para>
To add plugins via NixOS configuration, set
<para> <literal>services.postgresql.extraPlugins</literal>:
To add plugins via NixOS configuration, set <literal>services.postgresql.extraPlugins</literal>: </para>
<programlisting> <programlisting>
<xref linkend="opt-services.postgresql.package"/> = pkgs.postgresql_11; services.postgresql.package = pkgs.postgresql_11;
<xref linkend="opt-services.postgresql.extraPlugins"/> = with pkgs.postgresql_11.pkgs; [ services.postgresql.extraPlugins = with pkgs.postgresql_11.pkgs; [
pg_repack pg_repack
postgis postgis
]; ];
</programlisting> </programlisting>
</para> <para>
You can build custom PostgreSQL-with-plugins (to be used outside
<para> of NixOS) using function <literal>.withPackages</literal>. For
You can build custom PostgreSQL-with-plugins (to be used outside of NixOS) using function <literal>.withPackages</literal>. For example, creating a custom PostgreSQL package in an overlay can look like: example, creating a custom PostgreSQL package in an overlay can
<programlisting> look like:
</para>
<programlisting>
self: super: { self: super: {
postgresql_custom = self.postgresql_11.withPackages (ps: [ postgresql_custom = self.postgresql_11.withPackages (ps: [
ps.pg_repack ps.pg_repack
@ -207,25 +227,24 @@ self: super: {
]); ]);
} }
</programlisting> </programlisting>
</para> <para>
Heres a recipe on how to override a particular plugin through an
<para> overlay:
Here's a recipe on how to override a particular plugin through an overlay: </para>
<programlisting> <programlisting>
self: super: { self: super: {
postgresql_11 = super.postgresql_11.override { this = self.postgresql_11; } // { postgresql_11 = super.postgresql_11.override { this = self.postgresql_11; } // {
pkgs = super.postgresql_11.pkgs // { pkgs = super.postgresql_11.pkgs // {
pg_repack = super.postgresql_11.pkgs.pg_repack.overrideAttrs (_: { pg_repack = super.postgresql_11.pkgs.pg_repack.overrideAttrs (_: {
name = "pg_repack-v20181024"; name = &quot;pg_repack-v20181024&quot;;
src = self.fetchzip { src = self.fetchzip {
url = "https://github.com/reorg/pg_repack/archive/923fa2f3c709a506e111cc963034bf2fd127aa00.tar.gz"; url = &quot;https://github.com/reorg/pg_repack/archive/923fa2f3c709a506e111cc963034bf2fd127aa00.tar.gz&quot;;
sha256 = "17k6hq9xaax87yz79j773qyigm4fwk8z4zh5cyp6z0sxnwfqxxw5"; sha256 = &quot;17k6hq9xaax87yz79j773qyigm4fwk8z4zh5cyp6z0sxnwfqxxw5&quot;;
}; };
}); });
}; };
}; };
} }
</programlisting> </programlisting>
</para> </section>
</section>
</chapter> </chapter>

View file

@ -0,0 +1,39 @@
# Flatpak {#module-services-flatpak}
*Source:* {file}`modules/services/desktop/flatpak.nix`
*Upstream documentation:* <https://github.com/flatpak/flatpak/wiki>
Flatpak is a system for building, distributing, and running sandboxed desktop
applications on Linux.
To enable Flatpak, add the following to your {file}`configuration.nix`:
```
services.flatpak.enable = true;
```
For the sandboxed apps to work correctly, desktop integration portals need to
be installed. If you run GNOME, this will be handled automatically for you;
in other cases, you will need to add something like the following to your
{file}`configuration.nix`:
```
xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];
```
Then, you will need to add a repository, for example,
[Flathub](https://github.com/flatpak/flatpak/wiki),
either using the following commands:
```ShellSession
$ flatpak remote-add --if-not-exists flathub https://flathub.org/repo/flathub.flatpakrepo
$ flatpak update
```
or by opening the
[repository file](https://flathub.org/repo/flathub.flatpakrepo) in GNOME Software.
Finally, you can search and install programs:
```ShellSession
$ flatpak search bustle
$ flatpak install flathub org.freedesktop.Bustle
$ flatpak run org.freedesktop.Bustle
```
Again, GNOME Software offers graphical interface for these tasks.

View file

@ -1,56 +1,59 @@
<chapter xmlns="http://docbook.org/ns/docbook" <!-- Do not edit this file directly, edit its companion .md instead
xmlns:xlink="http://www.w3.org/1999/xlink" and regenerate this file using nixos/doc/manual/md-to-db.sh -->
xmlns:xi="http://www.w3.org/2001/XInclude" <chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-flatpak">
version="5.0" <title>Flatpak</title>
xml:id="module-services-flatpak"> <para>
<title>Flatpak</title> <emphasis>Source:</emphasis>
<para> <filename>modules/services/desktop/flatpak.nix</filename>
<emphasis>Source:</emphasis> </para>
<filename>modules/services/desktop/flatpak.nix</filename> <para>
</para> <emphasis>Upstream documentation:</emphasis>
<para> <link xlink:href="https://github.com/flatpak/flatpak/wiki">https://github.com/flatpak/flatpak/wiki</link>
<emphasis>Upstream documentation:</emphasis> </para>
<link xlink:href="https://github.com/flatpak/flatpak/wiki"/> <para>
</para> Flatpak is a system for building, distributing, and running
<para> sandboxed desktop applications on Linux.
Flatpak is a system for building, distributing, and running sandboxed desktop </para>
applications on Linux. <para>
</para> To enable Flatpak, add the following to your
<para> <filename>configuration.nix</filename>:
To enable Flatpak, add the following to your </para>
<filename>configuration.nix</filename>: <programlisting>
<programlisting> services.flatpak.enable = true;
<xref linkend="opt-services.flatpak.enable"/> = true;
</programlisting> </programlisting>
</para> <para>
<para> For the sandboxed apps to work correctly, desktop integration
For the sandboxed apps to work correctly, desktop integration portals need to portals need to be installed. If you run GNOME, this will be handled
be installed. If you run GNOME, this will be handled automatically for you; automatically for you; in other cases, you will need to add
in other cases, you will need to add something like the following to your something like the following to your
<filename>configuration.nix</filename>: <filename>configuration.nix</filename>:
<programlisting> </para>
<xref linkend="opt-xdg.portal.extraPortals"/> = [ pkgs.xdg-desktop-portal-gtk ]; <programlisting>
xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];
</programlisting> </programlisting>
</para> <para>
<para> Then, you will need to add a repository, for example,
Then, you will need to add a repository, for example, <link xlink:href="https://github.com/flatpak/flatpak/wiki">Flathub</link>,
<link xlink:href="https://github.com/flatpak/flatpak/wiki">Flathub</link>, either using the following commands:
either using the following commands: </para>
<screen> <programlisting>
<prompt>$ </prompt>flatpak remote-add --if-not-exists flathub https://flathub.org/repo/flathub.flatpakrepo $ flatpak remote-add --if-not-exists flathub https://flathub.org/repo/flathub.flatpakrepo
<prompt>$ </prompt>flatpak update $ flatpak update
</screen> </programlisting>
or by opening the <para>
<link xlink:href="https://flathub.org/repo/flathub.flatpakrepo">repository or by opening the
file</link> in GNOME Software. <link xlink:href="https://flathub.org/repo/flathub.flatpakrepo">repository
</para> file</link> in GNOME Software.
<para> </para>
Finally, you can search and install programs: <para>
<screen> Finally, you can search and install programs:
<prompt>$ </prompt>flatpak search bustle </para>
<prompt>$ </prompt>flatpak install flathub org.freedesktop.Bustle <programlisting>
<prompt>$ </prompt>flatpak run org.freedesktop.Bustle $ flatpak search bustle
</screen> $ flatpak install flathub org.freedesktop.Bustle
Again, GNOME Software offers graphical interface for these tasks. $ flatpak run org.freedesktop.Bustle
</para> </programlisting>
<para>
Again, GNOME Software offers graphical interface for these tasks.
</para>
</chapter> </chapter>

View file

@ -0,0 +1,39 @@
# Blackfire profiler {#module-services-blackfire}
*Source:* {file}`modules/services/development/blackfire.nix`
*Upstream documentation:* <https://blackfire.io/docs/introduction>
[Blackfire](https://blackfire.io) is a proprietary tool for profiling applications. There are several languages supported by the product but currently only PHP support is packaged in Nixpkgs. The back-end consists of a module that is loaded into the language runtime (called *probe*) and a service (*agent*) that the probe connects to and that sends the profiles to the server.
To use it, you will need to enable the agent and the probe on your server. The exact method will depend on the way you use PHP but here is an example of NixOS configuration for PHP-FPM:
```
let
php = pkgs.php.withExtensions ({ enabled, all }: enabled ++ (with all; [
blackfire
]));
in {
# Enable the probe extension for PHP-FPM.
services.phpfpm = {
phpPackage = php;
};
# Enable and configure the agent.
services.blackfire-agent = {
enable = true;
settings = {
# You will need to get credentials at https://blackfire.io/my/settings/credentials
# You can also use other options described in https://blackfire.io/docs/up-and-running/configuration/agent
server-id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX";
server-token = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX";
};
};
# Make the agent run on start-up.
# (WantedBy= from the upstream unit not respected: https://github.com/NixOS/nixpkgs/issues/81138)
# Alternately, you can start it manually with `systemctl start blackfire-agent`.
systemd.services.blackfire-agent.wantedBy = [ "phpfpm-foo.service" ];
}
```
On your developer machine, you will also want to install [the client](https://blackfire.io/docs/up-and-running/installation#install-a-profiling-client) (see `blackfire` package) or the browser extension to actually trigger the profiling.

View file

@ -1,19 +1,31 @@
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" version="5.0" xml:id="module-services-blackfire"> <!-- Do not edit this file directly, edit its companion .md instead
<title>Blackfire profiler</title> and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<para> <chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-blackfire">
<emphasis>Source:</emphasis> <title>Blackfire profiler</title>
<filename>modules/services/development/blackfire.nix</filename> <para>
</para> <emphasis>Source:</emphasis>
<para> <filename>modules/services/development/blackfire.nix</filename>
<emphasis>Upstream documentation:</emphasis> </para>
<link xlink:href="https://blackfire.io/docs/introduction"/> <para>
</para> <emphasis>Upstream documentation:</emphasis>
<para> <link xlink:href="https://blackfire.io/docs/introduction">https://blackfire.io/docs/introduction</link>
<link xlink:href="https://blackfire.io">Blackfire</link> is a proprietary tool for profiling applications. There are several languages supported by the product but currently only PHP support is packaged in Nixpkgs. The back-end consists of a module that is loaded into the language runtime (called <firstterm>probe</firstterm>) and a service (<firstterm>agent</firstterm>) that the probe connects to and that sends the profiles to the server. </para>
</para> <para>
<para> <link xlink:href="https://blackfire.io">Blackfire</link> is a
To use it, you will need to enable the agent and the probe on your server. The exact method will depend on the way you use PHP but here is an example of NixOS configuration for PHP-FPM: proprietary tool for profiling applications. There are several
<programlisting>let languages supported by the product but currently only PHP support is
packaged in Nixpkgs. The back-end consists of a module that is
loaded into the language runtime (called <emphasis>probe</emphasis>)
and a service (<emphasis>agent</emphasis>) that the probe connects
to and that sends the profiles to the server.
</para>
<para>
To use it, you will need to enable the agent and the probe on your
server. The exact method will depend on the way you use PHP but here
is an example of NixOS configuration for PHP-FPM:
</para>
<programlisting>
let
php = pkgs.php.withExtensions ({ enabled, all }: enabled ++ (with all; [ php = pkgs.php.withExtensions ({ enabled, all }: enabled ++ (with all; [
blackfire blackfire
])); ]));
@ -29,18 +41,21 @@ in {
settings = { settings = {
# You will need to get credentials at https://blackfire.io/my/settings/credentials # You will need to get credentials at https://blackfire.io/my/settings/credentials
# You can also use other options described in https://blackfire.io/docs/up-and-running/configuration/agent # You can also use other options described in https://blackfire.io/docs/up-and-running/configuration/agent
server-id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"; server-id = &quot;XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX&quot;;
server-token = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"; server-token = &quot;XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX&quot;;
}; };
}; };
# Make the agent run on start-up. # Make the agent run on start-up.
# (WantedBy= from the upstream unit not respected: https://github.com/NixOS/nixpkgs/issues/81138) # (WantedBy= from the upstream unit not respected: https://github.com/NixOS/nixpkgs/issues/81138)
# Alternately, you can start it manually with `systemctl start blackfire-agent`. # Alternately, you can start it manually with `systemctl start blackfire-agent`.
systemd.services.blackfire-agent.wantedBy = [ "phpfpm-foo.service" ]; systemd.services.blackfire-agent.wantedBy = [ &quot;phpfpm-foo.service&quot; ];
}</programlisting> }
</para> </programlisting>
<para> <para>
On your developer machine, you will also want to install <link xlink:href="https://blackfire.io/docs/up-and-running/installation#install-a-profiling-client">the client</link> (see <package>blackfire</package> package) or the browser extension to actually trigger the profiling. On your developer machine, you will also want to install
</para> <link xlink:href="https://blackfire.io/docs/up-and-running/installation#install-a-profiling-client">the
client</link> (see <literal>blackfire</literal> package) or the
browser extension to actually trigger the profiling.
</para>
</chapter> </chapter>

View file

@ -0,0 +1,399 @@
# Emacs {#module-services-emacs}
<!--
Documentation contributors:
Damien Cassou @DamienCassou
Thomas Tuegel @ttuegel
Rodney Lorrimar @rvl
Adam Hoese @adisbladis
-->
[Emacs](https://www.gnu.org/software/emacs/) is an
extensible, customizable, self-documenting real-time display editor — and
more. At its core is an interpreter for Emacs Lisp, a dialect of the Lisp
programming language with extensions to support text editing.
Emacs runs within a graphical desktop environment using the X Window System,
but works equally well on a text terminal. Under
macOS, a "Mac port" edition is available, which
uses Apple's native GUI frameworks.
Nixpkgs provides a superior environment for
running Emacs. It's simple to create custom builds
by overriding the default packages. Chaotic collections of Emacs Lisp code
and extensions can be brought under control using declarative package
management. NixOS even provides a
{command}`systemd` user service for automatically starting the Emacs
daemon.
## Installing Emacs {#module-services-emacs-installing}
Emacs can be installed in the normal way for Nix (see
[](#sec-package-management)). In addition, a NixOS
*service* can be enabled.
### The Different Releases of Emacs {#module-services-emacs-releases}
Nixpkgs defines several basic Emacs packages.
The following are attributes belonging to the {var}`pkgs` set:
{var}`emacs`
: The latest stable version of Emacs using the [GTK 2](http://www.gtk.org)
widget toolkit.
{var}`emacs-nox`
: Emacs built without any dependency on X11 libraries.
{var}`emacsMacport`
: Emacs with the "Mac port" patches, providing a more native look and
feel under macOS.
If those aren't suitable, then the following imitation Emacs editors are
also available in Nixpkgs:
[Zile](https://www.gnu.org/software/zile/),
[mg](http://homepage.boetes.org/software/mg/),
[Yi](http://yi-editor.github.io/),
[jmacs](https://joe-editor.sourceforge.io/).
### Adding Packages to Emacs {#module-services-emacs-adding-packages}
Emacs includes an entire ecosystem of functionality beyond text editing,
including a project planner, mail and news reader, debugger interface,
calendar, and more.
Most extensions are gotten with the Emacs packaging system
({file}`package.el`) from
[Emacs Lisp Package Archive (ELPA)](https://elpa.gnu.org/),
[MELPA](https://melpa.org/),
[MELPA Stable](https://stable.melpa.org/), and
[Org ELPA](http://orgmode.org/elpa.html). Nixpkgs is
regularly updated to mirror all these archives.
Under NixOS, you can continue to use
`package-list-packages` and
`package-install` to install packages. You can also
declare the set of Emacs packages you need using the derivations from
Nixpkgs. The rest of this section discusses declarative installation of
Emacs packages through nixpkgs.
The first step to declare the list of packages you want in your Emacs
installation is to create a dedicated derivation. This can be done in a
dedicated {file}`emacs.nix` file such as:
[]{#ex-emacsNix}
```nix
/*
This is a nix expression to build Emacs and some Emacs packages I like
from source on any distribution where Nix is installed. This will install
all the dependencies from the nixpkgs repository and build the binary files
without interfering with the host distribution.
To build the project, type the following from the current directory:
$ nix-build emacs.nix
To run the newly compiled executable:
$ ./result/bin/emacs
*/
# The first non-comment line in this file indicates that
# the whole file represents a function.
{ pkgs ? import <nixpkgs> {} }:
let
# The let expression below defines a myEmacs binding pointing to the
# current stable version of Emacs. This binding is here to separate
# the choice of the Emacs binary from the specification of the
# required packages.
myEmacs = pkgs.emacs;
# This generates an emacsWithPackages function. It takes a single
# argument: a function from a package set to a list of packages
# (the packages that will be available in Emacs).
emacsWithPackages = (pkgs.emacsPackagesFor myEmacs).emacsWithPackages;
in
# The rest of the file specifies the list of packages to install. In the
# example, two packages (magit and zerodark-theme) are taken from
# MELPA stable.
emacsWithPackages (epkgs: (with epkgs.melpaStablePackages; [
magit # ; Integrate git <C-x g>
zerodark-theme # ; Nicolas' theme
])
# Two packages (undo-tree and zoom-frm) are taken from MELPA.
++ (with epkgs.melpaPackages; [
undo-tree # ; <C-x u> to show the undo tree
    zoom-frm # ; increase/decrease font size for all buffers <C-x C-+>
])
# Three packages are taken from GNU ELPA.
++ (with epkgs.elpaPackages; [
auctex # ; LaTeX mode
beacon # ; highlight my cursor when scrolling
nameless # ; hide current package name everywhere in elisp code
])
# notmuch is taken from a nixpkgs derivation which contains an Emacs mode.
++ [
pkgs.notmuch # From main packages set
])
```
The result of this configuration will be an {command}`emacs`
command which launches Emacs with all of your chosen packages in the
{var}`load-path`.
You can check that it works by executing this in a terminal:
```ShellSession
$ nix-build emacs.nix
$ ./result/bin/emacs -q
```
and then typing `M-x package-initialize`. Check that you
can use all the packages you want in this Emacs instance. For example, try
switching to the zerodark theme through `M-x load-theme <RET> zerodark <RET> y`.
::: {.tip}
A few popular extensions worth checking out are: auctex, company,
edit-server, flycheck, helm, iedit, magit, multiple-cursors, projectile,
and yasnippet.
:::
The list of available packages in the various ELPA repositories can be seen
with the following commands:
[]{#module-services-emacs-querying-packages}
```
nix-env -f "<nixpkgs>" -qaP -A emacs.pkgs.elpaPackages
nix-env -f "<nixpkgs>" -qaP -A emacs.pkgs.melpaPackages
nix-env -f "<nixpkgs>" -qaP -A emacs.pkgs.melpaStablePackages
nix-env -f "<nixpkgs>" -qaP -A emacs.pkgs.orgPackages
```
If you are on NixOS, you can install this particular Emacs for all users by
adding it to the list of system packages (see
[](#sec-declarative-package-mgmt)). Simply modify your file
{file}`configuration.nix` to make it contain:
[]{#module-services-emacs-configuration-nix}
```
{
environment.systemPackages = [
# [...]
(import /path/to/emacs.nix { inherit pkgs; })
];
}
```
In this case, the next {command}`nixos-rebuild switch` will take
care of adding your {command}`emacs` to the {var}`PATH`
environment variable (see [](#sec-changing-config)).
<!-- fixme: i think the following is better done with config.nix
https://nixos.org/nixpkgs/manual/#sec-modify-via-packageOverrides
-->
If you are not on NixOS or want to install this particular Emacs only for
yourself, you can do so by adding it to your
{file}`~/.config/nixpkgs/config.nix` (see
[Nixpkgs manual](https://nixos.org/nixpkgs/manual/#sec-modify-via-packageOverrides)):
[]{#module-services-emacs-config-nix}
```
{
packageOverrides = super: let self = super.pkgs; in {
myemacs = import /path/to/emacs.nix { pkgs = self; };
};
}
```
In this case, the next `nix-env -f '<nixpkgs>' -iA
myemacs` will take care of adding your emacs to the
{var}`PATH` environment variable.
### Advanced Emacs Configuration {#module-services-emacs-advanced}
If you want, you can tweak the Emacs package itself from your
{file}`emacs.nix`. For example, if you want to have a
GTK 3-based Emacs instead of the default GTK 2-based binary and remove the
automatically generated {file}`emacs.desktop` (useful if you
only use {command}`emacsclient`), you can change your file
{file}`emacs.nix` in this way:
[]{#ex-emacsGtk3Nix}
```
{ pkgs ? import <nixpkgs> {} }:
let
myEmacs = (pkgs.emacs.override {
# Use gtk3 instead of the default gtk2
withGTK3 = true;
withGTK2 = false;
}).overrideAttrs (attrs: {
# I don't want emacs.desktop file because I only use
# emacsclient.
postInstall = (attrs.postInstall or "") + ''
rm $out/share/applications/emacs.desktop
'';
});
in [...]
```
After building this file as shown in [the example above](#ex-emacsNix), you
will get a GTK 3-based Emacs binary pre-loaded with your favorite packages.
## Running Emacs as a Service {#module-services-emacs-running}
NixOS provides an optional
{command}`systemd` service which launches
[Emacs daemon](https://www.gnu.org/software/emacs/manual/html_node/emacs/Emacs-Server.html)
with the user's login session.
*Source:* {file}`modules/services/editors/emacs.nix`
### Enabling the Service {#module-services-emacs-enabling}
To install and enable the {command}`systemd` user service for Emacs
daemon, add the following to your {file}`configuration.nix`:
```
services.emacs.enable = true;
services.emacs.package = import /home/cassou/.emacs.d { pkgs = pkgs; };
```
The {var}`services.emacs.package` option allows a custom
derivation to be used, for example, one created by
`emacsWithPackages`.
Ensure that the Emacs server is enabled for your user's Emacs
configuration, either by customizing the {var}`server-mode`
variable, or by adding `(server-start)` to
{file}`~/.emacs.d/init.el`.
To start the daemon, execute the following:
```ShellSession
$ nixos-rebuild switch # to activate the new configuration.nix
$ systemctl --user daemon-reload # to force systemd reload
$ systemctl --user start emacs.service # to start the Emacs daemon
```
The server should now be ready to serve Emacs clients.
### Starting the client {#module-services-emacs-starting-client}
Ensure that the emacs server is enabled, either by customizing the
{var}`server-mode` variable, or by adding
`(server-start)` to {file}`~/.emacs`.
To connect to the emacs daemon, run one of the following:
```
emacsclient FILENAME
emacsclient --create-frame # opens a new frame (window)
emacsclient --create-frame --tty # opens a new frame on the current terminal
```
### Configuring the {var}`EDITOR` variable {#module-services-emacs-editor-variable}
<!--<title>{command}`emacsclient` as the Default Editor</title>-->
If [](#opt-services.emacs.defaultEditor) is
`true`, the {var}`EDITOR` variable will be set
to a wrapper script which launches {command}`emacsclient`.
Any setting of {var}`EDITOR` in the shell config files will
override {var}`services.emacs.defaultEditor`. To make sure
{var}`EDITOR` refers to the Emacs wrapper script, remove any
existing {var}`EDITOR` assignment from
{file}`.profile`, {file}`.bashrc`,
{file}`.zshenv` or any other shell config file.
If you have formed certain bad habits when editing files, these can be
corrected with a shell alias to the wrapper script:
```
alias vi=$EDITOR
```
### Per-User Enabling of the Service {#module-services-emacs-per-user}
In general, {command}`systemd` user services are globally enabled
by symlinks in {file}`/etc/systemd/user`. In the case where
Emacs daemon is not wanted for all users, it is possible to install the
service but not globally enable it:
```
services.emacs.enable = false;
services.emacs.install = true;
```
To enable the {command}`systemd` user service for just the
currently logged in user, run:
```
systemctl --user enable emacs
```
This will add the symlink
{file}`~/.config/systemd/user/emacs.service`.
## Configuring Emacs {#module-services-emacs-configuring}
The Emacs init file should be changed to load the extension packages at
startup:
[]{#module-services-emacs-package-initialisation}
```
(require 'package)
;; optional. makes unpure packages archives unavailable
(setq package-archives nil)
(setq package-enable-at-startup nil)
(package-initialize)
```
After the declarative emacs package configuration has been tested,
previously downloaded packages can be cleaned up by removing
{file}`~/.emacs.d/elpa` (do make a backup first, in case you
forgot a package).
<!--
todo: is it worth documenting customizations for
server-switch-hook, server-done-hook?
-->
### A Major Mode for Nix Expressions {#module-services-emacs-major-mode}
Of interest may be {var}`melpaPackages.nix-mode`, which
provides syntax highlighting for the Nix language. This is particularly
convenient if you regularly edit Nix files.
### Accessing man pages {#module-services-emacs-man-pages}
You can use `woman` to get completion of all available
man pages. For example, type `M-x woman <RET> nixos-rebuild <RET>`.
### Editing DocBook 5 XML Documents {#sec-emacs-docbook-xml}
Emacs includes
[nXML](https://www.gnu.org/software/emacs/manual/html_node/nxml-mode/Introduction.html),
a major-mode for validating and editing XML documents. When editing DocBook
5.0 documents, such as [this one](#book-nixos-manual),
nXML needs to be configured with the relevant schema, which is not
included.
To install the DocBook 5.0 schemas, either add
{var}`pkgs.docbook5` to [](#opt-environment.systemPackages)
([NixOS](#sec-declarative-package-mgmt)), or run
`nix-env -f '<nixpkgs>' -iA docbook5`
([Nix](#sec-ad-hoc-packages)).
Then customize the variable {var}`rng-schema-locating-files` to
include {file}`~/.emacs.d/schemas.xml` and put the following
text into that file:
[]{#ex-emacs-docbook-xml}
```xml
<?xml version="1.0"?>
<!--
To let emacs find this file, evaluate:
(add-to-list 'rng-schema-locating-files "~/.emacs.d/schemas.xml")
-->
<locatingRules xmlns="http://thaiopensource.com/ns/locating-rules/1.0">
<!--
Use this variation if pkgs.docbook5 is added to environment.systemPackages
-->
<namespace ns="http://docbook.org/ns/docbook"
uri="/run/current-system/sw/share/xml/docbook-5.0/rng/docbookxi.rnc"/>
<!--
Use this variation if installing schema with "nix-env -iA pkgs.docbook5".
<namespace ns="http://docbook.org/ns/docbook"
uri="../.nix-profile/share/xml/docbook-5.0/rng/docbookxi.rnc"/>
-->
</locatingRules>
```

View file

@ -1,143 +1,121 @@
<chapter xmlns="http://docbook.org/ns/docbook" <!-- Do not edit this file directly, edit its companion .md instead
xmlns:xlink="http://www.w3.org/1999/xlink" and regenerate this file using nixos/doc/manual/md-to-db.sh -->
xmlns:xi="http://www.w3.org/2001/XInclude" <chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-emacs">
version="5.0" <title>Emacs</title>
xml:id="module-services-emacs">
<title>Emacs</title>
<!--
Documentation contributors:
Damien Cassou @DamienCassou
Thomas Tuegel @ttuegel
Rodney Lorrimar @rvl
Adam Hoese @adisbladis
-->
<para>
<link xlink:href="https://www.gnu.org/software/emacs/">Emacs</link> is an
extensible, customizable, self-documenting real-time display editor — and
more. At its core is an interpreter for Emacs Lisp, a dialect of the Lisp
programming language with extensions to support text editing.
</para>
<para>
Emacs runs within a graphical desktop environment using the X Window System,
but works equally well on a text terminal. Under
<productname>macOS</productname>, a "Mac port" edition is available, which
uses Apple's native GUI frameworks.
</para>
<para>
<productname>Nixpkgs</productname> provides a superior environment for
running <application>Emacs</application>. It's simple to create custom builds
by overriding the default packages. Chaotic collections of Emacs Lisp code
and extensions can be brought under control using declarative package
management. <productname>NixOS</productname> even provides a
<command>systemd</command> user service for automatically starting the Emacs
daemon.
</para>
<section xml:id="module-services-emacs-installing">
<title>Installing <application>Emacs</application></title>
<para> <para>
Emacs can be installed in the normal way for Nix (see <link xlink:href="https://www.gnu.org/software/emacs/">Emacs</link>
<xref linkend="sec-package-management" />). In addition, a NixOS is an extensible, customizable, self-documenting real-time display
<emphasis>service</emphasis> can be enabled. editor — and more. At its core is an interpreter for Emacs Lisp, a
dialect of the Lisp programming language with extensions to support
text editing.
</para> </para>
<para>
<section xml:id="module-services-emacs-releases"> Emacs runs within a graphical desktop environment using the X Window
<title>The Different Releases of Emacs</title> System, but works equally well on a text terminal. Under macOS, a
<quote>Mac port</quote> edition is available, which uses Apples
<para> native GUI frameworks.
<productname>Nixpkgs</productname> defines several basic Emacs packages. </para>
The following are attributes belonging to the <varname>pkgs</varname> set: <para>
<variablelist> Nixpkgs provides a superior environment for running Emacs. Its
<varlistentry> simple to create custom builds by overriding the default packages.
<term> Chaotic collections of Emacs Lisp code and extensions can be brought
<varname>emacs</varname> under control using declarative package management. NixOS even
</term> provides a <command>systemd</command> user service for automatically
<term> starting the Emacs daemon.
<varname>emacs</varname> </para>
</term> <section xml:id="module-services-emacs-installing">
<listitem> <title>Installing Emacs</title>
<para> <para>
The latest stable version of Emacs using the Emacs can be installed in the normal way for Nix (see
<link <xref linkend="sec-package-management" />). In addition, a NixOS
xlink:href="http://www.gtk.org">GTK 2</link> <emphasis>service</emphasis> can be enabled.
widget toolkit. </para>
</para> <section xml:id="module-services-emacs-releases">
</listitem> <title>The Different Releases of Emacs</title>
</varlistentry> <para>
<varlistentry> Nixpkgs defines several basic Emacs packages. The following are
<term> attributes belonging to the <varname>pkgs</varname> set:
<varname>emacs-nox</varname> </para>
</term> <variablelist spacing="compact">
<listitem> <varlistentry>
<para> <term>
Emacs built without any dependency on X11 libraries. <varname>emacs</varname>
</para> </term>
</listitem> <listitem>
</varlistentry> <para>
<varlistentry> The latest stable version of Emacs using the
<term> <link xlink:href="http://www.gtk.org">GTK 2</link> widget
<varname>emacsMacport</varname> toolkit.
</term> </para>
<term> </listitem>
<varname>emacsMacport</varname> </varlistentry>
</term> <varlistentry>
<listitem> <term>
<para> <varname>emacs-nox</varname>
Emacs with the "Mac port" patches, providing a more native look and </term>
feel under macOS. <listitem>
</para> <para>
</listitem> Emacs built without any dependency on X11 libraries.
</varlistentry> </para>
</variablelist> </listitem>
</para> </varlistentry>
<varlistentry>
<para> <term>
If those aren't suitable, then the following imitation Emacs editors are <varname>emacsMacport</varname>
also available in Nixpkgs: </term>
<link xlink:href="https://www.gnu.org/software/zile/">Zile</link>, <listitem>
<link xlink:href="http://homepage.boetes.org/software/mg/">mg</link>, <para>
<link xlink:href="http://yi-editor.github.io/">Yi</link>, Emacs with the <quote>Mac port</quote> patches, providing
<link xlink:href="https://joe-editor.sourceforge.io/">jmacs</link>. a more native look and feel under macOS.
</para> </para>
</section> </listitem>
</varlistentry>
<section xml:id="module-services-emacs-adding-packages"> </variablelist>
<title>Adding Packages to Emacs</title> <para>
If those arent suitable, then the following imitation Emacs
<para> editors are also available in Nixpkgs:
Emacs includes an entire ecosystem of functionality beyond text editing, <link xlink:href="https://www.gnu.org/software/zile/">Zile</link>,
including a project planner, mail and news reader, debugger interface, <link xlink:href="http://homepage.boetes.org/software/mg/">mg</link>,
calendar, and more. <link xlink:href="http://yi-editor.github.io/">Yi</link>,
</para> <link xlink:href="https://joe-editor.sourceforge.io/">jmacs</link>.
</para>
<para> </section>
Most extensions are gotten with the Emacs packaging system <section xml:id="module-services-emacs-adding-packages">
(<filename>package.el</filename>) from <title>Adding Packages to Emacs</title>
<link <para>
xlink:href="https://elpa.gnu.org/">Emacs Lisp Package Archive Emacs includes an entire ecosystem of functionality beyond text
(<acronym>ELPA</acronym>)</link>, editing, including a project planner, mail and news reader,
<link xlink:href="https://melpa.org/"><acronym>MELPA</acronym></link>, debugger interface, calendar, and more.
<link xlink:href="https://stable.melpa.org/">MELPA Stable</link>, and </para>
<link xlink:href="http://orgmode.org/elpa.html">Org ELPA</link>. Nixpkgs is <para>
regularly updated to mirror all these archives. Most extensions are gotten with the Emacs packaging system
</para> (<filename>package.el</filename>) from
<link xlink:href="https://elpa.gnu.org/">Emacs Lisp Package
<para> Archive (ELPA)</link>,
Under NixOS, you can continue to use <link xlink:href="https://melpa.org/">MELPA</link>,
<function>package-list-packages</function> and <link xlink:href="https://stable.melpa.org/">MELPA
<function>package-install</function> to install packages. You can also Stable</link>, and
declare the set of Emacs packages you need using the derivations from <link xlink:href="http://orgmode.org/elpa.html">Org ELPA</link>.
Nixpkgs. The rest of this section discusses declarative installation of Nixpkgs is regularly updated to mirror all these archives.
Emacs packages through nixpkgs. </para>
</para> <para>
Under NixOS, you can continue to use
<para> <literal>package-list-packages</literal> and
The first step to declare the list of packages you want in your Emacs <literal>package-install</literal> to install packages. You can
installation is to create a dedicated derivation. This can be done in a also declare the set of Emacs packages you need using the
dedicated <filename>emacs.nix</filename> file such as: derivations from Nixpkgs. The rest of this section discusses
<example xml:id="ex-emacsNix"> declarative installation of Emacs packages through nixpkgs.
<title>Nix expression to build Emacs with packages (<filename>emacs.nix</filename>)</title> </para>
<programlisting language="nix"> <para>
The first step to declare the list of packages you want in your
Emacs installation is to create a dedicated derivation. This can
be done in a dedicated <filename>emacs.nix</filename> file such
as:
</para>
<para>
<anchor xml:id="ex-emacsNix" />
</para>
<programlisting language="nix">
/* /*
This is a nix expression to build Emacs and some Emacs packages I like This is a nix expression to build Emacs and some Emacs packages I like
from source on any distribution where Nix is installed. This will install from source on any distribution where Nix is installed. This will install
@ -152,185 +130,142 @@ To run the newly compiled executable:
$ ./result/bin/emacs $ ./result/bin/emacs
*/ */
{ pkgs ? import &lt;nixpkgs&gt; {} }: <co xml:id="ex-emacsNix-1" />
# The first non-comment line in this file indicates that
# the whole file represents a function.
{ pkgs ? import &lt;nixpkgs&gt; {} }:
let let
myEmacs = pkgs.emacs; <co xml:id="ex-emacsNix-2" /> # The let expression below defines a myEmacs binding pointing to the
emacsWithPackages = (pkgs.emacsPackagesFor myEmacs).emacsWithPackages; <co xml:id="ex-emacsNix-3" /> # current stable version of Emacs. This binding is here to separate
# the choice of the Emacs binary from the specification of the
# required packages.
myEmacs = pkgs.emacs;
# This generates an emacsWithPackages function. It takes a single
# argument: a function from a package set to a list of packages
# (the packages that will be available in Emacs).
emacsWithPackages = (pkgs.emacsPackagesFor myEmacs).emacsWithPackages;
in in
emacsWithPackages (epkgs: (with epkgs.melpaStablePackages; [ <co xml:id="ex-emacsNix-4" /> # The rest of the file specifies the list of packages to install. In the
# example, two packages (magit and zerodark-theme) are taken from
# MELPA stable.
emacsWithPackages (epkgs: (with epkgs.melpaStablePackages; [
magit # ; Integrate git &lt;C-x g&gt; magit # ; Integrate git &lt;C-x g&gt;
zerodark-theme # ; Nicolas' theme zerodark-theme # ; Nicolas' theme
]) ++ (with epkgs.melpaPackages; [ <co xml:id="ex-emacsNix-5" /> ])
# Two packages (undo-tree and zoom-frm) are taken from MELPA.
++ (with epkgs.melpaPackages; [
undo-tree # ; &lt;C-x u&gt; to show the undo tree undo-tree # ; &lt;C-x u&gt; to show the undo tree
zoom-frm # ; increase/decrease font size for all buffers %lt;C-x C-+&gt; zoom-frm # ; increase/decrease font size for all buffers %lt;C-x C-+&gt;
]) ++ (with epkgs.elpaPackages; [ <co xml:id="ex-emacsNix-6" /> ])
# Three packages are taken from GNU ELPA.
++ (with epkgs.elpaPackages; [
auctex # ; LaTeX mode auctex # ; LaTeX mode
beacon # ; highlight my cursor when scrolling beacon # ; highlight my cursor when scrolling
nameless # ; hide current package name everywhere in elisp code nameless # ; hide current package name everywhere in elisp code
]) ++ [ ])
pkgs.notmuch # From main packages set <co xml:id="ex-emacsNix-7" /> # notmuch is taken from a nixpkgs derivation which contains an Emacs mode.
++ [
pkgs.notmuch # From main packages set
]) ])
</programlisting> </programlisting>
</example>
<calloutlist>
<callout arearefs="ex-emacsNix-1">
<para> <para>
The first non-comment line in this file (<literal>{ pkgs ? ... The result of this configuration will be an
}</literal>) indicates that the whole file represents a function. <command>emacs</command> command which launches Emacs with all
of your chosen packages in the <varname>load-path</varname>.
</para> </para>
</callout>
<callout arearefs="ex-emacsNix-2">
<para> <para>
The <varname>let</varname> expression below defines a You can check that it works by executing this in a terminal:
<varname>myEmacs</varname> binding pointing to the current stable
version of Emacs. This binding is here to separate the choice of the
Emacs binary from the specification of the required packages.
</para> </para>
</callout> <programlisting>
<callout arearefs="ex-emacsNix-3"> $ nix-build emacs.nix
$ ./result/bin/emacs -q
</programlisting>
<para> <para>
This generates an <varname>emacsWithPackages</varname> function. It and then typing <literal>M-x package-initialize</literal>. Check
takes a single argument: a function from a package set to a list of that you can use all the packages you want in this Emacs
packages (the packages that will be available in Emacs). instance. For example, try switching to the zerodark theme
through
<literal>M-x load-theme &lt;RET&gt; zerodark &lt;RET&gt; y</literal>.
</para> </para>
</callout> <tip>
<callout arearefs="ex-emacsNix-4"> <para>
A few popular extensions worth checking out are: auctex,
company, edit-server, flycheck, helm, iedit, magit,
multiple-cursors, projectile, and yasnippet.
</para>
</tip>
<para> <para>
The rest of the file specifies the list of packages to install. In the The list of available packages in the various ELPA repositories
example, two packages (<varname>magit</varname> and can be seen with the following commands:
<varname>zerodark-theme</varname>) are taken from MELPA stable. <anchor xml:id="module-services-emacs-querying-packages" />
</para> </para>
</callout> <programlisting>
<callout arearefs="ex-emacsNix-5"> nix-env -f &quot;&lt;nixpkgs&gt;&quot; -qaP -A emacs.pkgs.elpaPackages
nix-env -f &quot;&lt;nixpkgs&gt;&quot; -qaP -A emacs.pkgs.melpaPackages
nix-env -f &quot;&lt;nixpkgs&gt;&quot; -qaP -A emacs.pkgs.melpaStablePackages
nix-env -f &quot;&lt;nixpkgs&gt;&quot; -qaP -A emacs.pkgs.orgPackages
</programlisting>
<para> <para>
Two packages (<varname>undo-tree</varname> and If you are on NixOS, you can install this particular Emacs for
<varname>zoom-frm</varname>) are taken from MELPA. all users by adding it to the list of system packages (see
<xref linkend="sec-declarative-package-mgmt" />). Simply modify
your file <filename>configuration.nix</filename> to make it
contain:
<anchor xml:id="module-services-emacs-configuration-nix" />
</para> </para>
</callout> <programlisting>
<callout arearefs="ex-emacsNix-6">
<para>
Three packages are taken from GNU ELPA.
</para>
</callout>
<callout arearefs="ex-emacsNix-7">
<para>
<varname>notmuch</varname> is taken from a nixpkgs derivation which
contains an Emacs mode.
</para>
</callout>
</calloutlist>
</para>
<para>
The result of this configuration will be an <command>emacs</command>
command which launches Emacs with all of your chosen packages in the
<varname>load-path</varname>.
</para>
<para>
You can check that it works by executing this in a terminal:
<screen>
<prompt>$ </prompt>nix-build emacs.nix
<prompt>$ </prompt>./result/bin/emacs -q
</screen>
and then typing <literal>M-x package-initialize</literal>. Check that you
can use all the packages you want in this Emacs instance. For example, try
switching to the zerodark theme through <literal>M-x load-theme &lt;RET&gt;
zerodark &lt;RET&gt; y</literal>.
</para>
<tip>
<para>
A few popular extensions worth checking out are: auctex, company,
edit-server, flycheck, helm, iedit, magit, multiple-cursors, projectile,
and yasnippet.
</para>
</tip>
<para>
The list of available packages in the various ELPA repositories can be seen
with the following commands:
<example xml:id="module-services-emacs-querying-packages">
<title>Querying Emacs packages</title>
<programlisting><![CDATA[
nix-env -f "<nixpkgs>" -qaP -A emacs.pkgs.elpaPackages
nix-env -f "<nixpkgs>" -qaP -A emacs.pkgs.melpaPackages
nix-env -f "<nixpkgs>" -qaP -A emacs.pkgs.melpaStablePackages
nix-env -f "<nixpkgs>" -qaP -A emacs.pkgs.orgPackages
]]></programlisting>
</example>
</para>
<para>
If you are on NixOS, you can install this particular Emacs for all users by
adding it to the list of system packages (see
<xref linkend="sec-declarative-package-mgmt" />). Simply modify your file
<filename>configuration.nix</filename> to make it contain:
<example xml:id="module-services-emacs-configuration-nix">
<title>Custom Emacs in <filename>configuration.nix</filename></title>
<programlisting><![CDATA[
{ {
environment.systemPackages = [ environment.systemPackages = [
# [...] # [...]
(import /path/to/emacs.nix { inherit pkgs; }) (import /path/to/emacs.nix { inherit pkgs; })
]; ];
} }
]]></programlisting> </programlisting>
</example> <para>
</para> In this case, the next <command>nixos-rebuild switch</command>
will take care of adding your <command>emacs</command> to the
<para> <varname>PATH</varname> environment variable (see
In this case, the next <command>nixos-rebuild switch</command> will take <xref linkend="sec-changing-config" />).
care of adding your <command>emacs</command> to the <varname>PATH</varname> </para>
environment variable (see <xref linkend="sec-changing-config" />). <para>
</para> If you are not on NixOS or want to install this particular Emacs
only for yourself, you can do so by adding it to your
<!-- fixme: i think the following is better done with config.nix <filename>~/.config/nixpkgs/config.nix</filename> (see
https://nixos.org/nixpkgs/manual/#sec-modify-via-packageOverrides <link xlink:href="https://nixos.org/nixpkgs/manual/#sec-modify-via-packageOverrides">Nixpkgs
--> manual</link>):
<anchor xml:id="module-services-emacs-config-nix" />
<para> </para>
If you are not on NixOS or want to install this particular Emacs only for <programlisting>
yourself, you can do so by adding it to your
<filename>~/.config/nixpkgs/config.nix</filename> (see
<link xlink:href="https://nixos.org/nixpkgs/manual/#sec-modify-via-packageOverrides">Nixpkgs
manual</link>):
<example xml:id="module-services-emacs-config-nix">
<title>Custom Emacs in <filename>~/.config/nixpkgs/config.nix</filename></title>
<programlisting><![CDATA[
{ {
packageOverrides = super: let self = super.pkgs; in { packageOverrides = super: let self = super.pkgs; in {
myemacs = import /path/to/emacs.nix { pkgs = self; }; myemacs = import /path/to/emacs.nix { pkgs = self; };
}; };
} }
]]></programlisting> </programlisting>
</example> <para>
</para> In this case, the next
<literal>nix-env -f '&lt;nixpkgs&gt;' -iA myemacs</literal> will
<para> take care of adding your emacs to the <varname>PATH</varname>
In this case, the next <literal>nix-env -f '&lt;nixpkgs&gt;' -iA environment variable.
myemacs</literal> will take care of adding your emacs to the </para>
<varname>PATH</varname> environment variable. </section>
</para> <section xml:id="module-services-emacs-advanced">
</section> <title>Advanced Emacs Configuration</title>
<para>
<section xml:id="module-services-emacs-advanced"> If you want, you can tweak the Emacs package itself from your
<title>Advanced Emacs Configuration</title> <filename>emacs.nix</filename>. For example, if you want to have
a GTK 3-based Emacs instead of the default GTK 2-based binary
<para> and remove the automatically generated
If you want, you can tweak the Emacs package itself from your <filename>emacs.desktop</filename> (useful if you only use
<filename>emacs.nix</filename>. For example, if you want to have a <command>emacsclient</command>), you can change your file
GTK 3-based Emacs instead of the default GTK 2-based binary and remove the <filename>emacs.nix</filename> in this way:
automatically generated <filename>emacs.desktop</filename> (useful if you </para>
only use <command>emacsclient</command>), you can change your file <para>
<filename>emacs.nix</filename> in this way: <anchor xml:id="ex-emacsGtk3Nix" />
</para> </para>
<programlisting>
<example xml:id="ex-emacsGtk3Nix"> { pkgs ? import &lt;nixpkgs&gt; {} }:
<title>Custom Emacs build</title>
<programlisting><![CDATA[
{ pkgs ? import <nixpkgs> {} }:
let let
myEmacs = (pkgs.emacs.override { myEmacs = (pkgs.emacs.override {
# Use gtk3 instead of the default gtk2 # Use gtk3 instead of the default gtk2
@ -339,149 +274,143 @@ let
}).overrideAttrs (attrs: { }).overrideAttrs (attrs: {
# I don't want emacs.desktop file because I only use # I don't want emacs.desktop file because I only use
# emacsclient. # emacsclient.
postInstall = (attrs.postInstall or "") + '' postInstall = (attrs.postInstall or &quot;&quot;) + ''
rm $out/share/applications/emacs.desktop rm $out/share/applications/emacs.desktop
''; '';
}); });
in [...] in [...]
]]></programlisting>
</example>
<para>
After building this file as shown in <xref linkend="ex-emacsNix" />, you
will get an GTK 3-based Emacs binary pre-loaded with your favorite packages.
</para>
</section>
</section>
<section xml:id="module-services-emacs-running">
<title>Running Emacs as a Service</title>
<para>
<productname>NixOS</productname> provides an optional
<command>systemd</command> service which launches
<link xlink:href="https://www.gnu.org/software/emacs/manual/html_node/emacs/Emacs-Server.html">
Emacs daemon </link> with the user's login session.
</para>
<para>
<emphasis>Source:</emphasis>
<filename>modules/services/editors/emacs.nix</filename>
</para>
<section xml:id="module-services-emacs-enabling">
<title>Enabling the Service</title>
<para>
To install and enable the <command>systemd</command> user service for Emacs
daemon, add the following to your <filename>configuration.nix</filename>:
<programlisting>
<xref linkend="opt-services.emacs.enable"/> = true;
<xref linkend="opt-services.emacs.package"/> = import /home/cassou/.emacs.d { pkgs = pkgs; };
</programlisting> </programlisting>
</para> <para>
After building this file as shown in
<para> <link linkend="ex-emacsNix">the example above</link>, you will
The <varname>services.emacs.package</varname> option allows a custom get an GTK 3-based Emacs binary pre-loaded with your favorite
derivation to be used, for example, one created by packages.
<function>emacsWithPackages</function>. </para>
</para> </section>
<para>
Ensure that the Emacs server is enabled for your user's Emacs
configuration, either by customizing the <varname>server-mode</varname>
variable, or by adding <literal>(server-start)</literal> to
<filename>~/.emacs.d/init.el</filename>.
</para>
<para>
To start the daemon, execute the following:
<screen>
<prompt>$ </prompt>nixos-rebuild switch # to activate the new configuration.nix
<prompt>$ </prompt>systemctl --user daemon-reload # to force systemd reload
<prompt>$ </prompt>systemctl --user start emacs.service # to start the Emacs daemon
</screen>
The server should now be ready to serve Emacs clients.
</para>
</section> </section>
<section xml:id="module-services-emacs-running">
<section xml:id="module-services-emacs-starting-client"> <title>Running Emacs as a Service</title>
<title>Starting the client</title> <para>
NixOS provides an optional <command>systemd</command> service
<para> which launches
Ensure that the emacs server is enabled, either by customizing the <link xlink:href="https://www.gnu.org/software/emacs/manual/html_node/emacs/Emacs-Server.html">Emacs
<varname>server-mode</varname> variable, or by adding daemon</link> with the users login session.
<literal>(server-start)</literal> to <filename>~/.emacs</filename>. </para>
</para> <para>
<emphasis>Source:</emphasis>
<para> <filename>modules/services/editors/emacs.nix</filename>
To connect to the emacs daemon, run one of the following: </para>
<programlisting><![CDATA[ <section xml:id="module-services-emacs-enabling">
<title>Enabling the Service</title>
<para>
To install and enable the <command>systemd</command> user
service for Emacs daemon, add the following to your
<filename>configuration.nix</filename>:
</para>
<programlisting>
services.emacs.enable = true;
services.emacs.package = import /home/cassou/.emacs.d { pkgs = pkgs; };
</programlisting>
<para>
The <varname>services.emacs.package</varname> option allows a
custom derivation to be used, for example, one created by
<literal>emacsWithPackages</literal>.
</para>
<para>
Ensure that the Emacs server is enabled for your users Emacs
configuration, either by customizing the
<varname>server-mode</varname> variable, or by adding
<literal>(server-start)</literal> to
<filename>~/.emacs.d/init.el</filename>.
</para>
<para>
To start the daemon, execute the following:
</para>
<programlisting>
$ nixos-rebuild switch # to activate the new configuration.nix
$ systemctl --user daemon-reload # to force systemd reload
$ systemctl --user start emacs.service # to start the Emacs daemon
</programlisting>
<para>
The server should now be ready to serve Emacs clients.
</para>
</section>
<section xml:id="module-services-emacs-starting-client">
<title>Starting the client</title>
<para>
Ensure that the emacs server is enabled, either by customizing
the <varname>server-mode</varname> variable, or by adding
<literal>(server-start)</literal> to
<filename>~/.emacs</filename>.
</para>
<para>
To connect to the emacs daemon, run one of the following:
</para>
<programlisting>
emacsclient FILENAME emacsclient FILENAME
emacsclient --create-frame # opens a new frame (window) emacsclient --create-frame # opens a new frame (window)
emacsclient --create-frame --tty # opens a new frame on the current terminal emacsclient --create-frame --tty # opens a new frame on the current terminal
]]></programlisting>
</para>
</section>
<section xml:id="module-services-emacs-editor-variable">
<title>Configuring the <varname>EDITOR</varname> variable</title>
<!--<title><command>emacsclient</command> as the Default Editor</title>-->
<para>
If <xref linkend="opt-services.emacs.defaultEditor"/> is
<literal>true</literal>, the <varname>EDITOR</varname> variable will be set
to a wrapper script which launches <command>emacsclient</command>.
</para>
<para>
Any setting of <varname>EDITOR</varname> in the shell config files will
override <varname>services.emacs.defaultEditor</varname>. To make sure
<varname>EDITOR</varname> refers to the Emacs wrapper script, remove any
existing <varname>EDITOR</varname> assignment from
<filename>.profile</filename>, <filename>.bashrc</filename>,
<filename>.zshenv</filename> or any other shell config file.
</para>
<para>
If you have formed certain bad habits when editing files, these can be
corrected with a shell alias to the wrapper script:
<programlisting>alias vi=$EDITOR</programlisting>
</para>
</section>
<section xml:id="module-services-emacs-per-user">
<title>Per-User Enabling of the Service</title>
<para>
In general, <command>systemd</command> user services are globally enabled
by symlinks in <filename>/etc/systemd/user</filename>. In the case where
Emacs daemon is not wanted for all users, it is possible to install the
service but not globally enable it:
<programlisting>
<xref linkend="opt-services.emacs.enable"/> = false;
<xref linkend="opt-services.emacs.install"/> = true;
</programlisting> </programlisting>
</para> </section>
<section xml:id="module-services-emacs-editor-variable">
<para> <title>Configuring the <varname>EDITOR</varname> variable</title>
To enable the <command>systemd</command> user service for just the <para>
currently logged in user, run: If <xref linkend="opt-services.emacs.defaultEditor" /> is
<programlisting>systemctl --user enable emacs</programlisting> <literal>true</literal>, the <varname>EDITOR</varname> variable
This will add the symlink will be set to a wrapper script which launches
<filename>~/.config/systemd/user/emacs.service</filename>. <command>emacsclient</command>.
</para> </para>
<para>
Any setting of <varname>EDITOR</varname> in the shell config
files will override
<varname>services.emacs.defaultEditor</varname>. To make sure
<varname>EDITOR</varname> refers to the Emacs wrapper script,
remove any existing <varname>EDITOR</varname> assignment from
<filename>.profile</filename>, <filename>.bashrc</filename>,
<filename>.zshenv</filename> or any other shell config file.
</para>
<para>
If you have formed certain bad habits when editing files, these
can be corrected with a shell alias to the wrapper script:
</para>
<programlisting>
alias vi=$EDITOR
</programlisting>
</section>
<section xml:id="module-services-emacs-per-user">
<title>Per-User Enabling of the Service</title>
<para>
In general, <command>systemd</command> user services are
globally enabled by symlinks in
<filename>/etc/systemd/user</filename>. In the case where Emacs
daemon is not wanted for all users, it is possible to install
the service but not globally enable it:
</para>
<programlisting>
services.emacs.enable = false;
services.emacs.install = true;
</programlisting>
<para>
To enable the <command>systemd</command> user service for just
the currently logged in user, run:
</para>
<programlisting>
systemctl --user enable emacs
</programlisting>
<para>
This will add the symlink
<filename>~/.config/systemd/user/emacs.service</filename>.
</para>
</section>
</section> </section>
</section> <section xml:id="module-services-emacs-configuring">
<section xml:id="module-services-emacs-configuring"> <title>Configuring Emacs</title>
<title>Configuring Emacs</title> <para>
The Emacs init file should be changed to load the extension
<para> packages at startup:
The Emacs init file should be changed to load the extension packages at <anchor xml:id="module-services-emacs-package-initialisation" />
startup: </para>
<example xml:id="module-services-emacs-package-initialisation"> <programlisting>
<title>Package initialization in <filename>.emacs</filename></title>
<programlisting><![CDATA[
(require 'package) (require 'package)
;; optional. makes unpure packages archives unavailable ;; optional. makes unpure packages archives unavailable
@ -489,92 +418,73 @@ emacsclient --create-frame --tty # opens a new frame on the current terminal
(setq package-enable-at-startup nil) (setq package-enable-at-startup nil)
(package-initialize) (package-initialize)
]]></programlisting> </programlisting>
</example> <para>
</para> After the declarative emacs package configuration has been tested,
previously downloaded packages can be cleaned up by removing
<para> <filename>~/.emacs.d/elpa</filename> (do make a backup first, in
After the declarative emacs package configuration has been tested, case you forgot a package).
previously downloaded packages can be cleaned up by removing </para>
<filename>~/.emacs.d/elpa</filename> (do make a backup first, in case you <section xml:id="module-services-emacs-major-mode">
forgot a package). <title>A Major Mode for Nix Expressions</title>
</para> <para>
Of interest may be <varname>melpaPackages.nix-mode</varname>,
<!-- which provides syntax highlighting for the Nix language. This is
todo: is it worth documenting customizations for particularly convenient if you regularly edit Nix files.
server-switch-hook, server-done-hook? </para>
--> </section>
<section xml:id="module-services-emacs-man-pages">
<section xml:id="module-services-emacs-major-mode"> <title>Accessing man pages</title>
<title>A Major Mode for Nix Expressions</title> <para>
You can use <literal>woman</literal> to get completion of all
<para> available man pages. For example, type
Of interest may be <varname>melpaPackages.nix-mode</varname>, which <literal>M-x woman &lt;RET&gt; nixos-rebuild &lt;RET&gt;.</literal>
provides syntax highlighting for the Nix language. This is particularly </para>
convenient if you regularly edit Nix files. </section>
</para> <section xml:id="sec-emacs-docbook-xml">
</section> <title>Editing DocBook 5 XML Documents</title>
<para>
<section xml:id="module-services-emacs-man-pages"> Emacs includes
<title>Accessing man pages</title> <link xlink:href="https://www.gnu.org/software/emacs/manual/html_node/nxml-mode/Introduction.html">nXML</link>,
a major-mode for validating and editing XML documents. When
<para> editing DocBook 5.0 documents, such as
You can use <function>woman</function> to get completion of all available <link linkend="book-nixos-manual">this one</link>, nXML needs to
man pages. For example, type <literal>M-x woman &lt;RET&gt; nixos-rebuild be configured with the relevant schema, which is not included.
&lt;RET&gt;.</literal> </para>
</para> <para>
</section> To install the DocBook 5.0 schemas, either add
<varname>pkgs.docbook5</varname> to
<section xml:id="sec-emacs-docbook-xml"> <xref linkend="opt-environment.systemPackages" />
<title>Editing DocBook 5 XML Documents</title> (<link linkend="sec-declarative-package-mgmt">NixOS</link>), or
run <literal>nix-env -f '&lt;nixpkgs&gt;' -iA docbook5</literal>
<para> (<link linkend="sec-ad-hoc-packages">Nix</link>).
Emacs includes </para>
<link <para>
xlink:href="https://www.gnu.org/software/emacs/manual/html_node/nxml-mode/Introduction.html">nXML</link>, Then customize the variable
a major-mode for validating and editing XML documents. When editing DocBook <varname>rng-schema-locating-files</varname> to include
5.0 documents, such as <link linkend="book-nixos-manual">this one</link>, <filename>~/.emacs.d/schemas.xml</filename> and put the
nXML needs to be configured with the relevant schema, which is not following text into that file:
included. <anchor xml:id="ex-emacs-docbook-xml" />
</para> </para>
<programlisting language="xml">
<para> &lt;?xml version=&quot;1.0&quot;?&gt;
To install the DocBook 5.0 schemas, either add &lt;!--
<varname>pkgs.docbook5</varname> to
<xref linkend="opt-environment.systemPackages"/>
(<link
linkend="sec-declarative-package-mgmt">NixOS</link>), or run
<literal>nix-env -f '&lt;nixpkgs&gt;' -iA docbook5</literal>
(<link linkend="sec-ad-hoc-packages">Nix</link>).
</para>
<para>
Then customize the variable <varname>rng-schema-locating-files</varname> to
include <filename>~/.emacs.d/schemas.xml</filename> and put the following
text into that file:
<example xml:id="ex-emacs-docbook-xml">
<title>nXML Schema Configuration (<filename>~/.emacs.d/schemas.xml</filename>)</title>
<programlisting language="xml"><![CDATA[
<?xml version="1.0"?>
<!--
To let emacs find this file, evaluate: To let emacs find this file, evaluate:
(add-to-list 'rng-schema-locating-files "~/.emacs.d/schemas.xml") (add-to-list 'rng-schema-locating-files &quot;~/.emacs.d/schemas.xml&quot;)
--> --&gt;
<locatingRules xmlns="http://thaiopensource.com/ns/locating-rules/1.0"> &lt;locatingRules xmlns=&quot;http://thaiopensource.com/ns/locating-rules/1.0&quot;&gt;
<!-- &lt;!--
Use this variation if pkgs.docbook5 is added to environment.systemPackages Use this variation if pkgs.docbook5 is added to environment.systemPackages
--> --&gt;
<namespace ns="http://docbook.org/ns/docbook" &lt;namespace ns=&quot;http://docbook.org/ns/docbook&quot;
uri="/run/current-system/sw/share/xml/docbook-5.0/rng/docbookxi.rnc"/> uri=&quot;/run/current-system/sw/share/xml/docbook-5.0/rng/docbookxi.rnc&quot;/&gt;
<!-- &lt;!--
Use this variation if installing schema with "nix-env -iA pkgs.docbook5". Use this variation if installing schema with &quot;nix-env -iA pkgs.docbook5&quot;.
<namespace ns="http://docbook.org/ns/docbook" &lt;namespace ns=&quot;http://docbook.org/ns/docbook&quot;
uri="../.nix-profile/share/xml/docbook-5.0/rng/docbookxi.rnc"/> uri=&quot;../.nix-profile/share/xml/docbook-5.0/rng/docbookxi.rnc&quot;/&gt;
--> --&gt;
</locatingRules> &lt;/locatingRules&gt;
]]></programlisting> </programlisting>
</example> </section>
</para>
</section> </section>
</section>
</chapter> </chapter>

View file

@ -42,6 +42,13 @@ in
ExecStart = "${pkgs.lm_sensors}/sbin/fancontrol ${configFile}"; ExecStart = "${pkgs.lm_sensors}/sbin/fancontrol ${configFile}";
}; };
}; };
# On some systems, the fancontrol service does not resume properly after sleep because the pwm status of the fans
# is not reset properly. Restarting the service fixes this, in accordance with https://github.com/lm-sensors/lm-sensors/issues/172.
powerManagement.resumeCommands = ''
systemctl restart fancontrol.service
'';
}; };
meta.maintainers = [ maintainers.evils ]; meta.maintainers = [ maintainers.evils ];

View file

@ -0,0 +1,17 @@
# Trezor {#trezor}
Trezor is an open-source cryptocurrency hardware wallet and security token
allowing secure storage of private keys.
It offers advanced features such as U2F two-factor authorization, SSH login
through
[Trezor SSH agent](https://wiki.trezor.io/Apps:SSH_agent),
[GPG](https://wiki.trezor.io/GPG) and a
[password manager](https://wiki.trezor.io/Trezor_Password_Manager).
For more information, guides and documentation, see <https://wiki.trezor.io>.
To enable Trezor support, add the following to your {file}`configuration.nix`:
services.trezord.enable = true;
This will add all necessary udev rules and start Trezor Bridge.

View file

@ -1,26 +1,29 @@
<chapter xmlns="http://docbook.org/ns/docbook" <!-- Do not edit this file directly, edit its companion .md instead
xmlns:xlink="http://www.w3.org/1999/xlink" and regenerate this file using nixos/doc/manual/md-to-db.sh -->
xmlns:xi="http://www.w3.org/2001/XInclude" <chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="trezor">
version="5.0" <title>Trezor</title>
xml:id="trezor"> <para>
<title>Trezor</title> Trezor is an open-source cryptocurrency hardware wallet and security
<para> token allowing secure storage of private keys.
Trezor is an open-source cryptocurrency hardware wallet and security token </para>
allowing secure storage of private keys. <para>
</para> It offers advanced features such U2F two-factor authorization, SSH
<para> login through
It offers advanced features such U2F two-factor authorization, SSH login <link xlink:href="https://wiki.trezor.io/Apps:SSH_agent">Trezor SSH
through agent</link>,
<link xlink:href="https://wiki.trezor.io/Apps:SSH_agent">Trezor SSH agent</link>, <link xlink:href="https://wiki.trezor.io/GPG">GPG</link> and a
<link xlink:href="https://wiki.trezor.io/GPG">GPG</link> and a <link xlink:href="https://wiki.trezor.io/Trezor_Password_Manager">password
<link xlink:href="https://wiki.trezor.io/Trezor_Password_Manager">password manager</link>. manager</link>. For more information, guides and documentation, see
For more information, guides and documentation, see <link xlink:href="https://wiki.trezor.io"/>. <link xlink:href="https://wiki.trezor.io">https://wiki.trezor.io</link>.
</para> </para>
<para> <para>
To enable Trezor support, add the following to your <filename>configuration.nix</filename>: To enable Trezor support, add the following to your
<programlisting> <filename>configuration.nix</filename>:
<xref linkend="opt-services.trezord.enable"/> = true; </para>
<programlisting>
services.trezord.enable = true;
</programlisting> </programlisting>
This will add all necessary udev rules and start Trezor Bridge. <para>
</para> This will add all necessary udev rules and start Trezor Bridge.
</para>
</chapter> </chapter>

View file

@ -0,0 +1,82 @@
# Mailman {#module-services-mailman}
[Mailman](https://www.list.org) is free
software for managing electronic mail discussion and e-newsletter
lists. Mailman and its web interface can be configured using the
corresponding NixOS module. Note that this service is best used with
an existing, securely configured Postfix setup, as it does not automatically configure this.
## Basic usage with Postfix {#module-services-mailman-basic-usage}
For a basic configuration with Postfix as the MTA, the following settings are suggested:
```
{ config, ... }: {
services.postfix = {
enable = true;
relayDomains = ["hash:/var/lib/mailman/data/postfix_domains"];
sslCert = config.security.acme.certs."lists.example.org".directory + "/full.pem";
sslKey = config.security.acme.certs."lists.example.org".directory + "/key.pem";
config = {
transport_maps = ["hash:/var/lib/mailman/data/postfix_lmtp"];
local_recipient_maps = ["hash:/var/lib/mailman/data/postfix_lmtp"];
};
};
services.mailman = {
enable = true;
serve.enable = true;
hyperkitty.enable = true;
webHosts = ["lists.example.org"];
siteOwner = "mailman@example.org";
};
services.nginx.virtualHosts."lists.example.org".enableACME = true;
networking.firewall.allowedTCPPorts = [ 25 80 443 ];
}
```
DNS records will also be required:
- `AAAA` and `A` records pointing to the host in question, in order for browsers to be able to discover the address of the web server;
- An `MX` record pointing to a domain name at which the host is reachable, in order for other mail servers to be able to deliver emails to the mailing lists it hosts.
After this has been done and appropriate DNS records have been
set up, the Postorius mailing list manager and the Hyperkitty
archive browser will be available at
https://lists.example.org/. Note that this setup is not
sufficient to deliver emails to most email providers nor to
avoid spam -- a number of additional measures for authenticating
incoming and outgoing mails, such as SPF, DMARC and DKIM are
necessary, but outside the scope of the Mailman module.
## Using with other MTAs {#module-services-mailman-other-mtas}
Mailman also supports other MTAs, though with a little bit more configuration. For example, to use Mailman with Exim, you can use the following settings:
```
{ config, ... }: {
services = {
mailman = {
enable = true;
siteOwner = "mailman@example.org";
enablePostfix = false;
settings.mta = {
incoming = "mailman.mta.exim4.LMTP";
outgoing = "mailman.mta.deliver.deliver";
lmtp_host = "localhost";
lmtp_port = "8024";
smtp_host = "localhost";
smtp_port = "25";
configuration = "python:mailman.config.exim4";
};
};
exim = {
enable = true;
# You can configure Exim in a separate file to reduce configuration.nix clutter
config = builtins.readFile ./exim.conf;
};
};
}
```
The exim config needs some special additions to work with Mailman. Currently
NixOS can't manage Exim config with such granularity. Please refer to
[Mailman documentation](https://mailman.readthedocs.io/en/latest/src/mailman/docs/mta.html)
for more info on configuring Mailman for working with Exim.

View file

@ -1,79 +1,95 @@
<chapter xmlns="http://docbook.org/ns/docbook" <!-- Do not edit this file directly, edit its companion .md instead
xmlns:xlink="http://www.w3.org/1999/xlink" and regenerate this file using nixos/doc/manual/md-to-db.sh -->
xmlns:xi="http://www.w3.org/2001/XInclude" <chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-mailman">
version="5.0"
xml:id="module-services-mailman">
<title>Mailman</title> <title>Mailman</title>
<para> <para>
<link xlink:href="https://www.list.org">Mailman</link> is free <link xlink:href="https://www.list.org">Mailman</link> is free
software for managing electronic mail discussion and e-newsletter software for managing electronic mail discussion and e-newsletter
lists. Mailman and its web interface can be configured using the lists. Mailman and its web interface can be configured using the
corresponding NixOS module. Note that this service is best used with corresponding NixOS module. Note that this service is best used with
an existing, securely configured Postfix setup, as it does not automatically configure this. an existing, securely configured Postfix setup, as it does not
automatically configure this.
</para> </para>
<section xml:id="module-services-mailman-basic-usage"> <section xml:id="module-services-mailman-basic-usage">
<title>Basic usage with Postfix</title> <title>Basic usage with Postfix</title>
<para> <para>
For a basic configuration with Postfix as the MTA, the following settings are suggested: For a basic configuration with Postfix as the MTA, the following
<programlisting>{ config, ... }: { settings are suggested:
</para>
<programlisting>
{ config, ... }: {
services.postfix = { services.postfix = {
enable = true; enable = true;
relayDomains = ["hash:/var/lib/mailman/data/postfix_domains"]; relayDomains = [&quot;hash:/var/lib/mailman/data/postfix_domains&quot;];
sslCert = config.security.acme.certs."lists.example.org".directory + "/full.pem"; sslCert = config.security.acme.certs.&quot;lists.example.org&quot;.directory + &quot;/full.pem&quot;;
sslKey = config.security.acme.certs."lists.example.org".directory + "/key.pem"; sslKey = config.security.acme.certs.&quot;lists.example.org&quot;.directory + &quot;/key.pem&quot;;
config = { config = {
transport_maps = ["hash:/var/lib/mailman/data/postfix_lmtp"]; transport_maps = [&quot;hash:/var/lib/mailman/data/postfix_lmtp&quot;];
local_recipient_maps = ["hash:/var/lib/mailman/data/postfix_lmtp"]; local_recipient_maps = [&quot;hash:/var/lib/mailman/data/postfix_lmtp&quot;];
}; };
}; };
services.mailman = { services.mailman = {
<link linkend="opt-services.mailman.enable">enable</link> = true; enable = true;
<link linkend="opt-services.mailman.serve.enable">serve.enable</link> = true; serve.enable = true;
<link linkend="opt-services.mailman.hyperkitty.enable">hyperkitty.enable</link> = true; hyperkitty.enable = true;
<link linkend="opt-services.mailman.webHosts">webHosts</link> = ["lists.example.org"]; webHosts = [&quot;lists.example.org&quot;];
<link linkend="opt-services.mailman.siteOwner">siteOwner</link> = "mailman@example.org"; siteOwner = &quot;mailman@example.org&quot;;
}; };
<link linkend="opt-services.nginx.virtualHosts._name_.enableACME">services.nginx.virtualHosts."lists.example.org".enableACME</link> = true; services.nginx.virtualHosts.&quot;lists.example.org&quot;.enableACME = true;
<link linkend="opt-networking.firewall.allowedTCPPorts">networking.firewall.allowedTCPPorts</link> = [ 25 80 443 ]; networking.firewall.allowedTCPPorts = [ 25 80 443 ];
}</programlisting> }
</para> </programlisting>
<para> <para>
DNS records will also be required: DNS records will also be required:
<itemizedlist>
<listitem><para><literal>AAAA</literal> and <literal>A</literal> records pointing to the host in question, in order for browsers to be able to discover the address of the web server;</para></listitem>
<listitem><para>An <literal>MX</literal> record pointing to a domain name at which the host is reachable, in order for other mail servers to be able to deliver emails to the mailing lists it hosts.</para></listitem>
</itemizedlist>
</para> </para>
<itemizedlist spacing="compact">
<listitem>
<para>
<literal>AAAA</literal> and <literal>A</literal> records
pointing to the host in question, in order for browsers to be
able to discover the address of the web server;
</para>
</listitem>
<listitem>
<para>
An <literal>MX</literal> record pointing to a domain name at
which the host is reachable, in order for other mail servers
to be able to deliver emails to the mailing lists it hosts.
</para>
</listitem>
</itemizedlist>
<para> <para>
After this has been done and appropriate DNS records have been After this has been done and appropriate DNS records have been set
set up, the Postorius mailing list manager and the Hyperkitty up, the Postorius mailing list manager and the Hyperkitty archive
archive browser will be available at browser will be available at https://lists.example.org/. Note that
https://lists.example.org/. Note that this setup is not this setup is not sufficient to deliver emails to most email
sufficient to deliver emails to most email providers nor to providers nor to avoid spam a number of additional measures for
avoid spam -- a number of additional measures for authenticating authenticating incoming and outgoing mails, such as SPF, DMARC and
incoming and outgoing mails, such as SPF, DMARC and DKIM are DKIM are necessary, but outside the scope of the Mailman module.
necessary, but outside the scope of the Mailman module.
</para> </para>
</section> </section>
<section xml:id="module-services-mailman-other-mtas"> <section xml:id="module-services-mailman-other-mtas">
<title>Using with other MTAs</title> <title>Using with other MTAs</title>
<para> <para>
Mailman also supports other MTA, though with a little bit more configuration. For example, to use Mailman with Exim, you can use the following settings: Mailman also supports other MTA, though with a little bit more
<programlisting>{ config, ... }: { configuration. For example, to use Mailman with Exim, you can use
the following settings:
</para>
<programlisting>
{ config, ... }: {
services = { services = {
mailman = { mailman = {
enable = true; enable = true;
siteOwner = "mailman@example.org"; siteOwner = &quot;mailman@example.org&quot;;
<link linkend="opt-services.mailman.enablePostfix">enablePostfix</link> = false; enablePostfix = false;
settings.mta = { settings.mta = {
incoming = "mailman.mta.exim4.LMTP"; incoming = &quot;mailman.mta.exim4.LMTP&quot;;
outgoing = "mailman.mta.deliver.deliver"; outgoing = &quot;mailman.mta.deliver.deliver&quot;;
lmtp_host = "localhost"; lmtp_host = &quot;localhost&quot;;
lmtp_port = "8024"; lmtp_port = &quot;8024&quot;;
smtp_host = "localhost"; smtp_host = &quot;localhost&quot;;
smtp_port = "25"; smtp_port = &quot;25&quot;;
configuration = "python:mailman.config.exim4"; configuration = &quot;python:mailman.config.exim4&quot;;
}; };
}; };
exim = { exim = {
@ -82,13 +98,15 @@
config = builtins.readFile ./exim.conf; config = builtins.readFile ./exim.conf;
}; };
}; };
}</programlisting> }
</para> </programlisting>
<para> <para>
The exim config needs some special additions to work with Mailman. Currently The exim config needs some special additions to work with Mailman.
NixOS can't manage Exim config with such granularity. Please refer to Currently NixOS cant manage Exim config with such granularity.
<link xlink:href="https://mailman.readthedocs.io/en/latest/src/mailman/docs/mta.html">Mailman documentation</link> Please refer to
for more info on configuring Mailman for working with Exim. <link xlink:href="https://mailman.readthedocs.io/en/latest/src/mailman/docs/mta.html">Mailman
documentation</link> for more info on configuring Mailman for
working with Exim.
</para> </para>
</section> </section>
</chapter> </chapter>

View file

@ -0,0 +1,110 @@
# Mjolnir (Matrix Moderation Tool) {#module-services-mjolnir}
This chapter will show you how to set up your own, self-hosted
[Mjolnir](https://github.com/matrix-org/mjolnir) instance.
As an all-in-one moderation tool, it can protect your server from
malicious invites, spam messages, and whatever else you don't want.
In addition to server-level protection, Mjolnir is great for communities
wanting to protect their rooms without having to use their personal
accounts for moderation.
The bot by default includes support for bans, redactions, anti-spam,
server ACLs, room directory changes, room alias transfers, account
deactivation, room shutdown, and more.
See the [README](https://github.com/matrix-org/mjolnir#readme)
page and the [Moderator's guide](https://github.com/matrix-org/mjolnir/blob/main/docs/moderators.md)
for additional instructions on how to setup and use Mjolnir.
For [additional settings](#opt-services.mjolnir.settings)
see [the default configuration](https://github.com/matrix-org/mjolnir/blob/main/config/default.yaml).
## Mjolnir Setup {#module-services-mjolnir-setup}
First create a new Room which will be used as a management room for Mjolnir. In
this room, Mjolnir will log possible errors and debugging information. You'll
need to set this Room-ID in [services.mjolnir.managementRoom](#opt-services.mjolnir.managementRoom).
Next, create a new user for Mjolnir on your homeserver, if not present already.
The Mjolnir Matrix user expects to be free of any rate limiting.
See [Synapse #6286](https://github.com/matrix-org/synapse/issues/6286)
for an example on how to achieve this.
If you want Mjolnir to be able to deactivate users, move room aliases, shutdown rooms, etc.
you'll need to make the Mjolnir user a Matrix server admin.
Now invite the Mjolnir user to the management room.
It is recommended to use [Pantalaimon](https://github.com/matrix-org/pantalaimon),
so your management room can be encrypted. This also applies if you are looking to moderate an encrypted room.
To enable the Pantalaimon E2E Proxy for mjolnir, enable
[services.mjolnir.pantalaimon](#opt-services.mjolnir.pantalaimon.enable). This will
autoconfigure a new Pantalaimon instance, which will connect to the homeserver
set in [services.mjolnir.homeserverUrl](#opt-services.mjolnir.homeserverUrl) and Mjolnir itself
will be configured to connect to the new Pantalaimon instance.
```
{
services.mjolnir = {
enable = true;
homeserverUrl = "https://matrix.domain.tld";
pantalaimon = {
enable = true;
username = "mjolnir";
passwordFile = "/run/secrets/mjolnir-password";
};
protectedRooms = [
"https://matrix.to/#/!xxx:domain.tld"
];
managementRoom = "!yyy:domain.tld";
};
}
```
### Element Matrix Services (EMS) {#module-services-mjolnir-setup-ems}
If you are using a managed ["Element Matrix Services (EMS)"](https://ems.element.io/)
server, you will need to consent to the terms and conditions. Upon startup, an error
log entry with a URL to the consent page will be generated.
## Synapse Antispam Module {#module-services-mjolnir-matrix-synapse-antispam}
A Synapse module is also available to apply the same rulesets the bot
uses across an entire homeserver.
To use the Antispam Module, add `matrix-synapse-plugins.matrix-synapse-mjolnir-antispam`
to the Synapse plugin list and enable the `mjolnir.Module` module.
```
{
services.matrix-synapse = {
plugins = with pkgs; [
matrix-synapse-plugins.matrix-synapse-mjolnir-antispam
];
extraConfig = ''
modules:
- module: mjolnir.Module
config:
# Prevent servers/users in the ban lists from inviting users on this
# server to rooms. Default true.
block_invites: true
# Flag messages sent by servers/users in the ban lists as spam. Currently
# this means that spammy messages will appear as empty to users. Default
# false.
block_messages: false
# Remove users from the user directory search by filtering matrix IDs and
# display names by the entries in the user ban list. Default false.
block_usernames: false
# The room IDs of the ban lists to honour. Unlike other parts of Mjolnir,
# this list cannot be room aliases or permalinks. This server is expected
# to already be joined to the room - Mjolnir will not automatically join
# these rooms.
ban_lists:
- "!roomid:example.org"
'';
};
}
```

View file

@ -1,106 +1,120 @@
<chapter xmlns="http://docbook.org/ns/docbook" <!-- Do not edit this file directly, edit its companion .md instead
xmlns:xlink="http://www.w3.org/1999/xlink" and regenerate this file using nixos/doc/manual/md-to-db.sh -->
xmlns:xi="http://www.w3.org/2001/XInclude" <chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-mjolnir">
version="5.0" <title>Mjolnir (Matrix Moderation Tool)</title>
xml:id="module-services-mjolnir">
<title>Mjolnir (Matrix Moderation Tool)</title>
<para>
This chapter will show you how to set up your own, self-hosted
<link xlink:href="https://github.com/matrix-org/mjolnir">Mjolnir</link>
instance.
</para>
<para>
As an all-in-one moderation tool, it can protect your server from
malicious invites, spam messages, and whatever else you don't want.
In addition to server-level protection, Mjolnir is great for communities
wanting to protect their rooms without having to use their personal
accounts for moderation.
</para>
<para>
The bot by default includes support for bans, redactions, anti-spam,
server ACLs, room directory changes, room alias transfers, account
deactivation, room shutdown, and more.
</para>
<para>
See the <link xlink:href="https://github.com/matrix-org/mjolnir#readme">README</link>
page and the <link xlink:href="https://github.com/matrix-org/mjolnir/blob/main/docs/moderators.md">Moderator's guide</link>
for additional instructions on how to setup and use Mjolnir.
</para>
<para>
For <link linkend="opt-services.mjolnir.settings">additional settings</link>
see <link xlink:href="https://github.com/matrix-org/mjolnir/blob/main/config/default.yaml">the default configuration</link>.
</para>
<section xml:id="module-services-mjolnir-setup">
<title>Mjolnir Setup</title>
<para> <para>
First create a new Room which will be used as a management room for Mjolnir. In This chapter will show you how to set up your own, self-hosted
this room, Mjolnir will log possible errors and debugging information. You'll <link xlink:href="https://github.com/matrix-org/mjolnir">Mjolnir</link>
need to set this Room-ID in <link linkend="opt-services.mjolnir.managementRoom">services.mjolnir.managementRoom</link>. instance.
</para> </para>
<para> <para>
Next, create a new user for Mjolnir on your homeserver, if not present already. As an all-in-one moderation tool, it can protect your server from
malicious invites, spam messages, and whatever else you dont want.
In addition to server-level protection, Mjolnir is great for
communities wanting to protect their rooms without having to use
their personal accounts for moderation.
</para> </para>
<para> <para>
The Mjolnir Matrix user expects to be free of any rate limiting. The bot by default includes support for bans, redactions, anti-spam,
See <link xlink:href="https://github.com/matrix-org/synapse/issues/6286">Synapse #6286</link> server ACLs, room directory changes, room alias transfers, account
for an example on how to achieve this. deactivation, room shutdown, and more.
</para> </para>
<para> <para>
If you want Mjolnir to be able to deactivate users, move room aliases, shutdown rooms, etc. See the
you'll need to make the Mjolnir user a Matrix server admin. <link xlink:href="https://github.com/matrix-org/mjolnir#readme">README</link>
page and the
<link xlink:href="https://github.com/matrix-org/mjolnir/blob/main/docs/moderators.md">Moderators
guide</link> for additional instructions on how to setup and use
Mjolnir.
</para> </para>
<para> <para>
Now invite the Mjolnir user to the management room. For <link linkend="opt-services.mjolnir.settings">additional
settings</link> see
<link xlink:href="https://github.com/matrix-org/mjolnir/blob/main/config/default.yaml">the
default configuration</link>.
</para> </para>
<para> <section xml:id="module-services-mjolnir-setup">
It is recommended to use <link xlink:href="https://github.com/matrix-org/pantalaimon">Pantalaimon</link>, <title>Mjolnir Setup</title>
so your management room can be encrypted. This also applies if you are looking to moderate an encrypted room. <para>
</para> First create a new Room which will be used as a management room
<para> for Mjolnir. In this room, Mjolnir will log possible errors and
To enable the Pantalaimon E2E Proxy for mjolnir, enable debugging information. Youll need to set this Room-ID in
<link linkend="opt-services.mjolnir.pantalaimon.enable">services.mjolnir.pantalaimon</link>. This will <link linkend="opt-services.mjolnir.managementRoom">services.mjolnir.managementRoom</link>.
autoconfigure a new Pantalaimon instance, which will connect to the homeserver </para>
set in <link linkend="opt-services.mjolnir.homeserverUrl">services.mjolnir.homeserverUrl</link> and Mjolnir itself <para>
will be configured to connect to the new Pantalaimon instance. Next, create a new user for Mjolnir on your homeserver, if not
</para> present already.
<programlisting> </para>
<para>
The Mjolnir Matrix user expects to be free of any rate limiting.
See
<link xlink:href="https://github.com/matrix-org/synapse/issues/6286">Synapse
#6286</link> for an example on how to achieve this.
</para>
<para>
If you want Mjolnir to be able to deactivate users, move room
aliases, shutdown rooms, etc. youll need to make the Mjolnir user
a Matrix server admin.
</para>
<para>
Now invite the Mjolnir user to the management room.
</para>
<para>
It is recommended to use
<link xlink:href="https://github.com/matrix-org/pantalaimon">Pantalaimon</link>,
so your management room can be encrypted. This also applies if you
are looking to moderate an encrypted room.
</para>
<para>
To enable the Pantalaimon E2E Proxy for mjolnir, enable
<link linkend="opt-services.mjolnir.pantalaimon.enable">services.mjolnir.pantalaimon</link>.
This will autoconfigure a new Pantalaimon instance, which will
connect to the homeserver set in
<link linkend="opt-services.mjolnir.homeserverUrl">services.mjolnir.homeserverUrl</link>
and Mjolnir itself will be configured to connect to the new
Pantalaimon instance.
</para>
<programlisting>
{ {
services.mjolnir = { services.mjolnir = {
enable = true; enable = true;
<link linkend="opt-services.mjolnir.homeserverUrl">homeserverUrl</link> = "https://matrix.domain.tld"; homeserverUrl = &quot;https://matrix.domain.tld&quot;;
<link linkend="opt-services.mjolnir.pantalaimon">pantalaimon</link> = { pantalaimon = {
<link linkend="opt-services.mjolnir.pantalaimon.enable">enable</link> = true; enable = true;
<link linkend="opt-services.mjolnir.pantalaimon.username">username</link> = "mjolnir"; username = &quot;mjolnir&quot;;
<link linkend="opt-services.mjolnir.pantalaimon.passwordFile">passwordFile</link> = "/run/secrets/mjolnir-password"; passwordFile = &quot;/run/secrets/mjolnir-password&quot;;
}; };
<link linkend="opt-services.mjolnir.protectedRooms">protectedRooms</link> = [ protectedRooms = [
"https://matrix.to/#/!xxx:domain.tld" &quot;https://matrix.to/#/!xxx:domain.tld&quot;
]; ];
<link linkend="opt-services.mjolnir.managementRoom">managementRoom</link> = "!yyy:domain.tld"; managementRoom = &quot;!yyy:domain.tld&quot;;
}; };
} }
</programlisting> </programlisting>
<section xml:id="module-services-mjolnir-setup-ems"> <section xml:id="module-services-mjolnir-setup-ems">
<title>Element Matrix Services (EMS)</title> <title>Element Matrix Services (EMS)</title>
<para> <para>
If you are using a managed <link xlink:href="https://ems.element.io/">"Element Matrix Services (EMS)"</link> If you are using a managed
server, you will need to consent to the terms and conditions. Upon startup, an error <link xlink:href="https://ems.element.io/"><quote>Element Matrix
log entry with a URL to the consent page will be generated. Services (EMS)</quote></link> server, you will need to consent
</para> to the terms and conditions. Upon startup, an error log entry
</section> with a URL to the consent page will be generated.
</section> </para>
</section>
<section xml:id="module-services-mjolnir-matrix-synapse-antispam"> </section>
<title>Synapse Antispam Module</title> <section xml:id="module-services-mjolnir-matrix-synapse-antispam">
<para> <title>Synapse Antispam Module</title>
A Synapse module is also available to apply the same rulesets the bot <para>
uses across an entire homeserver. A Synapse module is also available to apply the same rulesets the
</para> bot uses across an entire homeserver.
<para> </para>
To use the Antispam Module, add <package>matrix-synapse-plugins.matrix-synapse-mjolnir-antispam</package> <para>
to the Synapse plugin list and enable the <literal>mjolnir.Module</literal> module. To use the Antispam Module, add
</para> <literal>matrix-synapse-plugins.matrix-synapse-mjolnir-antispam</literal>
<programlisting> to the Synapse plugin list and enable the
<literal>mjolnir.Module</literal> module.
</para>
<programlisting>
{ {
services.matrix-synapse = { services.matrix-synapse = {
plugins = with pkgs; [ plugins = with pkgs; [
@ -125,10 +139,10 @@
# to already be joined to the room - Mjolnir will not automatically join # to already be joined to the room - Mjolnir will not automatically join
# these rooms. # these rooms.
ban_lists: ban_lists:
- "!roomid:example.org" - &quot;!roomid:example.org&quot;
''; '';
}; };
} }
</programlisting> </programlisting>
</section> </section>
</chapter> </chapter>

View file

@ -0,0 +1,216 @@
# Matrix {#module-services-matrix}
[Matrix](https://matrix.org/) is an open standard for
interoperable, decentralised, real-time communication over IP. It can be used
to power Instant Messaging, VoIP/WebRTC signalling, Internet of Things
communication - or anywhere you need a standard HTTP API for publishing and
subscribing to data whilst tracking the conversation history.
This chapter will show you how to set up your own, self-hosted Matrix
homeserver using the Synapse reference homeserver, and how to serve your own
copy of the Element web client. See the
[Try Matrix Now!](https://matrix.org/docs/projects/try-matrix-now.html)
overview page for links to Element Apps for Android and iOS,
desktop clients, as well as bridges to other networks and other projects
around Matrix.
## Synapse Homeserver {#module-services-matrix-synapse}
[Synapse](https://github.com/matrix-org/synapse) is
the reference homeserver implementation of Matrix from the core development
team at matrix.org. The following configuration example will set up a
synapse server for the `example.org` domain, served from
the host `myhostname.example.org`. For more information,
please refer to the
[installation instructions of Synapse](https://matrix-org.github.io/synapse/latest/setup/installation.html) .
```
{ pkgs, lib, config, ... }:
let
fqdn = "${config.networking.hostName}.${config.networking.domain}";
clientConfig = {
"m.homeserver".base_url = "https://${fqdn}";
"m.identity_server" = {};
};
serverConfig."m.server" = "${config.services.matrix-synapse.settings.server_name}:443";
mkWellKnown = data: ''
add_header Content-Type application/json;
add_header Access-Control-Allow-Origin *;
return 200 '${builtins.toJSON data}';
'';
in {
networking.hostName = "myhostname";
networking.domain = "example.org";
networking.firewall.allowedTCPPorts = [ 80 443 ];
services.postgresql.enable = true;
services.postgresql.initialScript = pkgs.writeText "synapse-init.sql" ''
CREATE ROLE "matrix-synapse" WITH LOGIN PASSWORD 'synapse';
CREATE DATABASE "matrix-synapse" WITH OWNER "matrix-synapse"
TEMPLATE template0
LC_COLLATE = "C"
LC_CTYPE = "C";
'';
services.nginx = {
enable = true;
recommendedTlsSettings = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
recommendedProxySettings = true;
virtualHosts = {
# If the A and AAAA DNS records on example.org do not point on the same host as the
# records for myhostname.example.org, you can easily move the /.well-known
# virtualHost section of the code to the host that is serving example.org, while
# the rest stays on myhostname.example.org with no other changes required.
# This pattern also allows to seamlessly move the homeserver from
# myhostname.example.org to myotherhost.example.org by only changing the
# /.well-known redirection target.
"${config.networking.domain}" = {
enableACME = true;
forceSSL = true;
# This section is not needed if the server_name of matrix-synapse is equal to
# the domain (i.e. example.org from @foo:example.org) and the federation port
# is 8448.
# Further reference can be found in the docs about delegation under
# https://matrix-org.github.io/synapse/latest/delegate.html
locations."= /.well-known/matrix/server".extraConfig = mkWellKnown serverConfig;
# This is usually needed for homeserver discovery (from e.g. other Matrix clients).
# Further reference can be found in the upstream docs at
# https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixclient
locations."= /.well-known/matrix/client".extraConfig = mkWellKnown clientConfig;
};
"${fqdn}" = {
enableACME = true;
forceSSL = true;
# It's also possible to do a redirect here or something else, this vhost is not
# needed for Matrix. It's recommended though to *not put* element
# here, see also the section about Element.
locations."/".extraConfig = ''
return 404;
'';
# Forward all Matrix API calls to the synapse Matrix homeserver. A trailing slash
# *must not* be used here.
locations."/_matrix".proxyPass = "http://[::1]:8008";
# Forward requests for e.g. SSO and password-resets.
locations."/_synapse/client".proxyPass = "http://[::1]:8008";
};
};
};
services.matrix-synapse = {
enable = true;
settings.server_name = config.networking.domain;
settings.listeners = [
{ port = 8008;
bind_addresses = [ "::1" ];
type = "http";
tls = false;
x_forwarded = true;
resources = [ {
names = [ "client" "federation" ];
compress = true;
} ];
}
];
};
}
```
## Registering Matrix users {#module-services-matrix-register-users}
If you want to run a server with public registration by anybody, you can
then enable `services.matrix-synapse.settings.enable_registration = true;`.
Otherwise, you can generate a registration secret with
{command}`pwgen -s 64 1` and set it with
[](#opt-services.matrix-synapse.settings.registration_shared_secret).
To create a new user or admin, run the following after you have set the secret
and have rebuilt NixOS:
```ShellSession
$ nix-shell -p matrix-synapse
$ register_new_matrix_user -k your-registration-shared-secret http://localhost:8008
New user localpart: your-username
Password:
Confirm password:
Make admin [no]:
Success!
```
In the example, this would create a user with the Matrix Identifier
`@your-username:example.org`.
::: {.warning}
When using [](#opt-services.matrix-synapse.settings.registration_shared_secret), the secret
will end up in the world-readable store. Instead it's recommended to deploy the secret
in an additional file like this:
- Create a file with the following contents:
```
registration_shared_secret: your-very-secret-secret
```
- Deploy the file with a secret-manager such as
[{option}`deployment.keys`](https://nixops.readthedocs.io/en/latest/overview.html#managing-keys)
from {manpage}`nixops(1)` or [sops-nix](https://github.com/Mic92/sops-nix/) to
e.g. {file}`/run/secrets/matrix-shared-secret` and ensure that it's readable
by `matrix-synapse`.
- Include the file like this in your configuration:
```
{
services.matrix-synapse.extraConfigFiles = [
"/run/secrets/matrix-shared-secret"
];
}
```
:::
::: {.note}
It's also possible to use alternative authentication mechanisms such as
[LDAP (via `matrix-synapse-ldap3`)](https://github.com/matrix-org/matrix-synapse-ldap3)
or [OpenID](https://matrix-org.github.io/synapse/latest/openid.html).
:::
## Element (formerly known as Riot) Web Client {#module-services-matrix-element-web}
[Element Web](https://github.com/vector-im/riot-web/) is
the reference web client for Matrix and developed by the core team at
matrix.org. Element was formerly known as Riot.im, see the
[Element introductory blog post](https://element.io/blog/welcome-to-element/)
for more information. The following snippet can be optionally added to the code before
to complete the synapse installation with a web client served at
`https://element.myhostname.example.org` and
`https://element.example.org`. Alternatively, you can use the hosted
copy at <https://app.element.io/>,
or use other web clients or native client applications. Due to the
`/.well-known` URLs set up above, many clients should
fill in the required connection details automatically when you enter your
Matrix Identifier. See
[Try Matrix Now!](https://matrix.org/docs/projects/try-matrix-now.html)
for a list of existing clients and their supported featureset.
```
{
services.nginx.virtualHosts."element.${fqdn}" = {
enableACME = true;
forceSSL = true;
serverAliases = [
"element.${config.networking.domain}"
];
root = pkgs.element-web.override {
conf = {
default_server_config = clientConfig; # see `clientConfig` from the snippet above.
};
};
};
}
```
::: {.note}
The Element developers do not recommend running Element and your Matrix
homeserver on the same fully-qualified domain name for security reasons. In
the example, this means that you should not reuse the
`myhostname.example.org` virtualHost to also serve Element,
but instead serve it on a different subdomain, like
`element.example.org` in the example. See the
[Element Important Security Notes](https://github.com/vector-im/element-web/tree/v1.10.0#important-security-notes)
for more information on this subject.
:::

View file

@ -1,256 +1,243 @@
<chapter xmlns="http://docbook.org/ns/docbook" <!-- Do not edit this file directly, edit its companion .md instead
xmlns:xlink="http://www.w3.org/1999/xlink" and regenerate this file using nixos/doc/manual/md-to-db.sh -->
xmlns:xi="http://www.w3.org/2001/XInclude" <chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-matrix">
version="5.0" <title>Matrix</title>
xml:id="module-services-matrix">
<title>Matrix</title>
<para>
<link xlink:href="https://matrix.org/">Matrix</link> is an open standard for
interoperable, decentralised, real-time communication over IP. It can be used
to power Instant Messaging, VoIP/WebRTC signalling, Internet of Things
communication - or anywhere you need a standard HTTP API for publishing and
subscribing to data whilst tracking the conversation history.
</para>
<para>
This chapter will show you how to set up your own, self-hosted Matrix
homeserver using the Synapse reference homeserver, and how to serve your own
copy of the Element web client. See the
<link xlink:href="https://matrix.org/docs/projects/try-matrix-now.html">Try
Matrix Now!</link> overview page for links to Element Apps for Android and iOS,
desktop clients, as well as bridges to other networks and other projects
around Matrix.
</para>
<section xml:id="module-services-matrix-synapse">
<title>Synapse Homeserver</title>
<para> <para>
<link xlink:href="https://github.com/matrix-org/synapse">Synapse</link> is <link xlink:href="https://matrix.org/">Matrix</link> is an open
the reference homeserver implementation of Matrix from the core development standard for interoperable, decentralised, real-time communication
team at matrix.org. The following configuration example will set up a over IP. It can be used to power Instant Messaging, VoIP/WebRTC
synapse server for the <literal>example.org</literal> domain, served from signalling, Internet of Things communication - or anywhere you need
the host <literal>myhostname.example.org</literal>. For more information, a standard HTTP API for publishing and subscribing to data whilst
please refer to the tracking the conversation history.
<link xlink:href="https://matrix-org.github.io/synapse/latest/setup/installation.html"> </para>
installation instructions of Synapse </link>. <para>
<programlisting> This chapter will show you how to set up your own, self-hosted
Matrix homeserver using the Synapse reference homeserver, and how to
serve your own copy of the Element web client. See the
<link xlink:href="https://matrix.org/docs/projects/try-matrix-now.html">Try
Matrix Now!</link> overview page for links to Element Apps for
Android and iOS, desktop clients, as well as bridges to other
networks and other projects around Matrix.
</para>
<section xml:id="module-services-matrix-synapse">
<title>Synapse Homeserver</title>
<para>
<link xlink:href="https://github.com/matrix-org/synapse">Synapse</link>
is the reference homeserver implementation of Matrix from the core
development team at matrix.org. The following configuration
example will set up a synapse server for the
<literal>example.org</literal> domain, served from the host
<literal>myhostname.example.org</literal>. For more information,
please refer to the
<link xlink:href="https://matrix-org.github.io/synapse/latest/setup/installation.html">installation
instructions of Synapse</link> .
</para>
<programlisting>
{ pkgs, lib, config, ... }: { pkgs, lib, config, ... }:
let let
fqdn = "${config.networking.hostName}.${config.networking.domain}"; fqdn = &quot;${config.networking.hostName}.${config.networking.domain}&quot;;
clientConfig = { clientConfig = {
"m.homeserver".base_url = "https://${fqdn}"; &quot;m.homeserver&quot;.base_url = &quot;https://${fqdn}&quot;;
"m.identity_server" = {}; &quot;m.identity_server&quot; = {};
}; };
serverConfig."m.server" = "${config.services.matrix-synapse.settings.server_name}:443"; serverConfig.&quot;m.server&quot; = &quot;${config.services.matrix-synapse.settings.server_name}:443&quot;;
mkWellKnown = data: '' mkWellKnown = data: ''
add_header Content-Type application/json; add_header Content-Type application/json;
add_header Access-Control-Allow-Origin *; add_header Access-Control-Allow-Origin *;
return 200 '${builtins.toJSON data}'; return 200 '${builtins.toJSON data}';
''; '';
in { in {
<xref linkend="opt-networking.hostName" /> = "myhostname"; networking.hostName = &quot;myhostname&quot;;
<xref linkend="opt-networking.domain" /> = "example.org"; networking.domain = &quot;example.org&quot;;
<xref linkend="opt-networking.firewall.allowedTCPPorts" /> = [ 80 443 ]; networking.firewall.allowedTCPPorts = [ 80 443 ];
<xref linkend="opt-services.postgresql.enable" /> = true; services.postgresql.enable = true;
<xref linkend="opt-services.postgresql.initialScript" /> = pkgs.writeText "synapse-init.sql" '' services.postgresql.initialScript = pkgs.writeText &quot;synapse-init.sql&quot; ''
CREATE ROLE "matrix-synapse" WITH LOGIN PASSWORD 'synapse'; CREATE ROLE &quot;matrix-synapse&quot; WITH LOGIN PASSWORD 'synapse';
CREATE DATABASE "matrix-synapse" WITH OWNER "matrix-synapse" CREATE DATABASE &quot;matrix-synapse&quot; WITH OWNER &quot;matrix-synapse&quot;
TEMPLATE template0 TEMPLATE template0
LC_COLLATE = "C" LC_COLLATE = &quot;C&quot;
LC_CTYPE = "C"; LC_CTYPE = &quot;C&quot;;
''; '';
services.nginx = { services.nginx = {
<link linkend="opt-services.nginx.enable">enable</link> = true; enable = true;
<link linkend="opt-services.nginx.recommendedTlsSettings">recommendedTlsSettings</link> = true; recommendedTlsSettings = true;
<link linkend="opt-services.nginx.recommendedOptimisation">recommendedOptimisation</link> = true; recommendedOptimisation = true;
<link linkend="opt-services.nginx.recommendedGzipSettings">recommendedGzipSettings</link> = true; recommendedGzipSettings = true;
<link linkend="opt-services.nginx.recommendedProxySettings">recommendedProxySettings</link> = true; recommendedProxySettings = true;
<link linkend="opt-services.nginx.virtualHosts">virtualHosts</link> = { virtualHosts = {
"${config.networking.domain}" = { <co xml:id='ex-matrix-synapse-dns' /> # If the A and AAAA DNS records on example.org do not point on the same host as the
<link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link> = true; # records for myhostname.example.org, you can easily move the /.well-known
<link linkend="opt-services.nginx.virtualHosts._name_.forceSSL">forceSSL</link> = true; # virtualHost section of the code to the host that is serving example.org, while
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.extraConfig">locations."= /.well-known/matrix/server".extraConfig</link> = mkWellKnown serverConfig; <co xml:id='ex-matrix-synapse-well-known-server' /> # the rest stays on myhostname.example.org with no other changes required.
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.extraConfig">locations."= /.well-known/matrix/client".extraConfig</link> = mkWellKnown clientConfig; <co xml:id='ex-matrix-synapse-well-known-client' /> # This pattern also allows to seamlessly move the homeserver from
# myhostname.example.org to myotherhost.example.org by only changing the
# /.well-known redirection target.
&quot;${config.networking.domain}&quot; = {
enableACME = true;
forceSSL = true;
# This section is not needed if the server_name of matrix-synapse is equal to
# the domain (i.e. example.org from @foo:example.org) and the federation port
# is 8448.
# Further reference can be found in the docs about delegation under
# https://matrix-org.github.io/synapse/latest/delegate.html
locations.&quot;= /.well-known/matrix/server&quot;.extraConfig = mkWellKnown serverConfig;
# This is usually needed for homeserver discovery (from e.g. other Matrix clients).
# Further reference can be found in the upstream docs at
# https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixclient
locations.&quot;= /.well-known/matrix/client&quot;.extraConfig = mkWellKnown clientConfig;
}; };
"${fqdn}" = { &quot;${fqdn}&quot; = {
<link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link> = true; enableACME = true;
<link linkend="opt-services.nginx.virtualHosts._name_.forceSSL">forceSSL</link> = true; forceSSL = true;
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.extraConfig">locations."/".extraConfig</link> = '' <co xml:id='ex-matrix-synapse-rev-default' /> # It's also possible to do a redirect here or something else, this vhost is not
# needed for Matrix. It's recommended though to *not put* element
# here, see also the section about Element.
locations.&quot;/&quot;.extraConfig = ''
return 404; return 404;
''; '';
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.proxyPass">locations."/_matrix".proxyPass</link> = "http://[::1]:8008"; <co xml:id='ex-matrix-synapse-rev-proxy-pass' /> # Forward all Matrix API calls to the synapse Matrix homeserver. A trailing slash
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.proxyPass">locations."/_synapse/client".proxyPass</link> = "http://[::1]:8008"; <co xml:id='ex-matrix-synapse-rev-client' /> # *must not* be used here.
locations.&quot;/_matrix&quot;.proxyPass = &quot;http://[::1]:8008&quot;;
# Forward requests for e.g. SSO and password-resets.
locations.&quot;/_synapse/client&quot;.proxyPass = &quot;http://[::1]:8008&quot;;
}; };
}; };
}; };
services.matrix-synapse = { services.matrix-synapse = {
<link linkend="opt-services.matrix-synapse.enable">enable</link> = true; enable = true;
<link linkend="opt-services.matrix-synapse.settings.server_name">settings.server_name</link> = config.networking.domain; settings.server_name = config.networking.domain;
<link linkend="opt-services.matrix-synapse.settings.listeners">settings.listeners</link> = [ settings.listeners = [
{ <link linkend="opt-services.matrix-synapse.settings.listeners._.port">port</link> = 8008; { port = 8008;
<link linkend="opt-services.matrix-synapse.settings.listeners._.bind_addresses">bind_addresses</link> = [ "::1" ]; bind_addresses = [ &quot;::1&quot; ];
<link linkend="opt-services.matrix-synapse.settings.listeners._.type">type</link> = "http"; type = &quot;http&quot;;
<link linkend="opt-services.matrix-synapse.settings.listeners._.tls">tls</link> = false; tls = false;
<link linkend="opt-services.matrix-synapse.settings.listeners._.x_forwarded">x_forwarded</link> = true; x_forwarded = true;
<link linkend="opt-services.matrix-synapse.settings.listeners._.resources">resources</link> = [ { resources = [ {
<link linkend="opt-services.matrix-synapse.settings.listeners._.resources._.names">names</link> = [ "client" "federation" ]; names = [ &quot;client&quot; &quot;federation&quot; ];
<link linkend="opt-services.matrix-synapse.settings.listeners._.resources._.compress">compress</link> = true; compress = true;
} ]; } ];
} }
]; ];
}; };
} }
</programlisting> </programlisting>
</para> </section>
<calloutlist> <section xml:id="module-services-matrix-register-users">
<callout arearefs='ex-matrix-synapse-dns'> <title>Registering Matrix users</title>
<para> <para>
If the <code>A</code> and <code>AAAA</code> DNS records on If you want to run a server with public registration by anybody,
<literal>example.org</literal> do not point on the same host as the records you can then enable
for <code>myhostname.example.org</code>, you can easily move the <literal>services.matrix-synapse.settings.enable_registration = true;</literal>.
<code>/.well-known</code> virtualHost section of the code to the host that Otherwise, or you can generate a registration secret with
is serving <literal>example.org</literal>, while the rest stays on <command>pwgen -s 64 1</command> and set it with
<literal>myhostname.example.org</literal> with no other changes required. <xref linkend="opt-services.matrix-synapse.settings.registration_shared_secret" />.
This pattern also allows to seamlessly move the homeserver from To create a new user or admin, run the following after you have
<literal>myhostname.example.org</literal> to set the secret and have rebuilt NixOS:
<literal>myotherhost.example.org</literal> by only changing the
<code>/.well-known</code> redirection target.
</para> </para>
</callout> <programlisting>
<callout arearefs='ex-matrix-synapse-well-known-server'> $ nix-shell -p matrix-synapse
<para> $ register_new_matrix_user -k your-registration-shared-secret http://localhost:8008
This section is not needed if the <link linkend="opt-services.matrix-synapse.settings.server_name">server_name</link> New user localpart: your-username
of <package>matrix-synapse</package> is equal to the domain (i.e. Password:
<literal>example.org</literal> from <literal>@foo:example.org</literal>) Confirm password:
and the federation port is 8448. Make admin [no]:
Further reference can be found in the <link xlink:href="https://matrix-org.github.io/synapse/latest/delegate.html">docs
about delegation</link>.
</para>
</callout>
<callout arearefs='ex-matrix-synapse-well-known-client'>
<para>
This is usually needed for homeserver discovery (from e.g. other Matrix clients).
Further reference can be found in the <link xlink:href="https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixclient">upstream docs</link>
</para>
</callout>
<callout arearefs='ex-matrix-synapse-rev-default'>
<para>
It's also possible to do a redirect here or something else, this vhost is not
needed for Matrix. It's recommended though to <emphasis>not put</emphasis> element
here, see also the <link linkend='ex-matrix-synapse-rev-default'>section about Element</link>.
</para>
</callout>
<callout arearefs='ex-matrix-synapse-rev-proxy-pass'>
<para>
Forward all Matrix API calls to the synapse Matrix homeserver. A trailing slash
<emphasis>must not</emphasis> be used here.
</para>
</callout>
<callout arearefs='ex-matrix-synapse-rev-client'>
<para>
Forward requests for e.g. SSO and password-resets.
</para>
</callout>
</calloutlist>
</section>
<section xml:id="module-services-matrix-register-users">
<title>Registering Matrix users</title>
<para>
If you want to run a server with public registration by anybody, you can
then enable <literal><link linkend="opt-services.matrix-synapse.settings.enable_registration">services.matrix-synapse.settings.enable_registration</link> =
true;</literal>. Otherwise, or you can generate a registration secret with
<command>pwgen -s 64 1</command> and set it with
<option><link linkend="opt-services.matrix-synapse.settings.registration_shared_secret">services.matrix-synapse.settings.registration_shared_secret</link></option>.
To create a new user or admin, run the following after you have set the secret
and have rebuilt NixOS:
<screen>
<prompt>$ </prompt>nix-shell -p matrix-synapse
<prompt>$ </prompt>register_new_matrix_user -k <replaceable>your-registration-shared-secret</replaceable> http://localhost:8008
<prompt>New user localpart: </prompt><replaceable>your-username</replaceable>
<prompt>Password:</prompt>
<prompt>Confirm password:</prompt>
<prompt>Make admin [no]:</prompt>
Success! Success!
</screen> </programlisting>
In the example, this would create a user with the Matrix Identifier
<literal>@your-username:example.org</literal>.
<warning>
<para> <para>
When using <xref linkend="opt-services.matrix-synapse.settings.registration_shared_secret" />, the secret In the example, this would create a user with the Matrix
will end up in the world-readable store. Instead it's recommended to deploy the secret Identifier <literal>@your-username:example.org</literal>.
in an additional file like this: </para>
<itemizedlist> <warning>
<listitem> <para>
<para> When using
Create a file with the following contents: <xref linkend="opt-services.matrix-synapse.settings.registration_shared_secret" />,
<programlisting>registration_shared_secret: your-very-secret-secret</programlisting> the secret will end up in the world-readable store. Instead its
</para> recommended to deploy the secret in an additional file like
</listitem> this:
<listitem> </para>
<para> <itemizedlist>
Deploy the file with a secret-manager such as <link xlink:href="https://nixops.readthedocs.io/en/latest/overview.html#managing-keys"><option>deployment.keys</option></link> <listitem>
from <citerefentry><refentrytitle>nixops</refentrytitle><manvolnum>1</manvolnum></citerefentry> <para>
or <link xlink:href="https://github.com/Mic92/sops-nix/">sops-nix</link> to Create a file with the following contents:
e.g. <filename>/run/secrets/matrix-shared-secret</filename> and ensure that it's readable </para>
by <package>matrix-synapse</package>. <programlisting>
</para> registration_shared_secret: your-very-secret-secret
</listitem> </programlisting>
<listitem> </listitem>
<para> <listitem>
Include the file like this in your configuration: <para>
<programlisting> Deploy the file with a secret-manager such as
<link xlink:href="https://nixops.readthedocs.io/en/latest/overview.html#managing-keys"><option>deployment.keys</option></link>
from
<citerefentry><refentrytitle>nixops</refentrytitle><manvolnum>1</manvolnum></citerefentry>
or
<link xlink:href="https://github.com/Mic92/sops-nix/">sops-nix</link>
to e.g.
<filename>/run/secrets/matrix-shared-secret</filename> and
              ensure that it's readable by
<literal>matrix-synapse</literal>.
</para>
</listitem>
<listitem>
<para>
Include the file like this in your configuration:
</para>
<programlisting>
{ {
<xref linkend="opt-services.matrix-synapse.extraConfigFiles" /> = [ services.matrix-synapse.extraConfigFiles = [
"/run/secrets/matrix-shared-secret" &quot;/run/secrets/matrix-shared-secret&quot;
]; ];
} }
</programlisting> </programlisting>
</para> </listitem>
</listitem> </itemizedlist>
</itemizedlist> </warning>
<note>
<para>
        It's also possible to use alternative authentication mechanisms
such as
<link xlink:href="https://github.com/matrix-org/matrix-synapse-ldap3">LDAP
(via <literal>matrix-synapse-ldap3</literal>)</link> or
<link xlink:href="https://matrix-org.github.io/synapse/latest/openid.html">OpenID</link>.
</para>
</note>
</section>
<section xml:id="module-services-matrix-element-web">
<title>Element (formerly known as Riot) Web Client</title>
<para>
<link xlink:href="https://github.com/vector-im/riot-web/">Element
Web</link> is the reference web client for Matrix and developed by
the core team at matrix.org. Element was formerly known as
Riot.im, see the
<link xlink:href="https://element.io/blog/welcome-to-element/">Element
introductory blog post</link> for more information. The following
snippet can be optionally added to the code before to complete the
synapse installation with a web client served at
<literal>https://element.myhostname.example.org</literal> and
<literal>https://element.example.org</literal>. Alternatively, you
can use the hosted copy at
<link xlink:href="https://app.element.io/">https://app.element.io/</link>,
or use other web clients or native client applications. Due to the
      <literal>/.well-known</literal> URLs set up above, many
clients should fill in the required connection details
automatically when you enter your Matrix Identifier. See
<link xlink:href="https://matrix.org/docs/projects/try-matrix-now.html">Try
Matrix Now!</link> for a list of existing clients and their
supported featureset.
</para> </para>
</warning> <programlisting>
</para>
<note>
<para>
It's also possible to user alternative authentication mechanism such as
<link xlink:href="https://github.com/matrix-org/matrix-synapse-ldap3">LDAP (via <literal>matrix-synapse-ldap3</literal>)</link>
or <link xlink:href="https://matrix-org.github.io/synapse/latest/openid.html">OpenID</link>.
</para>
</note>
</section>
<section xml:id="module-services-matrix-element-web">
<title>Element (formerly known as Riot) Web Client</title>
<para>
<link xlink:href="https://github.com/vector-im/riot-web/">Element Web</link> is
the reference web client for Matrix and developed by the core team at
matrix.org. Element was formerly known as Riot.im, see the
<link xlink:href="https://element.io/blog/welcome-to-element/">Element introductory blog post</link>
for more information. The following snippet can be optionally added to the code before
to complete the synapse installation with a web client served at
<code>https://element.myhostname.example.org</code> and
<code>https://element.example.org</code>. Alternatively, you can use the hosted
copy at <link xlink:href="https://app.element.io/">https://app.element.io/</link>,
or use other web clients or native client applications. Due to the
<literal>/.well-known</literal> urls set up done above, many clients should
fill in the required connection details automatically when you enter your
Matrix Identifier. See
<link xlink:href="https://matrix.org/docs/projects/try-matrix-now.html">Try
Matrix Now!</link> for a list of existing clients and their supported
featureset.
<programlisting>
{ {
services.nginx.virtualHosts."element.${fqdn}" = { services.nginx.virtualHosts.&quot;element.${fqdn}&quot; = {
<link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link> = true; enableACME = true;
<link linkend="opt-services.nginx.virtualHosts._name_.forceSSL">forceSSL</link> = true; forceSSL = true;
<link linkend="opt-services.nginx.virtualHosts._name_.serverAliases">serverAliases</link> = [ serverAliases = [
"element.${config.networking.domain}" &quot;element.${config.networking.domain}&quot;
]; ];
<link linkend="opt-services.nginx.virtualHosts._name_.root">root</link> = pkgs.element-web.override { root = pkgs.element-web.override {
conf = { conf = {
default_server_config = clientConfig; # see `clientConfig` from the snippet above. default_server_config = clientConfig; # see `clientConfig` from the snippet above.
}; };
@ -258,19 +245,19 @@ Success!
}; };
} }
</programlisting> </programlisting>
</para> <note>
<para>
<note> The Element developers do not recommend running Element and your
<para> Matrix homeserver on the same fully-qualified domain name for
The Element developers do not recommend running Element and your Matrix security reasons. In the example, this means that you should not
homeserver on the same fully-qualified domain name for security reasons. In reuse the <literal>myhostname.example.org</literal> virtualHost
the example, this means that you should not reuse the to also serve Element, but instead serve it on a different
<literal>myhostname.example.org</literal> virtualHost to also serve Element, subdomain, like <literal>element.example.org</literal> in the
but instead serve it on a different subdomain, like example. See the
<literal>element.example.org</literal> in the example. See the <link xlink:href="https://github.com/vector-im/element-web/tree/v1.10.0#important-security-notes">Element
<link xlink:href="https://github.com/vector-im/element-web/tree/v1.10.0#important-security-notes">Element Important Security Notes</link> for more information on this
Important Security Notes</link> for more information on this subject. subject.
</para> </para>
</note> </note>
</section> </section>
</chapter> </chapter>

View file

@ -254,6 +254,12 @@ in {
''; '';
}; };
ignoreLid = mkOption {
default = false;
type = types.bool;
description = lib.mdDoc "Treat outputs as connected even if their lids are closed";
};
hooks = mkOption { hooks = mkOption {
type = hooksModule; type = hooksModule;
description = lib.mdDoc "Global hook scripts"; description = lib.mdDoc "Global hook scripts";
@ -340,7 +346,13 @@ in {
startLimitIntervalSec = 5; startLimitIntervalSec = 5;
startLimitBurst = 1; startLimitBurst = 1;
serviceConfig = { serviceConfig = {
ExecStart = "${pkgs.autorandr}/bin/autorandr --batch --change --default ${cfg.defaultTarget}"; ExecStart = ''
${pkgs.autorandr}/bin/autorandr \
--batch \
--change \
--default ${cfg.defaultTarget} \
${optionalString cfg.ignoreLid "--ignore-lid"}
'';
Type = "oneshot"; Type = "oneshot";
RemainAfterExit = false; RemainAfterExit = false;
KillMode = "process"; KillMode = "process";

View file

@ -175,7 +175,7 @@ in
}; };
type = mkOption { type = mkOption {
type = types.enum [ "zip" "rar" "tar" "sz" "tar.gz" "tar.xz" "tar.bz2" "tar.br" "tar.lz4" ]; type = types.enum [ "zip" "rar" "tar" "sz" "tar.gz" "tar.xz" "tar.bz2" "tar.br" "tar.lz4" "tar.zst" ];
default = "zip"; default = "zip";
description = lib.mdDoc "Archive format used to store the dump file."; description = lib.mdDoc "Archive format used to store the dump file.";
}; };
@ -468,12 +468,14 @@ in
"d '${cfg.stateDir}/conf' 0750 ${cfg.user} gitea - -" "d '${cfg.stateDir}/conf' 0750 ${cfg.user} gitea - -"
"d '${cfg.stateDir}/custom' 0750 ${cfg.user} gitea - -" "d '${cfg.stateDir}/custom' 0750 ${cfg.user} gitea - -"
"d '${cfg.stateDir}/custom/conf' 0750 ${cfg.user} gitea - -" "d '${cfg.stateDir}/custom/conf' 0750 ${cfg.user} gitea - -"
"d '${cfg.stateDir}/data' 0750 ${cfg.user} gitea - -"
"d '${cfg.stateDir}/log' 0750 ${cfg.user} gitea - -" "d '${cfg.stateDir}/log' 0750 ${cfg.user} gitea - -"
"z '${cfg.stateDir}' 0750 ${cfg.user} gitea - -" "z '${cfg.stateDir}' 0750 ${cfg.user} gitea - -"
"z '${cfg.stateDir}/.ssh' 0700 ${cfg.user} gitea - -" "z '${cfg.stateDir}/.ssh' 0700 ${cfg.user} gitea - -"
"z '${cfg.stateDir}/conf' 0750 ${cfg.user} gitea - -" "z '${cfg.stateDir}/conf' 0750 ${cfg.user} gitea - -"
"z '${cfg.stateDir}/custom' 0750 ${cfg.user} gitea - -" "z '${cfg.stateDir}/custom' 0750 ${cfg.user} gitea - -"
"z '${cfg.stateDir}/custom/conf' 0750 ${cfg.user} gitea - -" "z '${cfg.stateDir}/custom/conf' 0750 ${cfg.user} gitea - -"
"z '${cfg.stateDir}/data' 0750 ${cfg.user} gitea - -"
"z '${cfg.stateDir}/log' 0750 ${cfg.user} gitea - -" "z '${cfg.stateDir}/log' 0750 ${cfg.user} gitea - -"
"Z '${cfg.stateDir}' - ${cfg.user} gitea - -" "Z '${cfg.stateDir}' - ${cfg.user} gitea - -"
@ -633,7 +635,6 @@ in
systemd.services.gitea-dump = mkIf cfg.dump.enable { systemd.services.gitea-dump = mkIf cfg.dump.enable {
description = "gitea dump"; description = "gitea dump";
after = [ "gitea.service" ]; after = [ "gitea.service" ];
wantedBy = [ "default.target" ];
path = [ gitea ]; path = [ gitea ];
environment = { environment = {

View file

@ -0,0 +1,112 @@
# GitLab {#module-services-gitlab}
GitLab is a feature-rich git hosting service.
## Prerequisites {#module-services-gitlab-prerequisites}
The `gitlab` service exposes only a Unix socket at
`/run/gitlab/gitlab-workhorse.socket`. You need to
configure a webserver to proxy HTTP requests to the socket.
For instance, the following configuration could be used to use nginx as
frontend proxy:
```
services.nginx = {
enable = true;
recommendedGzipSettings = true;
recommendedOptimisation = true;
recommendedProxySettings = true;
recommendedTlsSettings = true;
virtualHosts."git.example.com" = {
enableACME = true;
forceSSL = true;
locations."/".proxyPass = "http://unix:/run/gitlab/gitlab-workhorse.socket";
};
};
```
## Configuring {#module-services-gitlab-configuring}
GitLab depends on both PostgreSQL and Redis and will automatically enable
both services. In the case of PostgreSQL, a database and a role will be
created.
The default state dir is `/var/gitlab/state`. This is where
all data like the repositories and uploads will be stored.
A basic configuration with some custom settings could look like this:
```
services.gitlab = {
enable = true;
databasePasswordFile = "/var/keys/gitlab/db_password";
initialRootPasswordFile = "/var/keys/gitlab/root_password";
https = true;
host = "git.example.com";
port = 443;
user = "git";
group = "git";
smtp = {
enable = true;
address = "localhost";
port = 25;
};
secrets = {
dbFile = "/var/keys/gitlab/db";
secretFile = "/var/keys/gitlab/secret";
otpFile = "/var/keys/gitlab/otp";
jwsFile = "/var/keys/gitlab/jws";
};
extraConfig = {
gitlab = {
email_from = "gitlab-no-reply@example.com";
email_display_name = "Example GitLab";
email_reply_to = "gitlab-no-reply@example.com";
default_projects_features = { builds = false; };
};
};
};
```
If you're setting up a new GitLab instance, generate new
secrets. You can, for instance, use
`tr -dc A-Za-z0-9 < /dev/urandom | head -c 128 > /var/keys/gitlab/db` to
generate a new db secret. Make sure the files can be read by, and
only by, the user specified by
[services.gitlab.user](#opt-services.gitlab.user). GitLab
encrypts sensitive data stored in the database. If you're restoring
an existing GitLab instance, you must specify the secrets
from `config/secrets.yml` located in your GitLab
state folder.
When `incoming_mail.enabled` is set to `true`
in [extraConfig](#opt-services.gitlab.extraConfig) an additional
service called `gitlab-mailroom` is enabled for fetching incoming mail.
Refer to [](#ch-options) for all available configuration
options for the [services.gitlab](#opt-services.gitlab.enable) module.
## Maintenance {#module-services-gitlab-maintenance}
### Backups {#module-services-gitlab-maintenance-backups}
Backups can be configured with the options in
[services.gitlab.backup](#opt-services.gitlab.backup.keepTime). Use
the [services.gitlab.backup.startAt](#opt-services.gitlab.backup.startAt)
option to configure regular backups.
To run a manual backup, start the `gitlab-backup` service:
```ShellSession
$ systemctl start gitlab-backup.service
```
### Rake tasks {#module-services-gitlab-maintenance-rake}
You can run GitLab's rake tasks with `gitlab-rake`
which will be available on the system when GitLab is enabled. You
will have to run the command as the user that you configured to run
GitLab with.
A list of all available rake tasks can be obtained by running:
```ShellSession
$ sudo -u git -H gitlab-rake -T
```

View file

@ -40,6 +40,7 @@ let
gitalyToml = pkgs.writeText "gitaly.toml" '' gitalyToml = pkgs.writeText "gitaly.toml" ''
socket_path = "${lib.escape ["\""] gitalySocket}" socket_path = "${lib.escape ["\""] gitalySocket}"
runtime_dir = "/run/gitaly"
bin_dir = "${cfg.packages.gitaly}/bin" bin_dir = "${cfg.packages.gitaly}/bin"
prometheus_listen_addr = "localhost:9236" prometheus_listen_addr = "localhost:9236"
@ -1353,6 +1354,7 @@ in {
TimeoutSec = "infinity"; TimeoutSec = "infinity";
Restart = "on-failure"; Restart = "on-failure";
WorkingDirectory = gitlabEnv.HOME; WorkingDirectory = gitlabEnv.HOME;
RuntimeDirectory = "gitaly";
ExecStart = "${cfg.packages.gitaly}/bin/gitaly ${gitalyToml}"; ExecStart = "${cfg.packages.gitaly}/bin/gitaly ${gitalyToml}";
}; };
}; };

View file

@ -1,151 +1,143 @@
<chapter xmlns="http://docbook.org/ns/docbook" <!-- Do not edit this file directly, edit its companion .md instead
xmlns:xlink="http://www.w3.org/1999/xlink" and regenerate this file using nixos/doc/manual/md-to-db.sh -->
xmlns:xi="http://www.w3.org/2001/XInclude" <chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-gitlab">
version="5.0" <title>GitLab</title>
xml:id="module-services-gitlab">
<title>GitLab</title>
<para>
GitLab is a feature-rich git hosting service.
</para>
<section xml:id="module-services-gitlab-prerequisites">
<title>Prerequisites</title>
<para> <para>
The <literal>gitlab</literal> service exposes only an Unix socket at GitLab is a feature-rich git hosting service.
<literal>/run/gitlab/gitlab-workhorse.socket</literal>. You need to
configure a webserver to proxy HTTP requests to the socket.
</para> </para>
<section xml:id="module-services-gitlab-prerequisites">
<para> <title>Prerequisites</title>
For instance, the following configuration could be used to use nginx as <para>
frontend proxy: The <literal>gitlab</literal> service exposes only an Unix socket
<programlisting> at <literal>/run/gitlab/gitlab-workhorse.socket</literal>. You
<link linkend="opt-services.nginx.enable">services.nginx</link> = { need to configure a webserver to proxy HTTP requests to the
<link linkend="opt-services.nginx.enable">enable</link> = true; socket.
<link linkend="opt-services.nginx.recommendedGzipSettings">recommendedGzipSettings</link> = true; </para>
<link linkend="opt-services.nginx.recommendedOptimisation">recommendedOptimisation</link> = true; <para>
<link linkend="opt-services.nginx.recommendedProxySettings">recommendedProxySettings</link> = true; For instance, the following configuration could be used to use
<link linkend="opt-services.nginx.recommendedTlsSettings">recommendedTlsSettings</link> = true; nginx as frontend proxy:
<link linkend="opt-services.nginx.virtualHosts">virtualHosts</link>."git.example.com" = { </para>
<link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link> = true; <programlisting>
<link linkend="opt-services.nginx.virtualHosts._name_.forceSSL">forceSSL</link> = true; services.nginx = {
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.proxyPass">locations."/".proxyPass</link> = "http://unix:/run/gitlab/gitlab-workhorse.socket"; enable = true;
recommendedGzipSettings = true;
recommendedOptimisation = true;
recommendedProxySettings = true;
recommendedTlsSettings = true;
virtualHosts.&quot;git.example.com&quot; = {
enableACME = true;
forceSSL = true;
locations.&quot;/&quot;.proxyPass = &quot;http://unix:/run/gitlab/gitlab-workhorse.socket&quot;;
}; };
}; };
</programlisting> </programlisting>
</para> </section>
</section> <section xml:id="module-services-gitlab-configuring">
<section xml:id="module-services-gitlab-configuring"> <title>Configuring</title>
<title>Configuring</title> <para>
GitLab depends on both PostgreSQL and Redis and will automatically
<para> enable both services. In the case of PostgreSQL, a database and a
GitLab depends on both PostgreSQL and Redis and will automatically enable role will be created.
both services. In the case of PostgreSQL, a database and a role will be </para>
created. <para>
</para> The default state dir is <literal>/var/gitlab/state</literal>.
This is where all data like the repositories and uploads will be
<para> stored.
The default state dir is <literal>/var/gitlab/state</literal>. This is where </para>
all data like the repositories and uploads will be stored. <para>
</para> A basic configuration with some custom settings could look like
this:
<para> </para>
A basic configuration with some custom settings could look like this: <programlisting>
<programlisting>
services.gitlab = { services.gitlab = {
<link linkend="opt-services.gitlab.enable">enable</link> = true; enable = true;
<link linkend="opt-services.gitlab.databasePasswordFile">databasePasswordFile</link> = "/var/keys/gitlab/db_password"; databasePasswordFile = &quot;/var/keys/gitlab/db_password&quot;;
<link linkend="opt-services.gitlab.initialRootPasswordFile">initialRootPasswordFile</link> = "/var/keys/gitlab/root_password"; initialRootPasswordFile = &quot;/var/keys/gitlab/root_password&quot;;
<link linkend="opt-services.gitlab.https">https</link> = true; https = true;
<link linkend="opt-services.gitlab.host">host</link> = "git.example.com"; host = &quot;git.example.com&quot;;
<link linkend="opt-services.gitlab.port">port</link> = 443; port = 443;
<link linkend="opt-services.gitlab.user">user</link> = "git"; user = &quot;git&quot;;
<link linkend="opt-services.gitlab.group">group</link> = "git"; group = &quot;git&quot;;
smtp = { smtp = {
<link linkend="opt-services.gitlab.smtp.enable">enable</link> = true; enable = true;
<link linkend="opt-services.gitlab.smtp.address">address</link> = "localhost"; address = &quot;localhost&quot;;
<link linkend="opt-services.gitlab.smtp.port">port</link> = 25; port = 25;
}; };
secrets = { secrets = {
<link linkend="opt-services.gitlab.secrets.dbFile">dbFile</link> = "/var/keys/gitlab/db"; dbFile = &quot;/var/keys/gitlab/db&quot;;
<link linkend="opt-services.gitlab.secrets.secretFile">secretFile</link> = "/var/keys/gitlab/secret"; secretFile = &quot;/var/keys/gitlab/secret&quot;;
<link linkend="opt-services.gitlab.secrets.otpFile">otpFile</link> = "/var/keys/gitlab/otp"; otpFile = &quot;/var/keys/gitlab/otp&quot;;
<link linkend="opt-services.gitlab.secrets.jwsFile">jwsFile</link> = "/var/keys/gitlab/jws"; jwsFile = &quot;/var/keys/gitlab/jws&quot;;
}; };
<link linkend="opt-services.gitlab.extraConfig">extraConfig</link> = { extraConfig = {
gitlab = { gitlab = {
email_from = "gitlab-no-reply@example.com"; email_from = &quot;gitlab-no-reply@example.com&quot;;
email_display_name = "Example GitLab"; email_display_name = &quot;Example GitLab&quot;;
email_reply_to = "gitlab-no-reply@example.com"; email_reply_to = &quot;gitlab-no-reply@example.com&quot;;
default_projects_features = { builds = false; }; default_projects_features = { builds = false; };
}; };
}; };
}; };
</programlisting> </programlisting>
</para> <para>
If youre setting up a new GitLab instance, generate new secrets.
<para> You for instance use
If you're setting up a new GitLab instance, generate new <literal>tr -dc A-Za-z0-9 &lt; /dev/urandom | head -c 128 &gt; /var/keys/gitlab/db</literal>
secrets. You for instance use <literal>tr -dc A-Za-z0-9 &lt; to generate a new db secret. Make sure the files can be read by,
/dev/urandom | head -c 128 &gt; /var/keys/gitlab/db</literal> to and only by, the user specified by
generate a new db secret. Make sure the files can be read by, and <link linkend="opt-services.gitlab.user">services.gitlab.user</link>.
only by, the user specified by <link GitLab encrypts sensitive data stored in the database. If youre
linkend="opt-services.gitlab.user">services.gitlab.user</link>. GitLab restoring an existing GitLab instance, you must specify the
encrypts sensitive data stored in the database. If you're restoring secrets secret from <literal>config/secrets.yml</literal> located
an existing GitLab instance, you must specify the secrets secret in your GitLab state folder.
from <literal>config/secrets.yml</literal> located in your GitLab </para>
state folder. <para>
</para> When <literal>incoming_mail.enabled</literal> is set to
<literal>true</literal> in
<para> <link linkend="opt-services.gitlab.extraConfig">extraConfig</link>
When <literal>incoming_mail.enabled</literal> is set to <literal>true</literal> an additional service called <literal>gitlab-mailroom</literal> is
in <link linkend="opt-services.gitlab.extraConfig">extraConfig</link> an additional enabled for fetching incoming mail.
service called <literal>gitlab-mailroom</literal> is enabled for fetching incoming mail. </para>
</para> <para>
Refer to <xref linkend="ch-options" /> for all available
<para> configuration options for the
Refer to <xref linkend="ch-options" /> for all available configuration <link linkend="opt-services.gitlab.enable">services.gitlab</link>
options for the module.
<link linkend="opt-services.gitlab.enable">services.gitlab</link> module. </para>
</para>
</section>
<section xml:id="module-services-gitlab-maintenance">
<title>Maintenance</title>
<section xml:id="module-services-gitlab-maintenance-backups">
<title>Backups</title>
<para>
Backups can be configured with the options in <link
linkend="opt-services.gitlab.backup.keepTime">services.gitlab.backup</link>. Use
the <link
linkend="opt-services.gitlab.backup.startAt">services.gitlab.backup.startAt</link>
option to configure regular backups.
</para>
<para>
To run a manual backup, start the <literal>gitlab-backup</literal> service:
<screen>
<prompt>$ </prompt>systemctl start gitlab-backup.service
</screen>
</para>
</section> </section>
<section xml:id="module-services-gitlab-maintenance">
<section xml:id="module-services-gitlab-maintenance-rake"> <title>Maintenance</title>
<title>Rake tasks</title> <section xml:id="module-services-gitlab-maintenance-backups">
<title>Backups</title>
<para> <para>
You can run GitLab's rake tasks with <literal>gitlab-rake</literal> Backups can be configured with the options in
which will be available on the system when GitLab is enabled. You <link linkend="opt-services.gitlab.backup.keepTime">services.gitlab.backup</link>.
will have to run the command as the user that you configured to run Use the
GitLab with. <link linkend="opt-services.gitlab.backup.startAt">services.gitlab.backup.startAt</link>
</para> option to configure regular backups.
</para>
<para> <para>
A list of all available rake tasks can be obtained by running: To run a manual backup, start the
<screen> <literal>gitlab-backup</literal> service:
<prompt>$ </prompt>sudo -u git -H gitlab-rake -T </para>
</screen> <programlisting>
</para> $ systemctl start gitlab-backup.service
</programlisting>
</section>
<section xml:id="module-services-gitlab-maintenance-rake">
<title>Rake tasks</title>
<para>
You can run GitLabs rake tasks with
<literal>gitlab-rake</literal> which will be available on the
system when GitLab is enabled. You will have to run the command
as the user that you configured to run GitLab with.
</para>
<para>
A list of all available rake tasks can be obtained by running:
</para>
<programlisting>
$ sudo -u git -H gitlab-rake -T
</programlisting>
</section>
</section> </section>
</section>
</chapter> </chapter>

View file

@ -59,6 +59,10 @@ in
systemPackages = [ cfg.package ]; systemPackages = [ cfg.package ];
}; };
services.ntfy-sh.settings = {
auth-file = mkDefault "/var/lib/ntfy-sh/user.db";
};
systemd.services.ntfy-sh = { systemd.services.ntfy-sh = {
description = "Push notifications server"; description = "Push notifications server";
@ -68,6 +72,7 @@ in
serviceConfig = { serviceConfig = {
ExecStart = "${cfg.package}/bin/ntfy serve -c ${configuration}"; ExecStart = "${cfg.package}/bin/ntfy serve -c ${configuration}";
User = cfg.user; User = cfg.user;
StateDirectory = "ntfy-sh";
AmbientCapabilities = "CAP_NET_BIND_SERVICE"; AmbientCapabilities = "CAP_NET_BIND_SERVICE";
PrivateTmp = true; PrivateTmp = true;

View file

@ -175,7 +175,7 @@ in
description = "Take snapper snapshot of root on boot"; description = "Take snapper snapshot of root on boot";
inherit documentation; inherit documentation;
serviceConfig.ExecStart = "${pkgs.snapper}/bin/snapper --config root create --cleanup-algorithm number --description boot"; serviceConfig.ExecStart = "${pkgs.snapper}/bin/snapper --config root create --cleanup-algorithm number --description boot";
serviceConfig.type = "oneshot"; serviceConfig.Type = "oneshot";
requires = [ "local-fs.target" ]; requires = [ "local-fs.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
unitConfig.ConditionPathExists = "/etc/snapper/configs/root"; unitConfig.ConditionPathExists = "/etc/snapper/configs/root";

View file

@ -0,0 +1,93 @@
# Sourcehut {#module-services-sourcehut}
[Sourcehut](https://sr.ht/) is an open-source,
self-hostable software development platform. The server setup can be automated using
[services.sourcehut](#opt-services.sourcehut.enable).
## Basic usage {#module-services-sourcehut-basic-usage}
Sourcehut is a Python and Go based set of applications.
This NixOS module also provides basic configuration integrating Sourcehut into locally running
`services.nginx`, `services.redis.servers.sourcehut`, `services.postfix`
and `services.postgresql` services.
A very basic configuration may look like this:
```
{ pkgs, ... }:
let
fqdn =
let
join = hostName: domain: hostName + optionalString (domain != null) ".${domain}";
in join config.networking.hostName config.networking.domain;
in {
networking = {
hostName = "srht";
domain = "tld";
firewall.allowedTCPPorts = [ 22 80 443 ];
};
services.sourcehut = {
enable = true;
git.enable = true;
man.enable = true;
meta.enable = true;
nginx.enable = true;
postfix.enable = true;
postgresql.enable = true;
redis.enable = true;
settings = {
"sr.ht" = {
environment = "production";
global-domain = fqdn;
origin = "https://${fqdn}";
# Produce keys with srht-keygen from sourcehut.coresrht.
network-key = "/run/keys/path/to/network-key";
service-key = "/run/keys/path/to/service-key";
};
webhooks.private-key= "/run/keys/path/to/webhook-key";
};
};
security.acme.certs."${fqdn}".extraDomainNames = [
"meta.${fqdn}"
"man.${fqdn}"
"git.${fqdn}"
];
services.nginx = {
enable = true;
# only recommendedProxySettings are strictly required, but the rest make sense as well.
recommendedTlsSettings = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
recommendedProxySettings = true;
# Settings to setup what certificates are used for which endpoint.
virtualHosts = {
"${fqdn}".enableACME = true;
"meta.${fqdn}".useACMEHost = fqdn;
"man.${fqdn}".useACMEHost = fqdn;
"git.${fqdn}".useACMEHost = fqdn;
};
};
}
```
The `hostName` option is used internally to configure the nginx
reverse-proxy. The `settings` attribute set is
used by the configuration generator and the result is placed in `/etc/sr.ht/config.ini`.
## Configuration {#module-services-sourcehut-configuration}
All configuration parameters are also stored in
`/etc/sr.ht/config.ini` which is generated by
the module and linked from the store to ensure that all values from `config.ini`
can be modified by the module.
## Using an alternative webserver as reverse-proxy (e.g. `httpd`) {#module-services-sourcehut-httpd}
By default, `nginx` is used as reverse-proxy for `sourcehut`.
However, it's possible to use e.g. `httpd` by explicitly disabling
`nginx` using [](#opt-services.nginx.enable) and fixing the
`settings`.

View file

@ -1390,6 +1390,6 @@ in
'') '')
]; ];
meta.doc = ./sourcehut.xml; meta.doc = ./default.xml;
meta.maintainers = with maintainers; [ tomberek ]; meta.maintainers = with maintainers; [ tomberek ];
} }

View file

@ -0,0 +1,113 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-sourcehut">
<title>Sourcehut</title>
<para>
<link xlink:href="https://sr.ht/">Sourcehut</link> is an
open-source, self-hostable software development platform. The server
setup can be automated using
<link linkend="opt-services.sourcehut.enable">services.sourcehut</link>.
</para>
<section xml:id="module-services-sourcehut-basic-usage">
<title>Basic usage</title>
<para>
Sourcehut is a Python and Go based set of applications. This NixOS
module also provides basic configuration integrating Sourcehut
into locally running <literal>services.nginx</literal>,
<literal>services.redis.servers.sourcehut</literal>,
<literal>services.postfix</literal> and
<literal>services.postgresql</literal> services.
</para>
<para>
A very basic configuration may look like this:
</para>
<programlisting>
{ pkgs, ... }:
let
fqdn =
let
join = hostName: domain: hostName + optionalString (domain != null) &quot;.${domain}&quot;;
in join config.networking.hostName config.networking.domain;
in {
networking = {
hostName = &quot;srht&quot;;
domain = &quot;tld&quot;;
firewall.allowedTCPPorts = [ 22 80 443 ];
};
services.sourcehut = {
enable = true;
git.enable = true;
man.enable = true;
meta.enable = true;
nginx.enable = true;
postfix.enable = true;
postgresql.enable = true;
redis.enable = true;
settings = {
&quot;sr.ht&quot; = {
environment = &quot;production&quot;;
global-domain = fqdn;
origin = &quot;https://${fqdn}&quot;;
# Produce keys with srht-keygen from sourcehut.coresrht.
network-key = &quot;/run/keys/path/to/network-key&quot;;
service-key = &quot;/run/keys/path/to/service-key&quot;;
};
webhooks.private-key= &quot;/run/keys/path/to/webhook-key&quot;;
};
};
security.acme.certs.&quot;${fqdn}&quot;.extraDomainNames = [
&quot;meta.${fqdn}&quot;
&quot;man.${fqdn}&quot;
&quot;git.${fqdn}&quot;
];
services.nginx = {
enable = true;
# only recommendedProxySettings are strictly required, but the rest make sense as well.
recommendedTlsSettings = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
recommendedProxySettings = true;
# Settings to setup what certificates are used for which endpoint.
virtualHosts = {
&quot;${fqdn}&quot;.enableACME = true;
&quot;meta.${fqdn}&quot;.useACMEHost = fqdn;
&quot;man.${fqdn}&quot;.useACMEHost = fqdn;
&quot;git.${fqdn}&quot;.useACMEHost = fqdn;
};
};
}
</programlisting>
<para>
The <literal>hostName</literal> option is used internally to
configure the nginx reverse-proxy. The <literal>settings</literal>
attribute set is used by the configuration generator and the
result is placed in <literal>/etc/sr.ht/config.ini</literal>.
</para>
</section>
<section xml:id="module-services-sourcehut-configuration">
<title>Configuration</title>
<para>
All configuration parameters are also stored in
<literal>/etc/sr.ht/config.ini</literal> which is generated by the
module and linked from the store to ensure that all values from
<literal>config.ini</literal> can be modified by the module.
</para>
</section>
<section xml:id="module-services-sourcehut-httpd">
<title>Using an alternative webserver as reverse-proxy (e.g.
<literal>httpd</literal>)</title>
<para>
By default, <literal>nginx</literal> is used as reverse-proxy for
<literal>sourcehut</literal>. However, its possible to use e.g.
<literal>httpd</literal> by explicitly disabling
<literal>nginx</literal> using
<xref linkend="opt-services.nginx.enable" /> and fixing the
<literal>settings</literal>.
</para>
</section>
</chapter>

View file

@ -1,119 +0,0 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="module-services-sourcehut">
<title>Sourcehut</title>
<para>
<link xlink:href="https://sr.ht.com/">Sourcehut</link> is an open-source,
self-hostable software development platform. The server setup can be automated using
<link linkend="opt-services.sourcehut.enable">services.sourcehut</link>.
</para>
<section xml:id="module-services-sourcehut-basic-usage">
<title>Basic usage</title>
<para>
Sourcehut is a Python and Go based set of applications.
This NixOS module also provides basic configuration integrating Sourcehut into locally running
<literal><link linkend="opt-services.nginx.enable">services.nginx</link></literal>,
<literal><link linkend="opt-services.redis.servers">services.redis.servers.sourcehut</link></literal>,
<literal><link linkend="opt-services.postfix.enable">services.postfix</link></literal>
and
<literal><link linkend="opt-services.postgresql.enable">services.postgresql</link></literal> services.
</para>
<para>
A very basic configuration may look like this:
<programlisting>
{ pkgs, ... }:
let
fqdn =
let
join = hostName: domain: hostName + optionalString (domain != null) ".${domain}";
in join config.networking.hostName config.networking.domain;
in {
networking = {
<link linkend="opt-networking.hostName">hostName</link> = "srht";
<link linkend="opt-networking.domain">domain</link> = "tld";
<link linkend="opt-networking.firewall.allowedTCPPorts">firewall.allowedTCPPorts</link> = [ 22 80 443 ];
};
services.sourcehut = {
<link linkend="opt-services.sourcehut.enable">enable</link> = true;
<link linkend="opt-services.sourcehut.git.enable">git.enable</link> = true;
<link linkend="opt-services.sourcehut.man.enable">man.enable</link> = true;
<link linkend="opt-services.sourcehut.meta.enable">meta.enable</link> = true;
<link linkend="opt-services.sourcehut.nginx.enable">nginx.enable</link> = true;
<link linkend="opt-services.sourcehut.postfix.enable">postfix.enable</link> = true;
<link linkend="opt-services.sourcehut.postgresql.enable">postgresql.enable</link> = true;
<link linkend="opt-services.sourcehut.redis.enable">redis.enable</link> = true;
<link linkend="opt-services.sourcehut.settings">settings</link> = {
"sr.ht" = {
environment = "production";
global-domain = fqdn;
origin = "https://${fqdn}";
# Produce keys with srht-keygen from <package>sourcehut.coresrht</package>.
network-key = "/run/keys/path/to/network-key";
service-key = "/run/keys/path/to/service-key";
};
webhooks.private-key= "/run/keys/path/to/webhook-key";
};
};
<link linkend="opt-security.acme.certs._name_.extraDomainNames">security.acme.certs."${fqdn}".extraDomainNames</link> = [
"meta.${fqdn}"
"man.${fqdn}"
"git.${fqdn}"
];
services.nginx = {
<link linkend="opt-services.nginx.enable">enable</link> = true;
# only recommendedProxySettings are strictly required, but the rest make sense as well.
<link linkend="opt-services.nginx.recommendedTlsSettings">recommendedTlsSettings</link> = true;
<link linkend="opt-services.nginx.recommendedOptimisation">recommendedOptimisation</link> = true;
<link linkend="opt-services.nginx.recommendedGzipSettings">recommendedGzipSettings</link> = true;
<link linkend="opt-services.nginx.recommendedProxySettings">recommendedProxySettings</link> = true;
# Settings to setup what certificates are used for which endpoint.
<link linkend="opt-services.nginx.virtualHosts">virtualHosts</link> = {
<link linkend="opt-services.nginx.virtualHosts._name_.enableACME">"${fqdn}".enableACME</link> = true;
<link linkend="opt-services.nginx.virtualHosts._name_.useACMEHost">"meta.${fqdn}".useACMEHost</link> = fqdn:
<link linkend="opt-services.nginx.virtualHosts._name_.useACMEHost">"man.${fqdn}".useACMEHost</link> = fqdn:
<link linkend="opt-services.nginx.virtualHosts._name_.useACMEHost">"git.${fqdn}".useACMEHost</link> = fqdn:
};
};
}
</programlisting>
</para>
<para>
The <literal>hostName</literal> option is used internally to configure the nginx
reverse-proxy. The <literal>settings</literal> attribute set is
used by the configuration generator and the result is placed in <literal>/etc/sr.ht/config.ini</literal>.
</para>
</section>
<section xml:id="module-services-sourcehut-configuration">
<title>Configuration</title>
<para>
All configuration parameters are also stored in
<literal>/etc/sr.ht/config.ini</literal> which is generated by
the module and linked from the store to ensure that all values from <literal>config.ini</literal>
can be modified by the module.
</para>
</section>
<section xml:id="module-services-sourcehut-httpd">
<title>Using an alternative webserver as reverse-proxy (e.g. <literal>httpd</literal>)</title>
<para>
By default, <package>nginx</package> is used as reverse-proxy for <package>sourcehut</package>.
However, it's possible to use e.g. <package>httpd</package> by explicitly disabling
<package>nginx</package> using <xref linkend="opt-services.nginx.enable" /> and fixing the
<literal>settings</literal>.
</para>
</section>
</chapter>

View file

@ -0,0 +1,93 @@
# Taskserver {#module-services-taskserver}
Taskserver is the server component of
[Taskwarrior](https://taskwarrior.org/), a free and
open source todo list application.
*Upstream documentation:* <https://taskwarrior.org/docs/#taskd>
## Configuration {#module-services-taskserver-configuration}
Taskserver does all of its authentication via TLS using client certificates,
so you either need to roll your own CA or purchase a certificate from a
known CA, which allows creation of client certificates. These certificates
are usually advertised as "server certificates".
So in order to make it easier to handle your own CA, there is a helper tool
called {command}`nixos-taskserver` which manages the custom CA along
with Taskserver organisations, users and groups.
While the client certificates in Taskserver only authenticate whether a user
is allowed to connect, every user has its own UUID which identifies it as an
entity.
With {command}`nixos-taskserver` the client certificate is created
along with the UUID of the user, so it handles all of the credentials needed
in order to setup the Taskwarrior client to work with a Taskserver.
## The nixos-taskserver tool {#module-services-taskserver-nixos-taskserver-tool}
Because Taskserver by default only provides scripts to setup users
imperatively, the {command}`nixos-taskserver` tool is used for
addition and deletion of organisations along with users and groups defined
by [](#opt-services.taskserver.organisations) and as well for
imperative set up.
The tool is designed to not interfere if the command is used to manually set
up some organisations, users or groups.
For example if you add a new organisation using {command}`nixos-taskserver
org add foo`, the organisation is not modified and deleted no
matter what you define in
{option}`services.taskserver.organisations`, even if you're adding
the same organisation in that option.
The tool is modelled to imitate the official {command}`taskd`
command, documentation for each subcommand can be shown by using the
{option}`--help` switch.
## Declarative/automatic CA management {#module-services-taskserver-declarative-ca-management}
Everything is done according to what you specify in the module options,
however in order to set up a Taskwarrior client for synchronisation with a
Taskserver instance, you have to transfer the keys and certificates to the
client machine.
This is done using {command}`nixos-taskserver user export $orgname
$username` which is printing a shell script fragment to stdout
which can either be used verbatim or adjusted to import the user on the
client machine.
For example, let's say you have the following configuration:
```ShellSession
{
services.taskserver.enable = true;
services.taskserver.fqdn = "server";
services.taskserver.listenHost = "::";
services.taskserver.organisations.my-company.users = [ "alice" ];
}
```
This creates an organisation called `my-company` with the
user `alice`.
Now in order to import the `alice` user to another machine
`alicebox`, all we need to do is something like this:
```ShellSession
$ ssh server nixos-taskserver user export my-company alice | sh
```
Of course, if no SSH daemon is available on the server you can also copy
&amp; paste it directly into a shell.
After this step the user should be set up and you can start synchronising
your tasks for the first time with {command}`task sync init` on
`alicebox`.
Subsequent synchronisation requests merely require the command {command}`task
sync` after that stage.
## Manual CA management {#module-services-taskserver-manual-ca-management}
If you set any options within
[service.taskserver.pki.manual](#opt-services.taskserver.pki.manual.ca.cert).*,
{command}`nixos-taskserver` won't issue certificates, but you can
still use it for adding or removing user accounts.

View file

@ -566,5 +566,5 @@ in {
}) })
]; ];
meta.doc = ./doc.xml; meta.doc = ./default.xml;
} }

View file

@ -0,0 +1,130 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-taskserver">
<title>Taskserver</title>
<para>
Taskserver is the server component of
<link xlink:href="https://taskwarrior.org/">Taskwarrior</link>, a
free and open source todo list application.
</para>
<para>
<emphasis>Upstream documentation:</emphasis>
<link xlink:href="https://taskwarrior.org/docs/#taskd">https://taskwarrior.org/docs/#taskd</link>
</para>
<section xml:id="module-services-taskserver-configuration">
<title>Configuration</title>
<para>
Taskserver does all of its authentication via TLS using client
certificates, so you either need to roll your own CA or purchase a
certificate from a known CA, which allows creation of client
certificates. These certificates are usually advertised as
<quote>server certificates</quote>.
</para>
<para>
So in order to make it easier to handle your own CA, there is a
helper tool called <command>nixos-taskserver</command> which
manages the custom CA along with Taskserver organisations, users
and groups.
</para>
<para>
While the client certificates in Taskserver only authenticate
whether a user is allowed to connect, every user has its own UUID
which identifies it as an entity.
</para>
<para>
With <command>nixos-taskserver</command> the client certificate is
created along with the UUID of the user, so it handles all of the
credentials needed in order to setup the Taskwarrior client to
work with a Taskserver.
</para>
</section>
<section xml:id="module-services-taskserver-nixos-taskserver-tool">
<title>The nixos-taskserver tool</title>
<para>
Because Taskserver by default only provides scripts to setup users
imperatively, the <command>nixos-taskserver</command> tool is used
for addition and deletion of organisations along with users and
groups defined by
<xref linkend="opt-services.taskserver.organisations" /> and as
well for imperative set up.
</para>
<para>
The tool is designed to not interfere if the command is used to
manually set up some organisations, users or groups.
</para>
<para>
For example if you add a new organisation using
<command>nixos-taskserver org add foo</command>, the organisation
is not modified and deleted no matter what you define in
<option>services.taskserver.organisations</option>, even if youre
adding the same organisation in that option.
</para>
<para>
The tool is modelled to imitate the official
<command>taskd</command> command, documentation for each
subcommand can be shown by using the <option>--help</option>
switch.
</para>
</section>
<section xml:id="module-services-taskserver-declarative-ca-management">
<title>Declarative/automatic CA management</title>
<para>
Everything is done according to what you specify in the module
options, however in order to set up a Taskwarrior client for
synchronisation with a Taskserver instance, you have to transfer
the keys and certificates to the client machine.
</para>
<para>
This is done using
<command>nixos-taskserver user export $orgname $username</command>
which is printing a shell script fragment to stdout which can
either be used verbatim or adjusted to import the user on the
client machine.
</para>
<para>
For example, lets say you have the following configuration:
</para>
<programlisting>
{
services.taskserver.enable = true;
services.taskserver.fqdn = &quot;server&quot;;
services.taskserver.listenHost = &quot;::&quot;;
services.taskserver.organisations.my-company.users = [ &quot;alice&quot; ];
}
</programlisting>
<para>
This creates an organisation called <literal>my-company</literal>
with the user <literal>alice</literal>.
</para>
<para>
Now in order to import the <literal>alice</literal> user to
another machine <literal>alicebox</literal>, all we need to do is
something like this:
</para>
<programlisting>
$ ssh server nixos-taskserver user export my-company alice | sh
</programlisting>
<para>
Of course, if no SSH daemon is available on the server you can
also copy &amp; paste it directly into a shell.
</para>
<para>
After this step the user should be set up and you can start
synchronising your tasks for the first time with
<command>task sync init</command> on <literal>alicebox</literal>.
</para>
<para>
Subsequent synchronisation requests merely require the command
<command>task sync</command> after that stage.
</para>
</section>
<section xml:id="module-services-taskserver-manual-ca-management">
<title>Manual CA management</title>
<para>
If you set any options within
<link linkend="opt-services.taskserver.pki.manual.ca.cert">service.taskserver.pki.manual</link>.*,
<command>nixos-taskserver</command> wont issue certificates, but
you can still use it for adding or removing user accounts.
</para>
</section>
</chapter>

View file

@ -1,135 +0,0 @@
<chapter xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
version="5.0"
xml:id="module-services-taskserver">
<title>Taskserver</title>
<para>
Taskserver is the server component of
<link xlink:href="https://taskwarrior.org/">Taskwarrior</link>, a free and
open source todo list application.
</para>
<para>
<emphasis>Upstream documentation:</emphasis>
<link xlink:href="https://taskwarrior.org/docs/#taskd"/>
</para>
<section xml:id="module-services-taskserver-configuration">
<title>Configuration</title>
<para>
Taskserver does all of its authentication via TLS using client certificates,
so you either need to roll your own CA or purchase a certificate from a
known CA, which allows creation of client certificates. These certificates
are usually advertised as <quote>server certificates</quote>.
</para>
<para>
So in order to make it easier to handle your own CA, there is a helper tool
called <command>nixos-taskserver</command> which manages the custom CA along
with Taskserver organisations, users and groups.
</para>
<para>
While the client certificates in Taskserver only authenticate whether a user
is allowed to connect, every user has its own UUID which identifies it as an
entity.
</para>
<para>
With <command>nixos-taskserver</command> the client certificate is created
along with the UUID of the user, so it handles all of the credentials needed
in order to setup the Taskwarrior client to work with a Taskserver.
</para>
</section>
<section xml:id="module-services-taskserver-nixos-taskserver-tool">
<title>The nixos-taskserver tool</title>
<para>
Because Taskserver by default only provides scripts to setup users
imperatively, the <command>nixos-taskserver</command> tool is used for
addition and deletion of organisations along with users and groups defined
by <xref linkend="opt-services.taskserver.organisations"/> and as well for
imperative set up.
</para>
<para>
The tool is designed to not interfere if the command is used to manually set
up some organisations, users or groups.
</para>
<para>
For example if you add a new organisation using <command>nixos-taskserver
org add foo</command>, the organisation is not modified and deleted no
matter what you define in
<option>services.taskserver.organisations</option>, even if you're adding
the same organisation in that option.
</para>
<para>
The tool is modelled to imitate the official <command>taskd</command>
command, documentation for each subcommand can be shown by using the
<option>--help</option> switch.
</para>
</section>
<section xml:id="module-services-taskserver-declarative-ca-management">
<title>Declarative/automatic CA management</title>
<para>
Everything is done according to what you specify in the module options,
however in order to set up a Taskwarrior client for synchronisation with a
Taskserver instance, you have to transfer the keys and certificates to the
client machine.
</para>
<para>
This is done using <command>nixos-taskserver user export $orgname
$username</command> which is printing a shell script fragment to stdout
which can either be used verbatim or adjusted to import the user on the
client machine.
</para>
<para>
For example, let's say you have the following configuration:
<screen>
{
<xref linkend="opt-services.taskserver.enable"/> = true;
<xref linkend="opt-services.taskserver.fqdn"/> = "server";
<xref linkend="opt-services.taskserver.listenHost"/> = "::";
<link linkend="opt-services.taskserver.organisations._name_.users">services.taskserver.organisations.my-company.users</link> = [ "alice" ];
}
</screen>
This creates an organisation called <literal>my-company</literal> with the
user <literal>alice</literal>.
</para>
<para>
Now in order to import the <literal>alice</literal> user to another machine
<literal>alicebox</literal>, all we need to do is something like this:
<screen>
<prompt>$ </prompt>ssh server nixos-taskserver user export my-company alice | sh
</screen>
Of course, if no SSH daemon is available on the server you can also copy
&amp; paste it directly into a shell.
</para>
<para>
After this step the user should be set up and you can start synchronising
your tasks for the first time with <command>task sync init</command> on
<literal>alicebox</literal>.
</para>
<para>
Subsequent synchronisation requests merely require the command <command>task
sync</command> after that stage.
</para>
</section>
<section xml:id="module-services-taskserver-manual-ca-management">
<title>Manual CA management</title>
<para>
If you set any options within
<link linkend="opt-services.taskserver.pki.manual.ca.cert">service.taskserver.pki.manual</link>.*,
<command>nixos-taskserver</command> won't issue certificates, but you can
still use it for adding or removing user accounts.
</para>
</section>
</chapter>

View file

@ -0,0 +1,46 @@
# WeeChat {#module-services-weechat}
[WeeChat](https://weechat.org/) is a fast and
extensible IRC client.
## Basic Usage {#module-services-weechat-basic-usage}
By default, the module creates a
[`systemd`](https://www.freedesktop.org/wiki/Software/systemd/)
unit which runs the chat client in a detached
[`screen`](https://www.gnu.org/software/screen/)
session.
This can be done by enabling the `weechat` service:
```
{ ... }:
{
services.weechat.enable = true;
}
```
The service is managed by a dedicated user named `weechat`
in the state directory `/var/lib/weechat`.
## Re-attaching to WeeChat {#module-services-weechat-reattach}
WeeChat runs in a screen session owned by a dedicated user. To explicitly
allow your another user to attach to this session, the
`screenrc` needs to be tweaked by adding
[multiuser](https://www.gnu.org/software/screen/manual/html_node/Multiuser.html#Multiuser)
support:
```
{
programs.screen.screenrc = ''
multiuser on
acladd normal_user
'';
}
```
Now, the session can be re-attached like this:
```
screen -x weechat/weechat-screen
```
*The session name can be changed using [services.weechat.sessionName.](options.html#opt-services.weechat.sessionName)*

View file

@ -1,66 +1,63 @@
<chapter xmlns="http://docbook.org/ns/docbook" <!-- Do not edit this file directly, edit its companion .md instead
xmlns:xlink="http://www.w3.org/1999/xlink" and regenerate this file using nixos/doc/manual/md-to-db.sh -->
xmlns:xi="http://www.w3.org/2001/XInclude" <chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-weechat">
version="5.0" <title>WeeChat</title>
xml:id="module-services-weechat">
<title>WeeChat</title>
<para>
<link xlink:href="https://weechat.org/">WeeChat</link> is a fast and
extensible IRC client.
</para>
<section xml:id="module-services-weechat-basic-usage">
<title>Basic Usage</title>
<para> <para>
By default, the module creates a <link xlink:href="https://weechat.org/">WeeChat</link> is a fast and
<literal><link xlink:href="https://www.freedesktop.org/wiki/Software/systemd/">systemd</link></literal> extensible IRC client.
unit which runs the chat client in a detached
<literal><link xlink:href="https://www.gnu.org/software/screen/">screen</link></literal>
session.
</para> </para>
<section xml:id="module-services-weechat-basic-usage">
<para> <title>Basic Usage</title>
This can be done by enabling the <literal>weechat</literal> service: <para>
<programlisting> By default, the module creates a
<link xlink:href="https://www.freedesktop.org/wiki/Software/systemd/"><literal>systemd</literal></link>
unit which runs the chat client in a detached
<link xlink:href="https://www.gnu.org/software/screen/"><literal>screen</literal></link>
session.
</para>
<para>
This can be done by enabling the <literal>weechat</literal>
service:
</para>
<programlisting>
{ ... }: { ... }:
{ {
<link linkend="opt-services.weechat.enable">services.weechat.enable</link> = true; services.weechat.enable = true;
} }
</programlisting> </programlisting>
</para> <para>
The service is managed by a dedicated user named
<para> <literal>weechat</literal> in the state directory
The service is managed by a dedicated user named <literal>weechat</literal> <literal>/var/lib/weechat</literal>.
in the state directory <literal>/var/lib/weechat</literal>. </para>
</para> </section>
</section> <section xml:id="module-services-weechat-reattach">
<section xml:id="module-services-weechat-reattach"> <title>Re-attaching to WeeChat</title>
<title>Re-attaching to WeeChat</title> <para>
WeeChat runs in a screen session owned by a dedicated user. To
<para> explicitly allow your another user to attach to this session, the
WeeChat runs in a screen session owned by a dedicated user. To explicitly <literal>screenrc</literal> needs to be tweaked by adding
allow your another user to attach to this session, the <link xlink:href="https://www.gnu.org/software/screen/manual/html_node/Multiuser.html#Multiuser">multiuser</link>
<literal>screenrc</literal> needs to be tweaked by adding support:
<link xlink:href="https://www.gnu.org/software/screen/manual/html_node/Multiuser.html#Multiuser">multiuser</link> </para>
support: <programlisting>
<programlisting>
{ {
<link linkend="opt-programs.screen.screenrc">programs.screen.screenrc</link> = '' programs.screen.screenrc = ''
multiuser on multiuser on
acladd normal_user acladd normal_user
''; '';
} }
</programlisting> </programlisting>
Now, the session can be re-attached like this: <para>
<programlisting> Now, the session can be re-attached like this:
</para>
<programlisting>
screen -x weechat/weechat-screen screen -x weechat/weechat-screen
</programlisting> </programlisting>
</para> <para>
<emphasis>The session name can be changed using
<para> <link xlink:href="options.html#opt-services.weechat.sessionName">services.weechat.sessionName.</link></emphasis>
<emphasis>The session name can be changed using </para>
<link linkend="opt-services.weechat.sessionName">services.weechat.sessionName.</link></emphasis> </section>
</para>
</section>
</chapter> </chapter>

View file

@ -25,7 +25,7 @@ services.parsedmarc = {
Note that GeoIP provisioning is disabled in the example for Note that GeoIP provisioning is disabled in the example for
simplicity, but should be turned on for fully functional reports. simplicity, but should be turned on for fully functional reports.
## Local mail ## Local mail {#module-services-parsedmarc-local-mail}
Instead of watching an external inbox, a local inbox can be Instead of watching an external inbox, a local inbox can be
automatically provisioned. The recipient's name is by default set to automatically provisioned. The recipient's name is by default set to
`dmarc`, but can be configured in `dmarc`, but can be configured in
@ -49,7 +49,7 @@ services.parsedmarc = {
}; };
``` ```
## Grafana and GeoIP ## Grafana and GeoIP {#module-services-parsedmarc-grafana-geoip}
The reports can be visualized and summarized with parsedmarc's The reports can be visualized and summarized with parsedmarc's
official Grafana dashboard. For all views to work, and for the data to official Grafana dashboard. For all views to work, and for the data to
be complete, GeoIP databases are also required. The following example be complete, GeoIP databases are also required. The following example

View file

@ -539,8 +539,6 @@ in
}; };
}; };
# Don't edit the docbook xml directly, edit the md and generate it:
# `pandoc parsedmarc.md -t docbook --top-level-division=chapter --extract-media=media -f markdown+smart > parsedmarc.xml`
meta.doc = ./parsedmarc.xml; meta.doc = ./parsedmarc.xml;
meta.maintainers = [ lib.maintainers.talyz ]; meta.maintainers = [ lib.maintainers.talyz ];
} }

View file

@ -1,3 +1,5 @@
<!-- Do not edit this file directly, edit its companion .md instead
and regenerate this file using nixos/doc/manual/md-to-db.sh -->
<chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-parsedmarc"> <chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-parsedmarc">
<title>parsedmarc</title> <title>parsedmarc</title>
<para> <para>
@ -15,7 +17,7 @@
email address and saves them to a local Elasticsearch instance email address and saves them to a local Elasticsearch instance
looks like this: looks like this:
</para> </para>
<programlisting> <programlisting language="nix">
services.parsedmarc = { services.parsedmarc = {
enable = true; enable = true;
settings.imap = { settings.imap = {
@ -31,7 +33,7 @@ services.parsedmarc = {
simplicity, but should be turned on for fully functional reports. simplicity, but should be turned on for fully functional reports.
</para> </para>
</section> </section>
<section xml:id="local-mail"> <section xml:id="module-services-parsedmarc-local-mail">
<title>Local mail</title> <title>Local mail</title>
<para> <para>
Instead of watching an external inbox, a local inbox can be Instead of watching an external inbox, a local inbox can be
@ -44,7 +46,7 @@ services.parsedmarc = {
email address that should be configured in the domains dmarc email address that should be configured in the domains dmarc
policy is <literal>dmarc@monitoring.example.com</literal>. policy is <literal>dmarc@monitoring.example.com</literal>.
</para> </para>
<programlisting> <programlisting language="nix">
services.parsedmarc = { services.parsedmarc = {
enable = true; enable = true;
provision = { provision = {
@ -57,7 +59,7 @@ services.parsedmarc = {
}; };
</programlisting> </programlisting>
</section> </section>
<section xml:id="grafana-and-geoip"> <section xml:id="module-services-parsedmarc-grafana-geoip">
<title>Grafana and GeoIP</title> <title>Grafana and GeoIP</title>
<para> <para>
The reports can be visualized and summarized with parsedmarcs The reports can be visualized and summarized with parsedmarcs
@ -67,7 +69,7 @@ services.parsedmarc = {
Elasticsearch instance is automatically added as a Grafana Elasticsearch instance is automatically added as a Grafana
datasource, and the dashboard is added to Grafana as well. datasource, and the dashboard is added to Grafana as well.
</para> </para>
<programlisting> <programlisting language="nix">
services.parsedmarc = { services.parsedmarc = {
enable = true; enable = true;
provision = { provision = {

View file

@ -0,0 +1,180 @@
# Prometheus exporters {#module-services-prometheus-exporters}
Prometheus exporters provide metrics for the
[prometheus monitoring system](https://prometheus.io).
## Configuration {#module-services-prometheus-exporters-configuration}
One of the most common exporters is the
[node exporter](https://github.com/prometheus/node_exporter),
it provides hardware and OS metrics from the host it's
running on. The exporter could be configured as follows:
```
services.prometheus.exporters.node = {
enable = true;
port = 9100;
enabledCollectors = [
"logind"
"systemd"
];
disabledCollectors = [
"textfile"
];
openFirewall = true;
firewallFilter = "-i br0 -p tcp -m tcp --dport 9100";
};
```
It should now serve all metrics from the collectors that are explicitly
enabled and the ones that are
[enabled by default](https://github.com/prometheus/node_exporter#enabled-by-default),
via http under `/metrics`. In this
example the firewall should just allow incoming connections to the
exporter's port on the bridge interface `br0` (this would
have to be configured separately of course). For more information about
configuration see `man configuration.nix` or search through
the [available options](https://nixos.org/nixos/options.html#prometheus.exporters).
Prometheus can now be configured to consume the metrics produced by the exporter:
```
services.prometheus = {
# ...
scrapeConfigs = [
{
job_name = "node";
static_configs = [{
targets = [ "localhost:${toString config.services.prometheus.exporters.node.port}" ];
}];
}
];
# ...
}
```
## Adding a new exporter {#module-services-prometheus-exporters-new-exporter}
To add a new exporter, it has to be packaged first (see
`nixpkgs/pkgs/servers/monitoring/prometheus/` for
examples), then a module can be added. The postfix exporter is used in this
example:
- Some default options for all exporters are provided by
`nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix`:
- `enable`
- `port`
- `listenAddress`
- `extraFlags`
- `openFirewall`
- `firewallFilter`
- `user`
- `group`
- As there is already a package available, the module can now be added. This
is accomplished by adding a new file to the
`nixos/modules/services/monitoring/prometheus/exporters/`
directory, which will be called postfix.nix and contains all exporter
specific options and configuration:
```
# nixpgs/nixos/modules/services/prometheus/exporters/postfix.nix
{ config, lib, pkgs, options }:
with lib;
let
# for convenience we define cfg here
cfg = config.services.prometheus.exporters.postfix;
in
{
port = 9154; # The postfix exporter listens on this port by default
# `extraOpts` is an attribute set which contains additional options
# (and optional overrides for default options).
# Note that this attribute is optional.
extraOpts = {
telemetryPath = mkOption {
type = types.str;
default = "/metrics";
description = ''
Path under which to expose metrics.
'';
};
logfilePath = mkOption {
type = types.path;
default = /var/log/postfix_exporter_input.log;
example = /var/log/mail.log;
description = ''
Path where Postfix writes log entries.
This file will be truncated by this exporter!
'';
};
showqPath = mkOption {
type = types.path;
default = /var/spool/postfix/public/showq;
example = /var/lib/postfix/queue/public/showq;
description = ''
Path at which Postfix places its showq socket.
'';
};
};
# `serviceOpts` is an attribute set which contains configuration
# for the exporter's systemd service. One of
# `serviceOpts.script` and `serviceOpts.serviceConfig.ExecStart`
# has to be specified here. This will be merged with the default
# service configuration.
# Note that by default 'DynamicUser' is 'true'.
serviceOpts = {
serviceConfig = {
DynamicUser = false;
ExecStart = ''
${pkgs.prometheus-postfix-exporter}/bin/postfix_exporter \
--web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
--web.telemetry-path ${cfg.telemetryPath} \
${concatStringsSep " \\\n " cfg.extraFlags}
'';
};
};
}
```
- This should already be enough for the postfix exporter. Additionally one
could now add assertions and conditional default values. This can be done
in the 'meta-module' that combines all exporter definitions and generates
the submodules:
`nixpkgs/nixos/modules/services/prometheus/exporters.nix`
## Updating an exporter module {#module-services-prometheus-exporters-update-exporter-module}
Should an exporter option change at some point, it is possible to add
information about the change to the exporter definition similar to
`nixpkgs/nixos/modules/rename.nix`:
```
{ config, lib, pkgs, options }:
with lib;
let
cfg = config.services.prometheus.exporters.nginx;
in
{
port = 9113;
extraOpts = {
# additional module options
# ...
};
serviceOpts = {
# service configuration
# ...
};
imports = [
# 'services.prometheus.exporters.nginx.telemetryEndpoint' -> 'services.prometheus.exporters.nginx.telemetryPath'
(mkRenamedOptionModule [ "telemetryEndpoint" ] [ "telemetryPath" ])
# removed option 'services.prometheus.exporters.nginx.insecure'
(mkRemovedOptionModule [ "insecure" ] ''
This option was replaced by 'prometheus.exporters.nginx.sslVerify' which defaults to true.
'')
({ options.warnings = options.warnings; })
];
}
```

View file

@ -1,138 +1,135 @@
<chapter xmlns="http://docbook.org/ns/docbook" <!-- Do not edit this file directly, edit its companion .md instead
xmlns:xlink="http://www.w3.org/1999/xlink" and regenerate this file using nixos/doc/manual/md-to-db.sh -->
xmlns:xi="http://www.w3.org/2001/XInclude" <chapter xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xml:id="module-services-prometheus-exporters">
version="5.0" <title>Prometheus exporters</title>
xml:id="module-services-prometheus-exporters">
<title>Prometheus exporters</title>
<para>
Prometheus exporters provide metrics for the
<link xlink:href="https://prometheus.io">prometheus monitoring system</link>.
</para>
<section xml:id="module-services-prometheus-exporters-configuration">
<title>Configuration</title>
<para> <para>
One of the most common exporters is the Prometheus exporters provide metrics for the
<link xlink:href="https://github.com/prometheus/node_exporter">node <link xlink:href="https://prometheus.io">prometheus monitoring
exporter</link>, it provides hardware and OS metrics from the host it's system</link>.
running on. The exporter could be configured as follows: </para>
<programlisting> <section xml:id="module-services-prometheus-exporters-configuration">
<title>Configuration</title>
<para>
One of the most common exporters is the
<link xlink:href="https://github.com/prometheus/node_exporter">node
exporter</link>, it provides hardware and OS metrics from the host
it's running on. The exporter could be configured as follows:
</para>
<programlisting>
services.prometheus.exporters.node = { services.prometheus.exporters.node = {
enable = true; enable = true;
port = 9100; port = 9100;
enabledCollectors = [ enabledCollectors = [
"logind" &quot;logind&quot;
"systemd" &quot;systemd&quot;
]; ];
disabledCollectors = [ disabledCollectors = [
"textfile" &quot;textfile&quot;
]; ];
openFirewall = true; openFirewall = true;
firewallFilter = "-i br0 -p tcp -m tcp --dport 9100"; firewallFilter = &quot;-i br0 -p tcp -m tcp --dport 9100&quot;;
}; };
</programlisting> </programlisting>
It should now serve all metrics from the collectors that are explicitly <para>
enabled and the ones that are It should now serve all metrics from the collectors that are
<link xlink:href="https://github.com/prometheus/node_exporter#enabled-by-default">enabled explicitly enabled and the ones that are
by default</link>, via http under <literal>/metrics</literal>. In this <link xlink:href="https://github.com/prometheus/node_exporter#enabled-by-default">enabled
example the firewall should just allow incoming connections to the by default</link>, via http under <literal>/metrics</literal>. In
exporter's port on the bridge interface <literal>br0</literal> (this would this example the firewall should just allow incoming connections
have to be configured separately of course). For more information about to the exporter's port on the bridge interface
configuration see <literal>man configuration.nix</literal> or search through <literal>br0</literal> (this would have to be configured
the separately of course). For more information about configuration
<link xlink:href="https://nixos.org/nixos/options.html#prometheus.exporters">available see <literal>man configuration.nix</literal> or search through the
options</link>. <link xlink:href="https://nixos.org/nixos/options.html#prometheus.exporters">available
</para> options</link>.
</para>
<para> <para>
Prometheus can now be configured to consume the metrics produced by the exporter: Prometheus can now be configured to consume the metrics produced
by the exporter:
</para>
<programlisting> <programlisting>
services.prometheus = { services.prometheus = {
# ... # ...
scrapeConfigs = [ scrapeConfigs = [
{ {
job_name = "node"; job_name = &quot;node&quot;;
static_configs = [{ static_configs = [{
targets = [ "localhost:${toString config.services.prometheus.exporters.node.port}" ]; targets = [ &quot;localhost:${toString config.services.prometheus.exporters.node.port}&quot; ];
}]; }];
} }
]; ];
# ... # ...
} }
</programlisting> </programlisting>
</para> </section>
</section> <section xml:id="module-services-prometheus-exporters-new-exporter">
<section xml:id="module-services-prometheus-exporters-new-exporter"> <title>Adding a new exporter</title>
<title>Adding a new exporter</title>
<para>
To add a new exporter, it has to be packaged first (see
<literal>nixpkgs/pkgs/servers/monitoring/prometheus/</literal> for
examples), then a module can be added. The postfix exporter is used in this
example:
</para>
<itemizedlist>
<listitem>
<para> <para>
Some default options for all exporters are provided by To add a new exporter, it has to be packaged first (see
<literal>nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix</literal>: <literal>nixpkgs/pkgs/servers/monitoring/prometheus/</literal> for
examples), then a module can be added. The postfix exporter is
used in this example:
</para> </para>
</listitem>
<listitem override='none'>
<itemizedlist> <itemizedlist>
<listitem> <listitem>
<para> <para>
<literal>enable</literal> Some default options for all exporters are provided by
</para> <literal>nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix</literal>:
</listitem> </para>
<listitem> <itemizedlist spacing="compact">
<para> <listitem>
<literal>port</literal> <para>
</para> <literal>enable</literal>
</listitem> </para>
<listitem> </listitem>
<para> <listitem>
<literal>listenAddress</literal> <para>
</para> <literal>port</literal>
</listitem> </para>
<listitem> </listitem>
<para> <listitem>
<literal>extraFlags</literal> <para>
</para> <literal>listenAddress</literal>
</listitem> </para>
<listitem> </listitem>
<para> <listitem>
<literal>openFirewall</literal> <para>
</para> <literal>extraFlags</literal>
</listitem> </para>
<listitem> </listitem>
<para> <listitem>
<literal>firewallFilter</literal> <para>
</para> <literal>openFirewall</literal>
</listitem> </para>
<listitem> </listitem>
<para> <listitem>
<literal>user</literal> <para>
</para> <literal>firewallFilter</literal>
</listitem> </para>
<listitem> </listitem>
<para> <listitem>
<literal>group</literal> <para>
</para> <literal>user</literal>
</listitem> </para>
</itemizedlist> </listitem>
</listitem> <listitem>
<listitem> <para>
<para> <literal>group</literal>
As there is already a package available, the module can now be added. This </para>
is accomplished by adding a new file to the </listitem>
<literal>nixos/modules/services/monitoring/prometheus/exporters/</literal> </itemizedlist>
directory, which will be called postfix.nix and contains all exporter </listitem>
specific options and configuration: <listitem>
<programlisting> <para>
As there is already a package available, the module can now be
added. This is accomplished by adding a new file to the
<literal>nixos/modules/services/monitoring/prometheus/exporters/</literal>
directory, which will be called postfix.nix and contains all
exporter specific options and configuration:
</para>
<programlisting>
# nixpkgs/nixos/modules/services/prometheus/exporters/postfix.nix # nixpkgs/nixos/modules/services/prometheus/exporters/postfix.nix
{ config, lib, pkgs, options }: { config, lib, pkgs, options }:
@ -151,7 +148,7 @@ in
extraOpts = { extraOpts = {
telemetryPath = mkOption { telemetryPath = mkOption {
type = types.str; type = types.str;
default = "/metrics"; default = &quot;/metrics&quot;;
description = '' description = ''
Path under which to expose metrics. Path under which to expose metrics.
''; '';
@ -188,32 +185,33 @@ in
${pkgs.prometheus-postfix-exporter}/bin/postfix_exporter \ ${pkgs.prometheus-postfix-exporter}/bin/postfix_exporter \
--web.listen-address ${cfg.listenAddress}:${toString cfg.port} \ --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
--web.telemetry-path ${cfg.telemetryPath} \ --web.telemetry-path ${cfg.telemetryPath} \
${concatStringsSep " \\\n " cfg.extraFlags} ${concatStringsSep &quot; \\\n &quot; cfg.extraFlags}
''; '';
}; };
}; };
} }
</programlisting> </programlisting>
</para> </listitem>
</listitem> <listitem>
<listitem> <para>
This should already be enough for the postfix exporter.
Additionally one could now add assertions and conditional
default values. This can be done in the
<quote>meta-module</quote> that combines all exporter
definitions and generates the submodules:
<literal>nixpkgs/nixos/modules/services/prometheus/exporters.nix</literal>
</para>
</listitem>
</itemizedlist>
</section>
<section xml:id="module-services-prometheus-exporters-update-exporter-module">
<title>Updating an exporter module</title>
<para> <para>
This should already be enough for the postfix exporter. Additionally one Should an exporter option change at some point, it is possible to
could now add assertions and conditional default values. This can be done add information about the change to the exporter definition
in the 'meta-module' that combines all exporter definitions and generates similar to <literal>nixpkgs/nixos/modules/rename.nix</literal>:
the submodules:
<literal>nixpkgs/nixos/modules/services/prometheus/exporters.nix</literal>
</para> </para>
</listitem> <programlisting>
</itemizedlist>
</section>
<section xml:id="module-services-prometheus-exporters-update-exporter-module">
<title>Updating an exporter module</title>
<para>
Should an exporter option change at some point, it is possible to add
information about the change to the exporter definition similar to
<literal>nixpkgs/nixos/modules/rename.nix</literal>:
<programlisting>
{ config, lib, pkgs, options }: { config, lib, pkgs, options }:
with lib; with lib;
@ -232,17 +230,16 @@ in
# ... # ...
}; };
imports = [ imports = [
# 'services.prometheus.exporters.nginx.telemetryEndpoint' -> 'services.prometheus.exporters.nginx.telemetryPath' # 'services.prometheus.exporters.nginx.telemetryEndpoint' -&gt; 'services.prometheus.exporters.nginx.telemetryPath'
(mkRenamedOptionModule [ "telemetryEndpoint" ] [ "telemetryPath" ]) (mkRenamedOptionModule [ &quot;telemetryEndpoint&quot; ] [ &quot;telemetryPath&quot; ])
# removed option 'services.prometheus.exporters.nginx.insecure' # removed option 'services.prometheus.exporters.nginx.insecure'
(mkRemovedOptionModule [ "insecure" ] '' (mkRemovedOptionModule [ &quot;insecure&quot; ] ''
This option was replaced by 'prometheus.exporters.nginx.sslVerify' which defaults to true. This option was replaced by 'prometheus.exporters.nginx.sslVerify' which defaults to true.
'') '')
({ options.warnings = options.warnings; }) ({ options.warnings = options.warnings; })
]; ];
} }
</programlisting> </programlisting>
</para>
</section> </section>
</chapter> </chapter>

View file

@ -9,7 +9,7 @@ let
pkgs.writeText "rspamd-exporter-config.yml" (builtins.toJSON conf); pkgs.writeText "rspamd-exporter-config.yml" (builtins.toJSON conf);
generateConfig = extraLabels: { generateConfig = extraLabels: {
metrics = (map (path: { modules.default.metrics = (map (path: {
name = "rspamd_${replaceStrings [ "[" "." " " "]" "\\" "'" ] [ "_" "_" "_" "" "" "" ] path}"; name = "rspamd_${replaceStrings [ "[" "." " " "]" "\\" "'" ] [ "_" "_" "_" "" "" "" ] path}";
path = "{ .${path} }"; path = "{ .${path} }";
labels = extraLabels; labels = extraLabels;

Some files were not shown because too many files have changed in this diff Show more