Project import generated by Copybara.

GitOrigin-RevId: 4a01ca36d6bfc133bc617e661916a81327c9bbc8
commit ca5ab3a501 (parent 6fa9728205)
Author: Default email
Date: 2022-07-14 08:49:19 -04:00
2175 changed files with 83526 additions and 23949 deletions

View file

@@ -48,6 +48,7 @@
 /pkgs/build-support/writers @lassulus @Profpatsch
 # Nixpkgs documentation
+/doc @fricklerhandwerk
 /maintainers/scripts/db-to-md.sh @jtojnar @ryantm
 /maintainers/scripts/doc @jtojnar @ryantm
 /doc/build-aux/pandoc-filters @jtojnar
@@ -256,8 +257,8 @@
 /pkgs/development/go-packages @kalbasit @Mic92 @zowoq
 # GNOME
-/pkgs/desktops/gnome @jtojnar @hedning
-/pkgs/desktops/gnome/extensions @piegamesde @jtojnar @hedning
+/pkgs/desktops/gnome @jtojnar
+/pkgs/desktops/gnome/extensions @piegamesde @jtojnar
 # Cinnamon
 /pkgs/desktops/cinnamon @mkg20001

View file

@@ -8,8 +8,14 @@ on:
 # the GitHub repository. This means that it should not evaluate user input in a
 # way that allows code injection.
+permissions:
+  contents: read
 jobs:
   backport:
+    permissions:
+      contents: write # for zeebe-io/backport-action to create branch
+      pull-requests: write # for zeebe-io/backport-action to create PR to backport
     name: Backport Pull Request
     if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith('backport', github.event.label.name))
     runs-on: ubuntu-latest

View file

@@ -10,6 +10,9 @@ on:
 # branches:
 #   - master
 #   - release-**
+permissions:
+  contents: read
 jobs:
   tests:
     runs-on: ubuntu-latest

View file

@@ -4,8 +4,13 @@ on:
     branches:
       - master
      - release-**
+permissions:
+  contents: read
 jobs:
   build:
+    permissions:
+      contents: write # for peter-evans/commit-comment to comment on commit
     runs-on: ubuntu-latest
     if: github.repository_owner == 'NixOS'
     env:

View file

@@ -23,4 +23,12 @@ jobs:
       - name: Check DocBook files generated from Markdown are consistent
         run: |
           nixos/doc/manual/md-to-db.sh
-          git diff --exit-code
+          git diff --exit-code || {
+            echo
+            echo 'Generated manual files are out of date.'
+            echo 'Please run'
+            echo
+            echo '  nixos/doc/manual/md-to-db.sh'
+            echo
+            exit 1
+          }

View file

@@ -6,8 +6,13 @@ on:
       - 'nixos-**'
       - 'nixpkgs-**'
+permissions:
+  contents: read
 jobs:
   fail:
+    permissions:
+      contents: none
     name: "This PR is targeting a channel branch"
     runs-on: ubuntu-latest
     steps:

View file

@@ -4,8 +4,13 @@ on:
   check_suite:
     types: [ completed ]
+permissions:
+  contents: read
 jobs:
   action:
+    permissions:
+      statuses: write
     runs-on: ubuntu-latest
     steps:
       - name: clear pending status

View file

@@ -8,8 +8,13 @@ on:
 # the GitHub repository. This means that it should not evaluate user input in a
 # way that allows code injection.
+permissions:
+  contents: read
 jobs:
   action:
+    permissions:
+      statuses: write
     runs-on: ubuntu-latest
     steps:
       - name: set pending status

View file

@@ -14,8 +14,14 @@ on:
     # Merge every 24 hours
     - cron: '0 0 * * *'
+permissions:
+  contents: read
 jobs:
   periodic-merge:
+    permissions:
+      contents: write # for devmasx/merge-branch to merge branches
+      issues: write # for peter-evans/create-or-update-comment to create or update comment
     if: github.repository_owner == 'NixOS'
     runs-on: ubuntu-latest
     strategy:
@@ -28,10 +34,6 @@ jobs:
         pairs:
           - from: master
             into: haskell-updates
-          - from: release-21.11
-            into: staging-next-21.11
-          - from: staging-next-21.11
-            into: staging-21.11
           - from: release-22.05
             into: staging-next-22.05
           - from: staging-next-22.05

View file

@@ -14,8 +14,14 @@ on:
     # Merge every 6 hours
     - cron: '0 */6 * * *'
+permissions:
+  contents: read
 jobs:
   periodic-merge:
+    permissions:
+      contents: write # for devmasx/merge-branch to merge branches
+      issues: write # for peter-evans/create-or-update-comment to create or update comment
     if: github.repository_owner == 'NixOS'
     runs-on: ubuntu-latest
     strategy:

View file

@@ -2,11 +2,18 @@ name: "Update terraform-providers"
 on:
   schedule:
-    - cron: "14 3 * * 1"
+    - cron: "14 3 * * 0"
   workflow_dispatch:
+permissions:
+  contents: read
 jobs:
   tf-providers:
+    permissions:
+      contents: write # for peter-evans/create-pull-request to create branch
+      issues: write # for peter-evans/create-or-update-comment to create or update comment
+      pull-requests: write # for peter-evans/create-pull-request to create a PR
     if: github.repository_owner == 'NixOS' && github.ref == 'refs/heads/master' # ensure workflow_dispatch only runs on master
     runs-on: ubuntu-latest
     steps:

View file

@@ -20,7 +20,12 @@ buildImage {
   fromImageName = null;
   fromImageTag = "latest";
-  contents = pkgs.redis;
+  copyToRoot = pkgs.buildEnv {
+    name = "image-root";
+    paths = [ pkgs.redis ];
+    pathsToLink = [ "/bin" ];
+  };
   runAsRoot = ''
     #!${pkgs.runtimeShell}
     mkdir -p /data
@@ -46,7 +51,7 @@ The above example will build a Docker image `redis/latest` from the given base image
 - `fromImageTag` can be used to further specify the tag of the base image within the repository, in case an image contains multiple tags. By default it's `null`, in which case `buildImage` will peek the first tag available for the base image.
-- `contents` is a derivation that will be copied in the new layer of the resulting image. This can be similarly seen as `ADD contents/ /` in a `Dockerfile`. By default it's `null`.
+- `copyToRoot` is a derivation that will be copied in the new layer of the resulting image. This can be similarly seen as `ADD contents/ /` in a `Dockerfile`. By default it's `null`.
 - `runAsRoot` is a bash script that will run as root in an environment that overlays the existing layers of the base image with the new resulting layer, including the previously copied `contents` derivation. This can be similarly seen as `RUN ...` in a `Dockerfile`.
@@ -81,7 +86,11 @@ pkgs.dockerTools.buildImage {
   name = "hello";
   tag = "latest";
   created = "now";
-  contents = pkgs.hello;
+  copyToRoot = pkgs.buildEnv {
+    name = "image-root";
+    paths = [ pkgs.hello ];
+    pathsToLink = [ "/bin" ];
+  };
   config.Cmd = [ "/bin/hello" ];
 }

View file

@@ -338,6 +338,10 @@ A (typically large) program with a distinct user interface, primarily used interactively
   - `applications/terminal-emulators` (e.g. `alacritty` or `rxvt` or `termite`)
+- **If it's a _file manager_:**
+  - `applications/file-managers` (e.g. `mc` or `ranger` or `pcmanfm`)
 - **If it's for _video playback / editing_:**
   - `applications/video` (e.g. `vlc`)

View file

@@ -5,9 +5,11 @@
 The Coq derivation is overridable through the `coq.override overrides`, where overrides is an attribute set which contains the arguments to override. We recommend overriding either of the following
 * `version` (optional, defaults to the latest version of Coq selected for nixpkgs, see `pkgs/top-level/coq-packages` to witness this choice), which follows the conventions explained in the `coqPackages` section below,
-* `customOCamlPackage` (optional, defaults to `null`, which lets Coq choose a version automatically), which can be set to any of the ocaml packages attribute of `ocaml-ng` (such as `ocaml-ng.ocamlPackages_4_10` which is the default for Coq 8.11 for example).
+* `customOCamlPackages` (optional, defaults to `null`, which lets Coq choose a version automatically), which can be set to any of the ocaml packages attribute of `ocaml-ng` (such as `ocaml-ng.ocamlPackages_4_10` which is the default for Coq 8.11 for example).
 * `coq-version` (optional, defaults to the short version e.g. "8.10"), is a version number of the form "x.y" that indicates which Coq's version build behavior to mimic when using a source which is not a release. E.g. `coq.override { version = "d370a9d1328a4e1cdb9d02ee032f605a9d94ec7a"; coq-version = "8.10"; }`.
+The associated package set can be obtained using `mkCoqPackages coq`, where `coq` is the derivation to use.
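As a sketch of how these pieces fit together (the version and the package chosen here are only examples, not part of the change above):

```nix
# Override Coq to a specific version and obtain the matching package set.
let
  pkgs = import <nixpkgs> { };
  coq_8_15 = pkgs.coq.override { version = "8.15"; };
  coqPackages_8_15 = pkgs.mkCoqPackages coq_8_15;
in
coqPackages_8_15.mathcomp
```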
 ## Coq packages attribute sets: `coqPackages` {#coq-packages-attribute-sets-coqpackages}
 The recommended way of defining a derivation for a Coq library, is to use the `coqPackages.mkCoqDerivation` function, which is essentially a specialization of `mkDerivation` taking into account most of the specifics of Coq libraries. The following attributes are supported:

View file

@@ -1,6 +1,6 @@
 # Perl {#sec-language-perl}
-## Running perl programs on the shell {#ssec-perl-running}
+## Running Perl programs on the shell {#ssec-perl-running}
 When executing a Perl script, it is possible you get an error such as `./myscript.pl: bad interpreter: /usr/bin/perl: no such file or directory`. This happens when the script expects Perl to be installed at `/usr/bin/perl`, which is not the case when using Perl from nixpkgs. You can fix the script by changing the first line to:
@@ -35,15 +35,16 @@ Perl packages from CPAN are defined in [pkgs/top-level/perl-packages.nix](https:
 ```nix
 ClassC3 = buildPerlPackage rec {
-  name = "Class-C3-0.21";
+  pname = "Class-C3";
+  version = "0.21";
   src = fetchurl {
-    url = "mirror://cpan/authors/id/F/FL/FLORA/${name}.tar.gz";
+    url = "mirror://cpan/authors/id/F/FL/FLORA/${pname}-${version}.tar.gz";
     sha256 = "1bl8z095y4js66pwxnm7s853pi9czala4sqc743fdlnk27kq94gz";
   };
 };
 ```
-Note the use of `mirror://cpan/`, and the `${name}` in the URL definition to ensure that the name attribute is consistent with the source that we're actually downloading. Perl packages are made available in `all-packages.nix` through the variable `perlPackages`. For instance, if you have a package that needs `ClassC3`, you would typically write
+Note the use of `mirror://cpan/`, and the `pname` and `version` in the URL definition to ensure that the `pname` attribute is consistent with the source that we're actually downloading. Perl packages are made available in `all-packages.nix` through the variable `perlPackages`. For instance, if you have a package that needs `ClassC3`, you would typically write
 ```nix
 foo = import ../path/to/foo.nix {
@@ -72,10 +73,11 @@ So what does `buildPerlPackage` do? It does the following:
 { buildPerlPackage, fetchurl, db }:
 buildPerlPackage rec {
-  name = "BerkeleyDB-0.36";
+  pname = "BerkeleyDB";
+  version = "0.36";
   src = fetchurl {
-    url = "mirror://cpan/authors/id/P/PM/PMQS/${name}.tar.gz";
+    url = "mirror://cpan/authors/id/P/PM/PMQS/${pname}-${version}.tar.gz";
     sha256 = "07xf50riarb60l1h6m2dqmql8q5dij619712fsgw7ach04d8g3z1";
   };
@@ -90,9 +92,10 @@ Dependencies on other Perl packages can be specified in the `buildInputs` and `p
 ```nix
 ClassC3Componentised = buildPerlPackage rec {
-  name = "Class-C3-Componentised-1.0004";
+  pname = "Class-C3-Componentised";
+  version = "1.0004";
   src = fetchurl {
-    url = "mirror://cpan/authors/id/A/AS/ASH/${name}.tar.gz";
+    url = "mirror://cpan/authors/id/A/AS/ASH/${pname}-${version}.tar.gz";
     sha256 = "0xql73jkcdbq4q9m0b0rnca6nrlvf5hyzy8is0crdk65bynvs8q1";
   };
   propagatedBuildInputs = [
@@ -111,7 +114,7 @@ ImageExifTool = buildPerlPackage {
   version = "11.50";
   src = fetchurl {
-    url = "https://www.sno.phy.queensu.ca/~phil/exiftool/Image-ExifTool-11.50.tar.gz";
+    url = "https://www.sno.phy.queensu.ca/~phil/exiftool/${pname}-${version}.tar.gz";
     sha256 = "0d8v48y94z8maxkmw1rv7v9m0jg2dc8xbp581njb6yhr7abwqdv3";
   };
@@ -139,9 +142,10 @@ This program takes a Perl module name, looks it up on CPAN, fetches and unpacks
 ```ShellSession
 $ nix-generate-from-cpan XML::Simple
 XMLSimple = buildPerlPackage rec {
-  name = "XML-Simple-2.22";
+  pname = "XML-Simple";
+  version = "2.22";
   src = fetchurl {
-    url = "mirror://cpan/authors/id/G/GR/GRANTM/${name}.tar.gz";
+    url = "mirror://cpan/authors/id/G/GR/GRANTM/XML-Simple-2.22.tar.gz";
     sha256 = "b9450ef22ea9644ae5d6ada086dc4300fa105be050a2030ebd4efd28c198eb49";
   };
   propagatedBuildInputs = [ XMLNamespaceSupport XMLSAX XMLSAXExpat ];
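Not part of the diff above, but as a usage sketch: packages defined this way can be pulled into a Perl environment with `perl.withPackages`; the module choices here are illustrative only.

```nix
# A Perl interpreter with a couple of modules from perlPackages on its search path.
with import <nixpkgs> { };
perl.withPackages (p: [ p.ClassC3 p.XMLSimple ])
```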

View file

@@ -5,11 +5,9 @@ and additional libraries.
 Loading can be deferred; see examples.
-At the moment we support three different methods for managing plugins:
+At the moment we support two different methods for managing plugins:
-- Vim packages (*recommend*)
+- Vim packages (*recommended*)
-- VAM (=vim-addon-manager)
-- Pathogen
 - vim-plug
 ## Custom configuration {#custom-configuration}
@@ -45,7 +43,7 @@ neovim.override {
 ```
 If you want to use `neovim-qt` as a graphical editor, you can configure it by overriding Neovim in an overlay
-or passing it an overridden Neovimn:
+or passing it an overridden Neovim:
 ```nix
 neovim-qt.override {
@@ -61,7 +59,7 @@ neovim-qt.override {
 ## Managing plugins with Vim packages {#managing-plugins-with-vim-packages}
-To store you plugins in Vim packages (the native Vim plugin manager, see `:help packages`) the following example can be used:
+To store your plugins in Vim packages (the native Vim plugin manager, see `:help packages`) the following example can be used:
 ```nix
 vim_configurable.customize {
@@ -110,7 +108,7 @@ The resulting package can be added to `packageOverrides` in `~/.nixpkgs/config.n
   };
   myNeovim = neovim.override {
     configure = {
-      # add here code from the example section
+      # add code from the example section here
     };
   };
 };
@@ -158,10 +156,10 @@ in
 ```
 ### Specificities for some plugins
-#### Tree sitter
+#### Treesitter
 By default `nvim-treesitter` encourages you to download, compile and install
-the required tree-sitter grammars at run time with `:TSInstall`. This works
+the required Treesitter grammars at run time with `:TSInstall`. This works
 poorly on NixOS. Instead, to install the `nvim-treesitter` plugins with a set
 of precompiled grammars, you can use `nvim-treesitter.withPlugins` function:
@@ -204,7 +202,7 @@ For Neovim the syntax is:
 neovim.override {
   configure = {
     customRC = ''
-      # here your custom configuration goes!
+      # your custom configuration goes here!
     '';
     plug.plugins = with pkgs.vimPlugins; [
       vim-go
@@ -213,100 +211,6 @@
 }
 ```
-## Managing plugins with VAM {#managing-plugins-with-vam}
-### Handling dependencies of Vim plugins {#handling-dependencies-of-vim-plugins}
-VAM introduced .json files supporting dependencies without versioning
-assuming that "using latest version" is ok most of the time.
-### Example {#example}
-First create a vim-scripts file having one plugin name per line. Example:
-```vim
-"tlib"
-{'name': 'vim-addon-sql'}
-{'filetype_regex': '\%(vim)$', 'names': ['reload', 'vim-dev-plugin']}
-```
-Such vim-scripts file can be read by VAM as well like this:
-```vim
-call vam#Scripts(expand('~/.vim-scripts'), {})
-```
-Create a default.nix file:
-```nix
-{ nixpkgs ? import <nixpkgs> {}, compiler ? "ghc7102" }:
-nixpkgs.vim_configurable.customize { name = "vim"; vimrcConfig.vam.pluginDictionaries = [ "vim-addon-vim2nix" ]; }
-```
-Create a generate.vim file:
-```vim
-ActivateAddons vim-addon-vim2nix
-let vim_scripts = "vim-scripts"
-call nix#ExportPluginsForNix({
-\ 'path_to_nixpkgs': eval('{"'.substitute(substitute(substitute($NIX_PATH, ':', ',', 'g'), '=',':', 'g'), '\([:,]\)', '"\1"',"g").'"}')["nixpkgs"],
-\ 'cache_file': '/tmp/vim2nix-cache',
-\ 'try_catch': 0,
-\ 'plugin_dictionaries': ["vim-addon-manager"]+map(readfile(vim_scripts), 'eval(v:val)')
-\ })
-```
-Then run
-```bash
-nix-shell -p vimUtils.vim_with_vim2nix --command "vim -c 'source generate.vim'"
-```
-You should get a Vim buffer with the nix derivations (output1) and vam.pluginDictionaries (output2).
-You can add your Vim to your system's configuration file like this and start it by "vim-my":
-```nix
-my-vim =
-  let plugins = let inherit (vimUtils) buildVimPluginFrom2Nix; in {
-    copy paste output1 here
-  }; in vim_configurable.customize {
-    name = "vim-my";
-    vimrcConfig.vam.knownPlugins = plugins; # optional
-    vimrcConfig.vam.pluginDictionaries = [
-      copy paste output2 here
-    ];
-  };
-```
-Sample output1:
-```nix
-"reload" = buildVimPluginFrom2Nix { # created by nix#NixDerivation
-  name = "reload";
-  src = fetchgit {
-    url = "https://github.com/xolox/vim-reload";
-    rev = "0a601a668727f5b675cb1ddc19f6861f3f7ab9e1";
-    sha256 = "0vb832l9yxj919f5hfg6qj6bn9ni57gnjd3bj7zpq7d4iv2s4wdh";
-  };
-  dependencies = ["nim-misc"];
-};
-[...]
-```
-Sample output2:
-```nix
-[
-  ''vim-addon-manager''
-  ''tlib''
-  { "name" = ''vim-addon-sql''; }
-  { "filetype_regex" = ''\%(vim)$$''; "names" = [ ''reload'' ''vim-dev-plugin'' ]; }
-]
-```
 ## Adding new plugins to nixpkgs {#adding-new-plugins-to-nixpkgs}
 Nix expressions for Vim plugins are stored in [pkgs/applications/editors/vim/plugins](https://github.com/NixOS/nixpkgs/tree/master/pkgs/applications/editors/vim/plugins). For the vast majority of plugins, Nix expressions are automatically generated by running [`./update.py`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/update.py). This creates a [generated.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/generated.nix) file based on the plugins listed in [vim-plugin-names](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/vim-plugin-names). Plugins are listed in alphabetical order in `vim-plugin-names` using the format `[github username]/[repository]@[gitref]`. For example https://github.com/scrooloose/nerdtree becomes `scrooloose/nerdtree`.
@@ -323,7 +227,7 @@ Sometimes plugins require an override that must be changed when the plugin is up
 To add a new plugin, run `./update.py --add "[owner]/[name]"`. **NOTE**: This script automatically commits to your git repository. Be sure to check out a fresh branch before running.
-Finally, there are some plugins that are also packaged in nodePackages because they have Javascript-related build steps, such as running webpack. Those plugins are not listed in `vim-plugin-names` or managed by `update.py` at all, and are included separately in `overrides.nix`. Currently, all these plugins are related to the `coc.nvim` ecosystem of Language Server Protocol integration with vim/neovim.
+Finally, there are some plugins that are also packaged in nodePackages because they have Javascript-related build steps, such as running webpack. Those plugins are not listed in `vim-plugin-names` or managed by `update.py` at all, and are included separately in `overrides.nix`. Currently, all these plugins are related to the `coc.nvim` ecosystem of the Language Server Protocol integration with vim/neovim.
 ## Updating plugins in nixpkgs {#updating-plugins-in-nixpkgs}

View file

@@ -153,6 +153,24 @@ Add the following to your `mkDerivation` invocation.
 doCheck = stdenv.hostPlatform == stdenv.buildPlatform;
 ```
+#### Package using Meson needs to run binaries for the host platform during build. {#cross-meson-runs-host-code}
+Add `mesonEmulatorHook` cross conditionally to `nativeBuildInputs`.
+e.g.
+```
+nativeBuildInputs = [
+  meson
+] ++ lib.optionals (stdenv.buildPlatform != stdenv.hostPlatform) [
+  mesonEmulatorHook
+];
+```
+Example of an error which this fixes.
+`[Errno 8] Exec format error: './gdk3-scan'`
 ## Cross-building packages {#sec-cross-usage}
 Nixpkgs can be instantiated with `localSystem` alone, in which case there is no cross-compiling and everything is built by and for that system, or also with `crossSystem`, in which case packages run on the latter, but all building happens on the former. Both parameters take the same schema as the 3 (build, host, and target) platforms defined in the previous section. As mentioned above, `lib.systems.examples` has some platforms which are used as arguments for these parameters in practice. You can use them programmatically, or on the command line:

View file

@@ -60,3 +60,8 @@ Some common issues when packaging software for Darwin:
 ```
 The package `xcbuild` can be used to build projects that really depend on Xcode. However, this replacement is not 100% compatible with Xcode and can occasionally cause issues.
+- x86_64-darwin uses the 10.12 SDK by default, but some software is not compatible with that version of the SDK. In that case,
+  the 11.0 SDK used by aarch64-darwin is available for use on x86_64-darwin. To use it, reference `apple_sdk_11_0` instead of
+  `apple_sdk` in your derivation and use `pkgs.darwin.apple_sdk_11_0.callPackage` instead of `pkgs.callPackage`. On Linux, this will
+  have the same effect as `pkgs.callPackage`, so you can use `pkgs.darwin.apple_sdk_11_0.callPackage` regardless of platform.
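A minimal sketch of the pattern described above (the package file name is hypothetical):

```nix
# Use the 11.0 SDK's callPackage so the derivation sees apple_sdk_11_0 frameworks;
# on Linux this behaves like pkgs.callPackage, so it can be used unconditionally.
{ pkgs ? import <nixpkgs> { } }:
pkgs.darwin.apple_sdk_11_0.callPackage ./my-package.nix { }
```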

View file

@@ -77,7 +77,7 @@ where the builder can do anything it wants, but typically starts with
 source $stdenv/setup
 ```
-to let `stdenv` set up the environment (e.g., process the `buildInputs`). If you want, you can still use `stdenv`'s generic builder:
+to let `stdenv` set up the environment (e.g. by resetting `PATH` and populating it from build inputs). If you want, you can still use `stdenv`'s generic builder:
 ```bash
 source $stdenv/setup
@@ -698,12 +698,12 @@ Hook executed at the end of the install phase.
 ### The fixup phase {#ssec-fixup-phase}
-The fixup phase performs some (Nix-specific) post-processing actions on the files installed under `$out` by the install phase. The default `fixupPhase` does the following:
+The fixup phase performs (Nix-specific) post-processing actions on the files installed under `$out` by the install phase. The default `fixupPhase` does the following:
 - It moves the `man/`, `doc/` and `info/` subdirectories of `$out` to `share/`.
 - It strips libraries and executables of debug information.
 - On Linux, it applies the `patchelf` command to ELF executables and libraries to remove unused directories from the `RPATH` in order to prevent unnecessary runtime dependencies.
-- It rewrites the interpreter paths of shell scripts to paths found in `PATH`. E.g., `/usr/bin/perl` will be rewritten to `/nix/store/some-perl/bin/perl` found in `PATH`.
+- It rewrites the interpreter paths of shell scripts to paths found in `PATH`. E.g., `/usr/bin/perl` will be rewritten to `/nix/store/some-perl/bin/perl` found in `PATH`. See [](#patch-shebangs.sh) for details.
 #### Variables controlling the fixup phase {#variables-controlling-the-fixup-phase}
@@ -749,7 +749,7 @@ If set, the `patchelf` command is not used to remove unnecessary `RPATH` entries
 ##### `dontPatchShebangs` {#var-stdenv-dontPatchShebangs}
-If set, scripts starting with `#!` do not have their interpreter paths rewritten to paths in the Nix store.
+If set, scripts starting with `#!` do not have their interpreter paths rewritten to paths in the Nix store. See [](#patch-shebangs.sh) on how patching shebangs works.
 ##### `dontPruneLibtoolFiles` {#var-stdenv-dontPruneLibtoolFiles}
@@ -983,7 +983,7 @@ addEnvHooks "$hostOffset" myBashFunction
 The *existence* of setup hooks has long been documented and packages inside Nixpkgs are free to use this mechanism. Other packages, however, should not rely on these mechanisms not changing between Nixpkgs versions. Because of the existing issues with this system, there's little benefit from mandating it be stable for any period of time.
-First, let's cover some setup hooks that are part of Nixpkgs' default stdenv. This means that they are run for every package built using `stdenv.mkDerivation`. Some of these are platform specific, so they may run on Linux but not Darwin or vice-versa.
+First, let's cover some setup hooks that are part of Nixpkgs' default `stdenv`. This means that they are run for every package built using `stdenv.mkDerivation` or when using a custom builder that has `source $stdenv/setup`. Some of these are platform specific, so they may run on Linux but not Darwin or vice-versa.
 ### `move-docs.sh` {#move-docs.sh}
@@ -999,7 +999,70 @@ This runs the strip command on installed binaries and libraries. This removes un
 ### `patch-shebangs.sh` {#patch-shebangs.sh}
-This setup hook patches installed scripts to use the full path to the shebang interpreter. A shebang interpreter is the first commented line of a script telling the operating system which program will run the script (e.g `#!/bin/bash`). In Nix, we want an exact path to that interpreter to be used. This often replaces `/bin/sh` with a path in the Nix store.
+This setup hook patches installed scripts to add Nix store paths to their shebang interpreter as found in the build environment. The [shebang](https://en.wikipedia.org/wiki/Shebang_(Unix)) line tells a Unix-like operating system which interpreter to use to execute the script's contents.
+::: note
+The [generic builder][generic-builder] populates `PATH` from inputs of the derivation.
+:::
+[generic-builder]: https://github.com/NixOS/nixpkgs/blob/19d4f7dc485f74109bd66ef74231285ff797a823/pkgs/stdenv/generic/builder.sh
+#### Invocation {#patch-shebangs.sh-invocation}
+Multiple paths can be specified.
+```
+patchShebangs [--build | --host] PATH...
+```
+##### Flags
+`--build`
+: Look up commands available at build time
+`--host`
+: Look up commands available at run time
+##### Examples
+```sh
+patchShebangs --host /nix/store/<hash>-hello-1.0/bin
+```
+```sh
+patchShebangs --build configure
+```
+`#!/bin/sh` will be rewritten to `#!/nix/store/<hash>-some-bash/bin/sh`.
+`#!/usr/bin/env` gets special treatment: `#!/usr/bin/env python` is rewritten to `/nix/store/<hash>/bin/python`.
+Interpreter paths that point to a valid Nix store location are not changed.
+::: note
+A script file must be marked as executable, otherwise it will not be
+considered.
+:::
+This mechanism ensures that the interpreter for a given script is always found and is exactly the one specified by the build.
+It can be disabled by setting [`dontPatchShebangs`](#var-stdenv-dontPatchShebangs):
+```nix
+stdenv.mkDerivation {
+  # ...
+  dontPatchShebangs = true;
+  # ...
+}
+```
+The file [`patch-shebangs.sh`][patch-shebangs.sh] defines the [`patchShebangs`][patchShebangs] function. It is used to implement [`patchShebangsAuto`][patchShebangsAuto], the [setup hook](#ssec-setup-hooks) that is registered to run during the [fixup phase](#ssec-fixup-phase) by default.
+If you need to run `patchShebangs` at build time, it must be called explicitly within [one of the build phases](#sec-stdenv-phases).
+[patch-shebangs.sh]: https://github.com/NixOS/nixpkgs/blob/19d4f7dc485f74109bd66ef74231285ff797a823/pkgs/build-support/setup-hooks/patch-shebangs.sh
+[patchShebangs]: https://github.com/NixOS/nixpkgs/blob/19d4f7dc485f74109bd66ef74231285ff797a823/pkgs/build-support/setup-hooks/patch-shebangs.sh#L24-L105
+[patchShebangsAuto]: https://github.com/NixOS/nixpkgs/blob/19d4f7dc485f74109bd66ef74231285ff797a823/pkgs/build-support/setup-hooks/patch-shebangs.sh#L107-L119
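For instance, an explicit build-time call could look like this sketch (the `scripts/` directory is hypothetical):

```nix
# Run patchShebangs during postPatch so the shipped scripts can be executed at build time.
stdenv.mkDerivation {
  pname = "example";
  version = "1.0";
  src = ./.;
  postPatch = ''
    patchShebangs --build scripts
  '';
}
```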
 ### `audit-tmpdir.sh` {#audit-tmpdir.sh}
@@ -1316,7 +1379,7 @@ If the libraries lack `-fPIE`, you will get the error `recompile with -fPIE`.
 [^footnote-stdenv-ignored-build-platform]: The build platform is ignored because it is a mere implementation detail of the package satisfying the dependency: As a general programming principle, dependencies are always *specified* as interfaces, not concrete implementation.
 [^footnote-stdenv-native-dependencies-in-path]: Currently, this means for native builds all dependencies are put on the `PATH`. But in the future that may not be the case for sake of matching cross: the platforms would be assumed to be unique for native and cross builds alike, so only the `depsBuild*` and `nativeBuildInputs` would be added to the `PATH`.
-[^footnote-stdenv-propagated-dependencies]: Nix itself already takes a package's transitive dependencies into account, but this propagation ensures nixpkgs-specific infrastructure like setup hooks (mentioned above) also are run as if the propagated dependency.
+[^footnote-stdenv-propagated-dependencies]: Nix itself already takes a package's transitive dependencies into account, but this propagation ensures nixpkgs-specific infrastructure like [setup hooks](#ssec-setup-hooks) also are run as if it were a propagated dependency.
 [^footnote-stdenv-find-inputs-location]: The `findInputs` function, currently residing in `pkgs/stdenv/generic/setup.sh`, implements the propagation logic.
 [^footnote-stdenv-sys-lib-search-path]: It clears the `sys_lib_*search_path` variables in the Libtool script to prevent Libtool from using libraries in `/usr/lib` and such.
 [^footnote-stdenv-build-time-guessing-impurity]: Eventually these will be passed building natively as well, to improve determinism: build-time guessing, as is done today, is a risk of impurity.

View file

@@ -55,6 +55,12 @@ in mkLicense lset) ({
     fullName = "GNU Affero General Public License v3.0 or later";
   };
+  aladdin = {
+    spdxId = "Aladdin";
+    fullName = "Aladdin Free Public License";
+    free = false;
+  };
   amazonsl = {
     fullName = "Amazon Software License";
     url = "https://aws.amazon.com/asl/";

View file

@@ -242,6 +242,8 @@ rec {
 in if ss != {} then optionAttrSetToDocList' opt.loc ss else [];
 subOptionsVisible = docOption.visible && opt.visible or null != "shallow";
 in
+# To find infinite recursion in NixOS option docs:
+# builtins.trace opt.loc
 [ docOption ] ++ optionals subOptionsVisible subOptions) (collect isOption options);

View file

@@ -91,25 +91,23 @@ rec {
     config = "mipsel-unknown-linux-gnu";
   } // platforms.fuloong2f_n32;
+  # MIPS ABI table transcribed from here: https://wiki.debian.org/Multiarch/Tuples
   # can execute on 32bit chip
-  mips-linux-gnu = { config = "mips-linux-gnu"; } // platforms.gcc_mips32r2_o32;
-  mipsel-linux-gnu = { config = "mipsel-linux-gnu"; } // platforms.gcc_mips32r2_o32;
-  mipsisa32r6-linux-gnu = { config = "mipsisa32r6-linux-gnu"; } // platforms.gcc_mips32r6_o32;
-  mipsisa32r6el-linux-gnu = { config = "mipsisa32r6el-linux-gnu"; } // platforms.gcc_mips32r6_o32;
+  mips-linux-gnu = { config = "mips-unknown-linux-gnu"; } // platforms.gcc_mips32r2_o32;
+  mipsel-linux-gnu = { config = "mipsel-unknown-linux-gnu"; } // platforms.gcc_mips32r2_o32;
+  mipsisa32r6-linux-gnu = { config = "mipsisa32r6-unknown-linux-gnu"; } // platforms.gcc_mips32r6_o32;
+  mipsisa32r6el-linux-gnu = { config = "mipsisa32r6el-unknown-linux-gnu"; } // platforms.gcc_mips32r6_o32;
   # require 64bit chip (for more registers, 64-bit floating point, 64-bit "long long") but use 32bit pointers
-  mips64-linux-gnuabin32 = { config = "mips64-linux-gnuabin32"; } // platforms.gcc_mips64r2_n32;
-  mips64el-linux-gnuabin32 = { config = "mips64el-linux-gnuabin32"; } // platforms.gcc_mips64r2_n32;
-  mipsisa64r6-linux-gnuabin32 = { config = "mipsisa64r6-linux-gnuabin32"; } // platforms.gcc_mips64r6_n32;
-  mipsisa64r6el-linux-gnuabin32 = { config = "mipsisa64r6el-linux-gnuabin32"; } // platforms.gcc_mips64r6_n32;
+  mips64-linux-gnuabin32 = { config = "mips64-unknown-linux-gnuabin32"; } // platforms.gcc_mips64r2_n32;
+  mips64el-linux-gnuabin32 = { config = "mips64el-unknown-linux-gnuabin32"; } // platforms.gcc_mips64r2_n32;
+  mipsisa64r6-linux-gnuabin32 = { config = "mipsisa64r6-unknown-linux-gnuabin32"; } // platforms.gcc_mips64r6_n32;
+  mipsisa64r6el-linux-gnuabin32 = { config = "mipsisa64r6el-unknown-linux-gnuabin32"; } // platforms.gcc_mips64r6_n32;
   # 64bit pointers
-  mips64-linux-gnuabi64 = { config = "mips64-linux-gnuabi64"; } // platforms.gcc_mips64r2_64;
-  mips64el-linux-gnuabi64 = { config = "mips64el-linux-gnuabi64"; } // platforms.gcc_mips64r2_64;
-  mipsisa64r6-linux-gnuabi64 = { config = "mipsisa64r6-linux-gnuabi64"; } // platforms.gcc_mips64r6_64;
-  mipsisa64r6el-linux-gnuabi64 = { config = "mipsisa64r6el-linux-gnuabi64"; } // platforms.gcc_mips64r6_64;
+  mips64-linux-gnuabi64 = { config = "mips64-unknown-linux-gnuabi64"; } // platforms.gcc_mips64r2_64;
+  mips64el-linux-gnuabi64 = { config = "mips64el-unknown-linux-gnuabi64"; } // platforms.gcc_mips64r2_64;
+  mipsisa64r6-linux-gnuabi64 = { config = "mipsisa64r6-unknown-linux-gnuabi64"; } // platforms.gcc_mips64r6_64;
+  mipsisa64r6el-linux-gnuabi64 = { config = "mipsisa64r6el-unknown-linux-gnuabi64"; } // platforms.gcc_mips64r6_64;
   muslpi = raspberryPi // {
     config = "armv6l-unknown-linux-musleabihf";

View file

@@ -179,7 +179,7 @@ rec {
      they take effect as soon as the oldest release reaches end of life. */
   oldestSupportedRelease =
     # Update on master only. Do not backport.
-    2111;
+    2205;
   /* Whether a feature is supported in all supported releases (at the time of
      release branch-off, if applicable). See `oldestSupportedRelease`. */

File diff suppressed because it is too large

View file

@@ -0,0 +1,52 @@
#!/usr/bin/env nix-shell
#!nix-shell -i perl -p perl -p perlPackages.LWP -p perlPackages.LWPProtocolHttps -p perlPackages.LWPUserAgent -p perlPackages.JSON -p perlPackages.PathTiny
use LWP::UserAgent;
use JSON;
use Path::Tiny;
use strict;
use warnings;
my $maintainers_list_nix = "../maintainer-list.nix";
my $maintainers_json = from_json(`nix-instantiate --json --eval --expr 'builtins.fromJSON (builtins.toJSON (import $maintainers_list_nix))'`);
STDOUT->autoflush(1);
my $ua = LWP::UserAgent->new();
keys %$maintainers_json; # reset the internal iterator so a prior each() doesn't affect the loop
while(my($k, $v) = each %$maintainers_json) {
    my $current_user = %$v{'github'};
    if (!defined $current_user) {
        print "$k has no github handle\n";
        next;
    }
    my $github_id = %$v{'githubId'};
    if (!defined $github_id) {
        print "$k has no githubId\n";
        next;
    }
    my $url = 'https://api.github.com/user/' . $github_id;
    my $resp = $ua->get(
        $url,
        "Authorization" => "Token $ENV{GH_TOKEN}"
    );
    if ($resp->header("X-RateLimit-Remaining") == 0) {
        my $ratelimit_reset = $resp->header("X-RateLimit-Reset");
        print "Request limit exceeded, waiting until " . scalar localtime $ratelimit_reset . "\n";
        sleep($ratelimit_reset - time() + 5);
    }
    if ($resp->code != 200) {
        print $current_user . " likely deleted their github account\n";
        next;
    }
    my $resp_json = from_json($resp->content);
    my $api_user = %$resp_json{"login"};
    if ($current_user ne $api_user) {
        print $current_user . " is now known on github as " . $api_user . ". Editing maintainer-list.nix…\n";
        my $file = path($maintainers_list_nix);
        my $data = $file->slurp_utf8;
        $data =~ s/github = "$current_user";$/github = "$api_user";/m;
        $file->spew_utf8($data);
    }
}

View file

@@ -0,0 +1,83 @@
#! /usr/bin/env nix-shell
#! nix-shell -I nixpkgs=. -i bash -p delta jq perl
set -euo pipefail
shopt -s inherit_errexit
cat <<'EOF'
This script attempts to automatically convert option descriptions from
DocBook syntax to markdown. Naturally this process is incomplete and
imperfect, so any changes generated by this script MUST be reviewed.
Possible problems include: incorrectly replaced tags, badly formatted
markdown, DocBook tags this script doesn't recognize remaining in the
output and crashing the docs build, incorrect escaping of markdown
metacharacters, incorrect unescaping of XML entities—and the list goes on.
Always review the generated changes!
Some known limitations:
- Does not transform literalDocBook items
- Replacements can occur in non-option code, such as string literals
EOF
build-options-json() {
  nix-build --no-out-link --expr '
    let
      sys = import ./nixos/default.nix {
        configuration = {};
      };
    in
    [
      sys.config.system.build.manual.optionsJSON
    ]
  '
}
git diff --quiet || {
echo "Worktree is dirty. Please stash or commit first."
exit 1
}
echo "Building options.json ..."
old_options=$(build-options-json)
echo "Applying replacements ..."
perl -pi -e '
BEGIN {
undef $/;
}
s,<literal>([^`]*?)</literal>,`$1`,smg;
s,<replaceable>([]*?)</replaceable>,«$1»,smg;
s,<filename>([^`]*?)</filename>,{file}`$1`,smg;
s,<option>([^`]*?)</option>,{option}`$1`,smg;
s,<code>([^`]*?)</code>,`$1`,smg;
s,<command>([^`]*?)</command>,{command}`$1`,smg;
s,<link xlink:href="(.+?)" ?/>,<$1>,smg;
s,<link xlink:href="(.+?)">(.*?)</link>,[$2]($1),smg;
s,<package>([^`]*?)</package>,`$1`,smg;
s,<emphasis>([^*]*?)</emphasis>,*$1*,smg;
s,<citerefentry>\s*
<refentrytitle>\s*(.*?)\s*</refentrytitle>\s*
<manvolnum>\s*(.*?)\s*</manvolnum>\s*
</citerefentry>,{manpage}`$1($2)`,smgx;
s,^( +description =),\1 lib.mdDoc,smg;
' "$@"
echo "Building options.json again ..."
new_options=$(build-options-json)
! cmp -s {$old_options,$new_options}/share/doc/nixos/options.json && {
diff -U10 \
<(jq . <$old_options/share/doc/nixos/options.json) \
<(jq . <$new_options/share/doc/nixos/options.json) \
| delta
}

View file

@@ -98,6 +98,7 @@ with lib.maintainers; {
     members = [
       astro
       SuperSandro2000
+      revol-xut
     ];
     scope = "Maintain packages used in the C3D2 hackspace";
     shortName = "c3d2";
@@ -325,7 +326,6 @@
   jitsi = {
     members = [
       cleeyv
-      petabyteboy
       ryantm
       yuka
     ];

View file

@@ -1,11 +1,18 @@
 # Adding Custom Packages {#sec-custom-packages}
 It's possible that a package you need is not available in NixOS. In that
-case, you can do two things. First, you can clone the Nixpkgs
-repository, add the package to your clone, and (optionally) submit a
-patch or pull request to have it accepted into the main Nixpkgs repository.
-This is described in detail in the [Nixpkgs manual](https://nixos.org/nixpkgs/manual).
-In short, you clone Nixpkgs:
+case, you can do two things. Either you can package it with Nix, or you can try
+to use prebuilt packages from upstream. Due to the peculiarities of NixOS, it
+is important to note that building software from source is often easier than
+using pre-built executables.
+## Building with Nix {#sec-custom-packages-nix}
+This can be done either in-tree or out-of-tree. For an in-tree build, you can
+clone the Nixpkgs repository, add the package to your clone, and (optionally)
+submit a patch or pull request to have it accepted into the main Nixpkgs
+repository. This is described in detail in the [Nixpkgs
+manual](https://nixos.org/nixpkgs/manual). In short, you clone Nixpkgs:
 ```ShellSession
 $ git clone https://github.com/NixOS/nixpkgs
@@ -72,3 +79,21 @@ $ nix-build my-hello.nix
 $ ./result/bin/hello
 Hello, world!
 ```
+## Using pre-built executables {#sec-custom-packages-prebuilt}
+Most pre-built executables will not work on NixOS. There are two notable
+exceptions: flatpaks and AppImages. For flatpaks see the [dedicated
+section](#module-services-flatpak). AppImages will not run "as-is" on NixOS.
+First you need to install `appimage-run`: add to `/etc/nixos/configuration.nix`
+```nix
+environment.systemPackages = [ pkgs.appimage-run ];
+```
+Then instead of running the AppImage "as-is", run `appimage-run foo.appimage`.
+To make other pre-built executables work on NixOS, you need to package them
+with Nix and special helpers like `autoPatchelfHook` or `buildFHSUserEnv`. See
+the [Nixpkgs manual](https://nixos.org/nixpkgs/manual) for details. This
+is complex and often doing a source build is easier.
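To illustrate the `autoPatchelfHook` route mentioned above, a hedged sketch follows; the package name, URL, hash and runtime libraries are placeholders:

```nix
# Patch a generic prebuilt Linux binary so its interpreter and libraries point into the Nix store.
{ stdenv, lib, fetchurl, autoPatchelfHook }:
stdenv.mkDerivation {
  pname = "some-prebuilt-tool";
  version = "1.2.3";
  src = fetchurl {
    url = "https://example.com/some-prebuilt-tool-1.2.3.tar.gz";
    sha256 = lib.fakeSha256; # replace with the real hash
  };
  nativeBuildInputs = [ autoPatchelfHook ];
  # Libraries the binary links against; autoPatchelfHook resolves them from buildInputs.
  buildInputs = [ stdenv.cc.cc.lib ];
  installPhase = ''
    mkdir -p $out/bin
    cp some-prebuilt-tool $out/bin/
  '';
}
```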

View file

@@ -24,11 +24,16 @@ Some Xfce programs are not installed automatically. To install them
 manually (system wide), put them into your
 [](#opt-environment.systemPackages) from `pkgs.xfce`.
-## Thunar Plugins {#sec-xfce-thunar-plugins .unnumbered}
+## Thunar {#sec-xfce-thunar-plugins .unnumbered}
+Thunar (the Xfce file manager) is automatically enabled when Xfce is
+enabled. To enable Thunar without enabling Xfce, use the configuration
+option [](#opt-programs.thunar.enable) instead of simply adding
+`pkgs.xfce.thunar` to [](#opt-environment.systemPackages).
 If you\'d like to add extra plugins to Thunar, add them to
-[](#opt-services.xserver.desktopManager.xfce.thunarPlugins).
-You shouldn\'t just add them to [](#opt-environment.systemPackages).
+[](#opt-programs.thunar.plugins). You shouldn\'t just add them to
+[](#opt-environment.systemPackages).
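A configuration sketch using the options named above (the archive plugin is only an example):

```nix
# Enable Thunar on its own and add one plugin via programs.thunar.plugins.
{ pkgs, ... }:
{
  programs.thunar.enable = true;
  programs.thunar.plugins = with pkgs.xfce; [ thunar-archive-plugin ];
}
```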
 ## Troubleshooting {#sec-xfce-troubleshooting .unnumbered}

View file

@@ -133,12 +133,12 @@ let
       # ^ redirect assumes xmllint doesn't print to stdout
     }
-    lintrng manual-combined.xml
-    lintrng man-pages-combined.xml
     mkdir $out
     cp manual-combined.xml $out/
     cp man-pages-combined.xml $out/
+    lintrng $out/manual-combined.xml
+    lintrng $out/man-pages-combined.xml
   '';
   olinkDB = runCommand "manual-olinkdb"

View file

@@ -54,7 +54,7 @@ possibility into account that they have to create them first.
 ## NixOS snippets {#sec-activation-script-nixos-snippets}
 There are some snippets NixOS enables by default because disabling them would
-most likely break you system. This section lists a few of them and what they
+most likely break your system. This section lists a few of them and what they
 do:
 - `binsh` creates `/bin/sh` which points to the runtime shell

View file

@@ -2,10 +2,20 @@
 <title>Adding Custom Packages</title>
 <para>
 It's possible that a package you need is not available in NixOS. In
-that case, you can do two things. First, you can clone the Nixpkgs
-repository, add the package to your clone, and (optionally) submit a
-patch or pull request to have it accepted into the main Nixpkgs
-repository. This is described in detail in the
+that case, you can do two things. Either you can package it with
+Nix, or you can try to use prebuilt packages from upstream. Due to
+the peculiarities of NixOS, it is important to note that building
+software from source is often easier than using pre-built
+executables.
+</para>
+<section xml:id="sec-custom-packages-nix">
+<title>Building with Nix</title>
+<para>
+This can be done either in-tree or out-of-tree. For an in-tree
+build, you can clone the Nixpkgs repository, add the package to
+your clone, and (optionally) submit a patch or pull request to
+have it accepted into the main Nixpkgs repository. This is
+described in detail in the
 <link xlink:href="https://nixos.org/nixpkgs/manual">Nixpkgs
 manual</link>. In short, you clone Nixpkgs:
 </para>
@@ -29,8 +39,8 @@ environment.systemPackages = [ pkgs.my-package ];
 # nixos-rebuild switch -I nixpkgs=/path/to/my/nixpkgs
 </programlisting>
 <para>
-The second possibility is to add the package outside of the Nixpkgs
-tree. For instance, here is how you specify a build of the
+The second possibility is to add the package outside of the
+Nixpkgs tree. For instance, here is how you specify a build of the
 <link xlink:href="https://www.gnu.org/software/hello/">GNU
 Hello</link> package directly in
 <literal>configuration.nix</literal>:
@@ -77,4 +87,32 @@ $ nix-build my-hello.nix
 $ ./result/bin/hello
 Hello, world!
 </programlisting>
+</section>
+<section xml:id="sec-custom-packages-prebuilt">
+<title>Using pre-built executables</title>
+<para>
+Most pre-built executables will not work on NixOS. There are two
+notable exceptions: flatpaks and AppImages. For flatpaks see the
+<link linkend="module-services-flatpak">dedicated section</link>.
+AppImages will not run <quote>as-is</quote> on NixOS. First you
+need to install <literal>appimage-run</literal>: add to
+<literal>/etc/nixos/configuration.nix</literal>
+</para>
+<programlisting language="bash">
+environment.systemPackages = [ pkgs.appimage-run ];
+</programlisting>
+<para>
+Then instead of running the AppImage <quote>as-is</quote>, run
+<literal>appimage-run foo.appimage</literal>.
+</para>
+<para>
+To make other pre-built executables work on NixOS, you need to
+package them with Nix and special helpers like
+<literal>autoPatchelfHook</literal> or
+<literal>buildFHSUserEnv</literal>. See the
+<link xlink:href="https://nixos.org/nixpkgs/manual">Nixpkgs
+manual</link> for details. This is complex and often doing a
+source build is easier.
+</para>
+</section>
 </section>

View file

@@ -27,12 +27,18 @@ services.picom = {
 <literal>pkgs.xfce</literal>.
 </para>
 <section xml:id="sec-xfce-thunar-plugins">
-<title>Thunar Plugins</title>
+<title>Thunar</title>
+<para>
+Thunar (the Xfce file manager) is automatically enabled when Xfce
+is enabled. To enable Thunar without enabling Xfce, use the
+configuration option <xref linkend="opt-programs.thunar.enable" />
+instead of simply adding <literal>pkgs.xfce.thunar</literal> to
+<xref linkend="opt-environment.systemPackages" />.
+</para>
 <para>
 If you'd like to add extra plugins to Thunar, add them to
-<xref linkend="opt-services.xserver.desktopManager.xfce.thunarPlugins" />.
-You shouldn't just add them to
-<xref linkend="opt-environment.systemPackages" />.
+<xref linkend="opt-programs.thunar.plugins" />. You shouldn't just
+add them to <xref linkend="opt-environment.systemPackages" />.
 </para>
 </section>
 <section xml:id="sec-xfce-troubleshooting">

View file

@@ -73,7 +73,7 @@ system.activationScripts.my-activation-script = {
 <title>NixOS snippets</title>
 <para>
 There are some snippets NixOS enables by default because disabling
-them would most likely break you system. This section lists a few
+them would most likely break your system. This section lists a few
 of them and what they do:
 </para>
 <itemizedlist spacing="compact">

View file

@@ -2784,6 +2784,12 @@ sudo cp /var/lib/redis/dump.rdb /var/lib/redis-peertube/dump.rdb
 runs a PostgreSQL server for the duration of package checks.
 </para>
 </listitem>
+<listitem>
+<para>
+<literal>zfs</literal> was updated from 2.1.4 to 2.1.5,
+enabling it to be used with Linux kernel 5.18.
+</para>
+</listitem>
 <listitem>
 <para>
 <literal>stdenv.mkDerivation</literal> now supports a

View file

@ -156,6 +156,13 @@
<link linkend="opt-services.expressvpn.enable">services.expressvpn</link>. <link linkend="opt-services.expressvpn.enable">services.expressvpn</link>.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
<link xlink:href="https://www.grafana.com/oss/tempo/">Grafana
Tempo</link>, a distributed tracing store. Available as
<link linkend="opt-services.tempo.enable">services.tempo</link>.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>
<section xml:id="sec-release-22.11-incompatibilities"> <section xml:id="sec-release-22.11-incompatibilities">
@ -192,10 +199,12 @@
<listitem> <listitem>
<para> <para>
<literal>i18n.supportedLocales</literal> is now by default <literal>i18n.supportedLocales</literal> is now by default
only generated with the default locale set in only generated with the locales set in
<literal>i18n.defaultLocale</literal>. This got copied over <literal>i18n.defaultLocale</literal> and
from the minimal profile and reduces the final system size by <literal>i18n.extraLocaleSettings</literal>. This got
200MB. If you require all locales installed set the option to partially copied over from the minimal profile and reduces the
final system size by up to 200MB. If you require all locales
installed set the option to
<literal>[ &quot;all&quot; ]</literal>. <literal>[ &quot;all&quot; ]</literal>.
</para> </para>
</listitem> </listitem>
@ -214,6 +223,14 @@
<literal>(with foo; isPower &amp;&amp; is32bit &amp;&amp; isBigEndian)</literal>. <literal>(with foo; isPower &amp;&amp; is32bit &amp;&amp; isBigEndian)</literal>.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
<literal>bsp-layout</literal> no longer uses the command
<literal>cycle</literal> to switch to other window layouts, as
it got replaced by the commands <literal>previous</literal>
and <literal>next</literal>.
</para>
</listitem>
<listitem> <listitem>
<para> <para>
The Barco ClickShare driver/client package The Barco ClickShare driver/client package
@ -240,6 +257,16 @@
maintainer to update the package. maintainer to update the package.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
The <literal>services.graphite.api</literal> and
<literal>services.graphite.beacon</literal> NixOS options, and
the <literal>python3.pkgs.graphite_api</literal>,
<literal>python3.pkgs.graphite_beacon</literal> and
<literal>python3.pkgs.influxgraph</literal> packages, have
been removed due to lack of upstream maintenance.
</para>
</listitem>
<listitem> <listitem>
<para> <para>
(Neo)Vim can not be configured with (Neo)Vim can not be configured with
@ -275,6 +302,13 @@
<literal>hardware.saleae-logic.package</literal>. <literal>hardware.saleae-logic.package</literal>.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
The Redis module now disables RDB persistence when
<literal>services.redis.servers.&lt;name&gt;.save = []</literal>
instead of using the Redis default.
</para>
</listitem>
<listitem> <listitem>
<para> <para>
Matrix Synapse now requires entries in the Matrix Synapse now requires entries in the
@ -288,8 +322,11 @@
</listitem> </listitem>
<listitem> <listitem>
<para> <para>
<literal>zfs</literal> was updated from 2.1.4 to 2.1.5, <literal>dockerTools.buildImage</literal> deprecates the
enabling it to be used with Linux kernel 5.18. misunderstood <literal>contents</literal> parameter, in favor
of <literal>copyToRoot</literal>. Use
<literal>copyToRoot = buildEnv { ... };</literal> or similar
if you intend to add packages to <literal>/bin</literal>.
</para> </para>
</listitem> </listitem>
<listitem> <listitem>
@ -299,6 +336,30 @@
as coreboot's fork is no longer available. as coreboot's fork is no longer available.
</para> </para>
</listitem> </listitem>
<listitem>
<para>
Add udev rules for the Teensy family of microcontrollers.
</para>
</listitem>
<listitem>
<para>
There is a new module for the <literal>thunar</literal>
program (the Xfce file manager), which depends on the
<literal>xfconf</literal> dbus service, and also has a dbus
service and a systemd unit. The option
<literal>services.xserver.desktopManager.xfce.thunarPlugins</literal>
has been renamed to
<literal>programs.thunar.plugins</literal>, and in a future
release it may be removed.
</para>
</listitem>
<listitem>
<para>
There is a new module for the <literal>xfconf</literal>
program (the Xfce configuration storage system), which has a
dbus service.
</para>
</listitem>
</itemizedlist> </itemizedlist>
</section> </section>
</section> </section>

View file

@ -974,6 +974,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- The new [`postgresqlTestHook`](https://nixos.org/manual/nixpkgs/stable/#sec-postgresqlTestHook) runs a PostgreSQL server for the duration of package checks. - The new [`postgresqlTestHook`](https://nixos.org/manual/nixpkgs/stable/#sec-postgresqlTestHook) runs a PostgreSQL server for the duration of package checks.
- `zfs` was updated from 2.1.4 to 2.1.5, enabling it to be used with Linux kernel 5.18.
- `stdenv.mkDerivation` now supports a self-referencing `finalAttrs:` parameter - `stdenv.mkDerivation` now supports a self-referencing `finalAttrs:` parameter
containing the final `mkDerivation` arguments including overrides. containing the final `mkDerivation` arguments including overrides.
`drv.overrideAttrs` now supports two parameters `finalAttrs: previousAttrs:`. `drv.overrideAttrs` now supports two parameters `finalAttrs: previousAttrs:`.
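  As a minimal sketch of the new pattern (the attribute values here are placeholders, not taken from nixpkgs):

  ```nix
  stdenv.mkDerivation (finalAttrs: {
    pname = "example";     # placeholder
    version = "1.2.3";     # placeholder
    dontUnpack = true;     # no source needed for this sketch
    # finalAttrs refers to the final argument set, including later overrides:
    installPhase = ''
      mkdir -p $out
      echo "${finalAttrs.pname}-${finalAttrs.version}" > $out/name
    '';
  })
  # overrides can also observe the final values:
  # pkg.overrideAttrs (finalAttrs: previousAttrs: { version = "2.0.0"; })
  ```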

View file

@ -64,6 +64,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- [expressvpn](https://www.expressvpn.com), the CLI client for ExpressVPN. Available as [services.expressvpn](#opt-services.expressvpn.enable). - [expressvpn](https://www.expressvpn.com), the CLI client for ExpressVPN. Available as [services.expressvpn](#opt-services.expressvpn.enable).
- [Grafana Tempo](https://www.grafana.com/oss/tempo/), a distributed tracing store. Available as [services.tempo](#opt-services.tempo.enable).
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. --> <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
## Backward Incompatibilities {#sec-release-22.11-incompatibilities} ## Backward Incompatibilities {#sec-release-22.11-incompatibilities}
@ -80,12 +82,14 @@ In addition to numerous new and upgraded packages, this release has the followin
and [changelog](https://ngrok.com/docs/ngrok-agent/changelog). Notably, breaking changes are that the config file format has and [changelog](https://ngrok.com/docs/ngrok-agent/changelog). Notably, breaking changes are that the config file format has
changed and support for single hyphen arguments was dropped. changed and support for single hyphen arguments was dropped.
- `i18n.supportedLocales` is now by default only generated with the default locale set in `i18n.defaultLocale`. - `i18n.supportedLocales` is now by default only generated with the locales set in `i18n.defaultLocale` and `i18n.extraLocaleSettings`.
This got copied over from the minimal profile and reduces the final system size by 200MB. This got partially copied over from the minimal profile and reduces the final system size by up to 200MB.
If you require all locales installed set the option to ``[ "all" ]``. If you require all locales installed set the option to ``[ "all" ]``.
- The `isPowerPC` predicate, found on `platform` attrsets (`hostPlatform`, `buildPlatform`, `targetPlatform`, etc) has been removed in order to reduce confusion. The predicate was defined such that it matches only the 32-bit big-endian members of the POWER/PowerPC family, despite having a name which would imply a broader set of systems. If you were using this predicate, you can replace `foo.isPowerPC` with `(with foo; isPower && is32bit && isBigEndian)`. - The `isPowerPC` predicate, found on `platform` attrsets (`hostPlatform`, `buildPlatform`, `targetPlatform`, etc) has been removed in order to reduce confusion. The predicate was defined such that it matches only the 32-bit big-endian members of the POWER/PowerPC family, despite having a name which would imply a broader set of systems. If you were using this predicate, you can replace `foo.isPowerPC` with `(with foo; isPower && is32bit && isBigEndian)`.
- `bsp-layout` no longer uses the command `cycle` to switch to other window layouts, as it got replaced by the commands `previous` and `next`.
- The Barco ClickShare driver/client package `pkgs.clickshare-csc1` and the option `programs.clickshare-csc1.enable` have been removed, - The Barco ClickShare driver/client package `pkgs.clickshare-csc1` and the option `programs.clickshare-csc1.enable` have been removed,
as it requires `qt4`, which reached its end-of-life 2015 and will no longer be supported by nixpkgs. as it requires `qt4`, which reached its end-of-life 2015 and will no longer be supported by nixpkgs.
[According to Barco](https://www.barco.com/de/support/knowledge-base/4380-can-i-use-linux-os-with-clickshare-base-units) many of their base unit models can be used with Google Chrome and the Google Cast extension. [According to Barco](https://www.barco.com/de/support/knowledge-base/4380-can-i-use-linux-os-with-clickshare-base-units) many of their base unit models can be used with Google Chrome and the Google Cast extension.
@ -95,6 +99,11 @@ In addition to numerous new and upgraded packages, this release has the followin
- riak package removed along with `services.riak` module, due to lack of maintainer to update the package. - riak package removed along with `services.riak` module, due to lack of maintainer to update the package.
- The `services.graphite.api` and `services.graphite.beacon` NixOS options, and
the `python3.pkgs.graphite_api`, `python3.pkgs.graphite_beacon` and
`python3.pkgs.influxgraph` packages, have been removed due to lack of upstream
maintenance.
- (Neo)Vim can not be configured with `configure.pathogen` anymore to reduce maintenance burden. - (Neo)Vim can not be configured with `configure.pathogen` anymore to reduce maintenance burden.
Use `configure.packages` instead. Use `configure.packages` instead.
@ -108,10 +117,19 @@ Use `configure.packages` instead.
- A new module was added for the Saleae Logic device family, providing the options `hardware.saleae-logic.enable` and `hardware.saleae-logic.package`. - A new module was added for the Saleae Logic device family, providing the options `hardware.saleae-logic.enable` and `hardware.saleae-logic.package`.
- The Redis module now disables RDB persistence when `services.redis.servers.<name>.save = []` instead of using the Redis default.
- Matrix Synapse now requires entries in the `state_group_edges` table to be unique, in order to prevent accidentally introducing duplicate information (for example, because a database backup was restored multiple times). If your Synapse database already has duplicate rows in this table, this could fail with an error and require manual remediation. - Matrix Synapse now requires entries in the `state_group_edges` table to be unique, in order to prevent accidentally introducing duplicate information (for example, because a database backup was restored multiple times). If your Synapse database already has duplicate rows in this table, this could fail with an error and require manual remediation.
- `zfs` was updated from 2.1.4 to 2.1.5, enabling it to be used with Linux kernel 5.18. - `dockerTools.buildImage` deprecates the misunderstood `contents` parameter, in favor of `copyToRoot`.
Use `copyToRoot = buildEnv { ... };` or similar if you intend to add packages to `/bin`.
- memtest86+ was updated from 5.00-coreboot-002 to 6.00-beta2. It is now the upstream version from https://www.memtest.org/, as coreboot's fork is no longer available. - memtest86+ was updated from 5.00-coreboot-002 to 6.00-beta2. It is now the upstream version from https://www.memtest.org/, as coreboot's fork is no longer available.
- Add udev rules for the Teensy family of microcontrollers.
- There is a new module for the `thunar` program (the Xfce file manager), which depends on the `xfconf` dbus service, and also has a dbus service and a systemd unit. The option `services.xserver.desktopManager.xfce.thunarPlugins` has been renamed to `programs.thunar.plugins`, and in a future release it may be removed.
- There is a new module for the `xfconf` program (the Xfce configuration storage system), which has a dbus service.
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. --> <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->

View file

@ -128,7 +128,7 @@ let
gptfdisk gptfdisk
nix nix
parted parted
utillinux util-linux
zfs zfs
] ]
); );

View file

@ -22,6 +22,10 @@
, transformOptions ? lib.id # function for additional transformations of the options , transformOptions ? lib.id # function for additional transformations of the options
, documentType ? "appendix" # TODO deprecate "appendix" in favor of "none" , documentType ? "appendix" # TODO deprecate "appendix" in favor of "none"
# and/or rename function to moduleOptionDoc for clean slate # and/or rename function to moduleOptionDoc for clean slate
# If you include more than one option list into a document, you need to
# provide different ids.
, variablelistId ? "configuration-variable-list"
, revision ? "" # Specify revision for the options , revision ? "" # Specify revision for the options
# a set of options the docs we are generating will be merged into, as if by recursiveUpdate. # a set of options the docs we are generating will be merged into, as if by recursiveUpdate.
# used to split the options doc build into a static part (nixos/modules) and a dynamic part # used to split the options doc build into a static part (nixos/modules) and a dynamic part
@ -177,6 +181,7 @@ in rec {
${pkgs.libxslt.bin}/bin/xsltproc \ ${pkgs.libxslt.bin}/bin/xsltproc \
--stringparam documentType '${documentType}' \ --stringparam documentType '${documentType}' \
--stringparam revision '${revision}' \ --stringparam revision '${revision}' \
--stringparam variablelistId '${variablelistId}' \
-o intermediate.xml ${./options-to-docbook.xsl} sorted.xml -o intermediate.xml ${./options-to-docbook.xsl} sorted.xml
${pkgs.libxslt.bin}/bin/xsltproc \ ${pkgs.libxslt.bin}/bin/xsltproc \
-o "$out" ${./postprocess-option-descriptions.xsl} intermediate.xml -o "$out" ${./postprocess-option-descriptions.xsl} intermediate.xml

View file

@ -57,19 +57,21 @@ def convertMD(options: Dict[str, Any]) -> str:
try: try:
return super(Renderer, self)._get_method(name) return super(Renderer, self)._get_method(name)
except AttributeError: except AttributeError:
def not_supported(children, **kwargs): def not_supported(*args, **kwargs):
raise NotImplementedError("md node not supported yet", name, children, **kwargs) raise NotImplementedError("md node not supported yet", name, args, **kwargs)
return not_supported return not_supported
def text(self, text): def text(self, text):
return escape(text) return escape(text)
def paragraph(self, text): def paragraph(self, text):
return text + "\n\n" return text + "\n\n"
def newline(self):
return "<literallayout>\n</literallayout>"
def codespan(self, text): def codespan(self, text):
return f"<literal>{text}</literal>" return f"<literal>{escape(text)}</literal>"
def block_code(self, text, info=None): def block_code(self, text, info=None):
info = f" language={quoteattr(info)}" if info is not None else "" info = f" language={quoteattr(info)}" if info is not None else ""
return f"<programlisting{info}>\n{text}</programlisting>" return f"<programlisting{info}>\n{escape(text)}</programlisting>"
def link(self, link, text=None, title=None): def link(self, link, text=None, title=None):
if link[0:1] == '#': if link[0:1] == '#':
attr = "linkend" attr = "linkend"
@ -102,6 +104,8 @@ def convertMD(options: Dict[str, Any]) -> str:
# a single paragraph and the original docbook string is no longer # a single paragraph and the original docbook string is no longer
# available to restore the trailer. # available to restore the trailer.
return f"<{tag}><para>{text.rstrip()}</para></{tag}>" return f"<{tag}><para>{text.rstrip()}</para></{tag}>"
def block_quote(self, text):
return f"<blockquote><para>{text}</para></blockquote>"
def command(self, text): def command(self, text):
return f"<command>{escape(text)}</command>" return f"<command>{escape(text)}</command>"
def option(self, text): def option(self, text):
@ -194,7 +198,7 @@ overrides = pivot(json.load(open(sys.argv[2 + optOffset], 'r')))
for (k, v) in options.items(): for (k, v) in options.items():
# The _module options are not declared in nixos/modules # The _module options are not declared in nixos/modules
if v.value['loc'][0] != "_module": if v.value['loc'][0] != "_module":
v.value['declarations'] = list(map(lambda s: f'nixos/modules/{s}', v.value['declarations'])) v.value['declarations'] = list(map(lambda s: f'nixos/modules/{s}' if isinstance(s, str) else s, v.value['declarations']))
# merge both descriptions # merge both descriptions
for (k, v) in overrides.items(): for (k, v) in overrides.items():

View file

@ -14,6 +14,7 @@
<xsl:param name="revision" /> <xsl:param name="revision" />
<xsl:param name="documentType" /> <xsl:param name="documentType" />
<xsl:param name="program" /> <xsl:param name="program" />
<xsl:param name="variablelistId" />
<xsl:template match="/expr/list"> <xsl:template match="/expr/list">
@ -31,7 +32,8 @@
</xsl:template> </xsl:template>
<xsl:template name="variable-list"> <xsl:template name="variable-list">
<variablelist xml:id="configuration-variable-list"> <variablelist>
<xsl:attribute name="id" namespace="http://www.w3.org/XML/1998/namespace"><xsl:value-of select="$variablelistId"/></xsl:attribute>
<xsl:for-each select="attrs"> <xsl:for-each select="attrs">
<xsl:variable name="id" select=" <xsl:variable name="id" select="
concat('opt-', concat('opt-',

View file

@ -116,7 +116,7 @@ let
gptfdisk gptfdisk
nix nix
parted parted
utillinux util-linux
zfs zfs
] ]
); );

View file

@ -65,7 +65,7 @@ let
${fcBool cfg.hinting.autohint} ${fcBool cfg.hinting.autohint}
</edit> </edit>
<edit mode="append" name="hintstyle"> <edit mode="append" name="hintstyle">
<const>hintslight</const> <const>${cfg.hinting.style}</const>
</edit> </edit>
<edit mode="append" name="antialias"> <edit mode="append" name="antialias">
${fcBool cfg.antialias} ${fcBool cfg.antialias}
@ -226,7 +226,6 @@ in
(mkRenamedOptionModule [ "fonts" "fontconfig" "ultimate" "useEmbeddedBitmaps" ] [ "fonts" "fontconfig" "useEmbeddedBitmaps" ]) (mkRenamedOptionModule [ "fonts" "fontconfig" "ultimate" "useEmbeddedBitmaps" ] [ "fonts" "fontconfig" "useEmbeddedBitmaps" ])
(mkRenamedOptionModule [ "fonts" "fontconfig" "ultimate" "forceAutohint" ] [ "fonts" "fontconfig" "forceAutohint" ]) (mkRenamedOptionModule [ "fonts" "fontconfig" "ultimate" "forceAutohint" ] [ "fonts" "fontconfig" "forceAutohint" ])
(mkRenamedOptionModule [ "fonts" "fontconfig" "ultimate" "renderMonoTTFAsBitmap" ] [ "fonts" "fontconfig" "renderMonoTTFAsBitmap" ]) (mkRenamedOptionModule [ "fonts" "fontconfig" "ultimate" "renderMonoTTFAsBitmap" ] [ "fonts" "fontconfig" "renderMonoTTFAsBitmap" ])
(mkRemovedOptionModule [ "fonts" "fontconfig" "hinting" "style" ] "")
(mkRemovedOptionModule [ "fonts" "fontconfig" "forceAutohint" ] "") (mkRemovedOptionModule [ "fonts" "fontconfig" "forceAutohint" ] "")
(mkRemovedOptionModule [ "fonts" "fontconfig" "renderMonoTTFAsBitmap" ] "") (mkRemovedOptionModule [ "fonts" "fontconfig" "renderMonoTTFAsBitmap" ] "")
(mkRemovedOptionModule [ "fonts" "fontconfig" "dpi" ] "Use display server-specific options") (mkRemovedOptionModule [ "fonts" "fontconfig" "dpi" ] "Use display server-specific options")
@ -349,6 +348,20 @@ in
fonts, but better than unhinted fonts. fonts, but better than unhinted fonts.
''; '';
}; };
style = mkOption {
type = types.enum [ "hintnone" "hintslight" "hintmedium" "hintfull" ];
default = "hintslight";
description = ''
Hintstyle is the amount of font reshaping done to line up
to the grid.
hintslight will make the font fuzzier in order to line up to
the grid, but will retain more of the font shape, while hintfull
will produce a crisp font that aligns well to the pixel grid but
loses a greater amount of font shape.
'';
};
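        # A hypothetical user configuration using this option (sketch only):
        #   fonts.fontconfig.hinting.style = "hintfull";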
}; };
includeUserConf = mkOption { includeUserConf = mkOption {

View file

@ -53,8 +53,22 @@ with lib;
supportedLocales = mkOption { supportedLocales = mkOption {
type = types.listOf types.str; type = types.listOf types.str;
default = [ (config.i18n.defaultLocale + "/UTF-8") ]; default = builtins.map (l: l + "/UTF-8")
defaultText = literalExpression "[ (config.i18n.defaultLocale + \"/UTF-8\") ]"; (unique (
[
"C.UTF-8"
config.i18n.defaultLocale
] ++ (attrValues (filterAttrs (n: v: n != "LANGUAGE") config.i18n.extraLocaleSettings))
));
defaultText = literalExpression ''
builtins.map (l: l + "/UTF-8")
(unique (
[
"C.UTF-8"
config.i18n.defaultLocale
] ++ (attrValues (filterAttrs (n: v: n != "LANGUAGE") config.i18n.extraLocaleSettings))
))
'';
example = ["en_US.UTF-8/UTF-8" "nl_NL.UTF-8/UTF-8" "nl_NL/ISO-8859-1"]; example = ["en_US.UTF-8/UTF-8" "nl_NL.UTF-8/UTF-8" "nl_NL/ISO-8859-1"];
description = '' description = ''
List of locales that the system should support. The value List of locales that the system should support. The value

View file

@ -8,14 +8,21 @@ let
isQGnome = cfg.platformTheme == "gnome" && builtins.elem cfg.style ["adwaita" "adwaita-dark"]; isQGnome = cfg.platformTheme == "gnome" && builtins.elem cfg.style ["adwaita" "adwaita-dark"];
isQtStyle = cfg.platformTheme == "gtk2" && !(builtins.elem cfg.style ["adwaita" "adwaita-dark"]); isQtStyle = cfg.platformTheme == "gtk2" && !(builtins.elem cfg.style ["adwaita" "adwaita-dark"]);
isQt5ct = cfg.platformTheme == "qt5ct";
isLxqt = cfg.platformTheme == "lxqt";
isKde = cfg.platformTheme == "kde";
packages = if isQGnome then [ pkgs.qgnomeplatform pkgs.adwaita-qt ] packages = if isQGnome then [ pkgs.qgnomeplatform pkgs.adwaita-qt ]
else if isQtStyle then [ pkgs.libsForQt5.qtstyleplugins ] else if isQtStyle then [ pkgs.libsForQt5.qtstyleplugins ]
else if isQt5ct then [ pkgs.libsForQt5.qt5ct ]
else if isLxqt then [ pkgs.lxqt.lxqt-qtplugin pkgs.lxqt.lxqt-config ]
else if isKde then [ pkgs.libsForQt5.plasma-integration pkgs.libsForQt5.systemsettings ]
else throw "`qt5.platformTheme` ${cfg.platformTheme} and `qt5.style` ${cfg.style} are not compatible."; else throw "`qt5.platformTheme` ${cfg.platformTheme} and `qt5.style` ${cfg.style} are not compatible.";
in in
{ {
meta.maintainers = [ maintainers.romildo ];
options = { options = {
qt5 = { qt5 = {
@ -26,11 +33,17 @@ in
type = types.enum [ type = types.enum [
"gtk2" "gtk2"
"gnome" "gnome"
"lxqt"
"qt5ct"
"kde"
]; ];
example = "gnome"; example = "gnome";
relatedPackages = [ relatedPackages = [
"qgnomeplatform" "qgnomeplatform"
["libsForQt5" "qtstyleplugins"] ["libsForQt5" "qtstyleplugins"]
["libsForQt5" "qt5ct"]
["lxqt" "lxqt-qtplugin"]
["libsForQt5" "plasma-integration"]
]; ];
description = '' description = ''
Selects the platform theme to use for Qt5 applications.</para> Selects the platform theme to use for Qt5 applications.</para>
@ -48,6 +61,24 @@ in
<link xlink:href="https://github.com/FedoraQt/QGnomePlatform">qgnomeplatform</link> <link xlink:href="https://github.com/FedoraQt/QGnomePlatform">qgnomeplatform</link>
</para></listitem> </para></listitem>
</varlistentry> </varlistentry>
<varlistentry>
<term><literal>lxqt</literal></term>
<listitem><para>Use LXQt style set using the
<link xlink:href="https://github.com/lxqt/lxqt-config">lxqt-config-appearance</link>
application.
</para></listitem>
</varlistentry>
<varlistentry>
<term><literal>qt5ct</literal></term>
<listitem><para>Use Qt style set using the
<link xlink:href="https://sourceforge.net/projects/qt5ct/">qt5ct</link>
application.
</para></listitem>
</varlistentry>
<varlistentry>
<term><literal>kde</literal></term>
<listitem><para>Use Qt settings from Plasma.</para></listitem>
</varlistentry>
</variablelist> </variablelist>
''; '';
}; };
@ -96,7 +127,7 @@ in
environment.variables.QT_QPA_PLATFORMTHEME = cfg.platformTheme; environment.variables.QT_QPA_PLATFORMTHEME = cfg.platformTheme;
environment.variables.QT_STYLE_OVERRIDE = cfg.style; environment.variables.QT_STYLE_OVERRIDE = mkIf (! (isQt5ct || isLxqt || isKde)) cfg.style;
environment.systemPackages = packages; environment.systemPackages = packages;
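For illustration, a user configuration selecting one of the newly supported
platform themes could look like the following sketch (other qt5 options are
left at their defaults and not shown here):

    qt5.platformTheme = "qt5ct";   # the other newly accepted values are "lxqt" and "kde"

With these themes the module no longer exports QT_STYLE_OVERRIDE, so qt5.style
does not need to be set.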

View file

@ -50,7 +50,20 @@ in
default = !(config.environment.etc ? "resolv.conf"); default = !(config.environment.etc ? "resolv.conf");
defaultText = literalExpression ''!(config.environment.etc ? "resolv.conf")''; defaultText = literalExpression ''!(config.environment.etc ? "resolv.conf")'';
description = '' description = ''
DNS configuration is managed by resolvconf. Whether DNS configuration is managed by resolvconf.
'';
};
package = mkOption {
type = types.package;
default = pkgs.openresolv;
defaultText = literalExpression "pkgs.openresolv";
description = ''
The package that provides the system-wide resolvconf command. Defaults to <literal>openresolv</literal>
if this module is enabled. Otherwise, it can be used by other modules (for example <option>services.resolved</option>) to
provide a compatibility layer.
This option generally shouldn't be set by the user.
''; '';
}; };
@ -119,10 +132,12 @@ in
exit 1 exit 1
'' ''
else configText; else configText;
environment.systemPackages = [ cfg.package ];
} }
(mkIf cfg.enable { (mkIf cfg.enable {
environment.systemPackages = [ pkgs.openresolv ]; networking.resolvconf.package = pkgs.openresolv;
systemd.services.resolvconf = { systemd.services.resolvconf = {
description = "resolvconf update"; description = "resolvconf update";
@ -134,7 +149,7 @@ in
serviceConfig = { serviceConfig = {
Type = "oneshot"; Type = "oneshot";
ExecStart = "${pkgs.openresolv}/bin/resolvconf -u"; ExecStart = "${cfg.package}/bin/resolvconf -u";
RemainAfterExit = true; RemainAfterExit = true;
}; };
}; };

View file

@ -1,10 +1,30 @@
{ config, pkgs, lib, ... }: { config, pkgs, lib, ... }:
with lib; let
inherit (lib)
mkEnableOption
mkIf
mkOption
mkRenamedOptionModule
teams
types;
in
{ {
imports = [ imports = [
(mkRenamedOptionModule [ "services" "flatpak" "extraPortals" ] [ "xdg" "portal" "extraPortals" ]) (mkRenamedOptionModule [ "services" "flatpak" "extraPortals" ] [ "xdg" "portal" "extraPortals" ])
({ config, lib, options, ... }:
let
from = [ "xdg" "portal" "gtkUsePortal" ];
fromOpt = lib.getAttrFromPath from options;
in
{
warnings = lib.mkIf config.xdg.portal.gtkUsePortal [
"The option `${lib.showOption from}' defined in ${lib.showFiles fromOpt.files} has been deprecated. Setting the variable globally with `environment.sessionVariables' NixOS option can have unforseen side-effects."
];
}
)
]; ];
meta = { meta = {
@ -32,11 +52,12 @@ with lib;
gtkUsePortal = mkOption { gtkUsePortal = mkOption {
type = types.bool; type = types.bool;
visible = false;
default = false; default = false;
description = '' description = ''
Sets environment variable <literal>GTK_USE_PORTAL</literal> to <literal>1</literal>. Sets environment variable <literal>GTK_USE_PORTAL</literal> to <literal>1</literal>.
This is needed for packages run outside Flatpak to respect and use XDG Desktop Portals. This will force GTK-based programs run outside Flatpak to respect and use XDG Desktop Portals
For example, you'd need to set this for non-flatpak Firefox to use native filechoosers. for features like file chooser, but it is an unsupported hack that can easily break things.
Defaults to <literal>false</literal> to respect its opt-in nature. Defaults to <literal>false</literal> to respect its opt-in nature.
''; '';
}; };

View file

@ -0,0 +1,49 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.xdg.portal.lxqt;
in
{
meta = {
maintainers = teams.lxqt.members;
};
options.xdg.portal.lxqt = {
enable = mkEnableOption ''
the desktop portal for the LXQt desktop environment.
This will add the <package>lxqt.xdg-desktop-portal-lxqt</package>
package (with the extra Qt styles) into the
<option>xdg.portal.extraPortals</option> option
'';
styles = mkOption {
type = types.listOf types.package;
default = [];
example = literalExpression ''[
pkgs.libsForQt5.qtstyleplugin-kvantum
pkgs.breeze-qt5
pkgs.qtcurve
];
'';
description = ''
Extra Qt styles that will be available to the
<package>lxqt.xdg-desktop-portal-lxqt</package>.
'';
};
};
config = mkIf cfg.enable {
xdg.portal = {
enable = true;
extraPortals = [
(pkgs.lxqt.xdg-desktop-portal-lxqt.override { extraQtStyles = cfg.styles; })
];
};
environment.systemPackages = cfg.styles;
};
}
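A hypothetical user configuration for this new module might look like the
following sketch (the extra style package is taken from the option's example
above):

    xdg.portal.lxqt.enable = true;
    xdg.portal.lxqt.styles = [ pkgs.libsForQt5.qtstyleplugin-kvantum ];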

View file

@ -48,6 +48,6 @@ in
}; };
meta = { meta = {
maintainers = with lib.maintainers; [ kierdavis ]; maintainers = with lib.maintainers; [ superherointj ];
}; };
} }

View file

@ -40,7 +40,7 @@ let
homepage = "https://downloads.linux.hpe.com/SDR/downloads/MCP/Ubuntu/pool/non-free/"; homepage = "https://downloads.linux.hpe.com/SDR/downloads/MCP/Ubuntu/pool/non-free/";
license = licenses.unfreeRedistributable; license = licenses.unfreeRedistributable;
platforms = [ "x86_64-linux" ]; platforms = [ "x86_64-linux" ];
maintainers = with maintainers; [ volth ]; maintainers = with maintainers; [ ];
}; };
}; };
in { in {

View file

@ -24,7 +24,7 @@ let
primeEnabled = syncCfg.enable || offloadCfg.enable; primeEnabled = syncCfg.enable || offloadCfg.enable;
nvidiaPersistencedEnabled = cfg.nvidiaPersistenced; nvidiaPersistencedEnabled = cfg.nvidiaPersistenced;
nvidiaSettings = cfg.nvidiaSettings; nvidiaSettings = cfg.nvidiaSettings;
busIDType = types.strMatching "([[:print:]]+\:[0-9]{1,3}\:[0-9]{1,2}\:[0-9])?"; busIDType = types.strMatching "([[:print:]]+[\:\@][0-9]{1,3}\:[0-9]{1,2}\:[0-9])?";
in in
{ {

View file

@ -1,160 +0,0 @@
{ config, lib, pkgs, ... }:
with lib;
let
# A dummy /etc/nixos/configuration.nix in the booted CD that
# rebuilds the CD's configuration (and allows the configuration to
# be modified, of course, providing a true live CD). Problem is
# that we don't really know how the CD was built - the Nix
# expression language doesn't allow us to query the expression being
# evaluated. So we'll just hope for the best.
dummyConfiguration = pkgs.writeText "configuration.nix"
''
{ config, pkgs, ... }:
{ # Add your own options below, e.g.:
# services.openssh.enable = true;
nixpkgs.config.platform = pkgs.platforms.fuloong2f_n32;
}
'';
pkgs2storeContents = l : map (x: { object = x; symlink = "none"; }) l;
# A clue for the kernel loading
kernelParams = pkgs.writeText "kernel-params.txt" ''
Kernel Parameters:
init=/boot/init ${toString config.boot.kernelParams}
'';
# System wide nixpkgs config
nixpkgsUserConfig = pkgs.writeText "config.nix" ''
pkgs:
{
platform = pkgs.platforms.fuloong2f_n32;
}
'';
in
{
imports = [ ./system-tarball.nix ];
# Disable some other stuff we don't need.
security.sudo.enable = false;
# Include only the en_US locale. This saves 75 MiB or so compared to
# the full glibcLocales package.
i18n.supportedLocales = ["en_US.UTF-8/UTF-8" "en_US/ISO-8859-1"];
# Include some utilities that are useful for installing or repairing
# the system.
environment.systemPackages =
[ pkgs.w3m # needed for the manual anyway
pkgs.testdisk # useful for repairing boot problems
pkgs.ms-sys # for writing Microsoft boot sectors / MBRs
pkgs.parted
pkgs.ddrescue
pkgs.ccrypt
pkgs.cryptsetup # needed for dm-crypt volumes
# Some networking tools.
pkgs.sshfs-fuse
pkgs.socat
pkgs.screen
pkgs.wpa_supplicant # !!! should use the wpa module
# Hardware-related tools.
pkgs.sdparm
pkgs.hdparm
pkgs.dmraid
# Tools to create / manipulate filesystems.
pkgs.ntfsprogs # for resizing NTFS partitions
pkgs.btrfs-progs
pkgs.jfsutils
# Some compression/archiver tools.
pkgs.unzip
pkgs.zip
pkgs.xz
pkgs.dar # disk archiver
# Some editors.
pkgs.nvi
pkgs.bvi # binary editor
pkgs.joe
];
# The initrd has to contain any module that might be necessary for
# mounting the CD/DVD.
boot.initrd.availableKernelModules =
[ "vfat" "reiserfs" ];
boot.kernelPackages = pkgs.linuxKernel.packages.linux_3_10;
boot.kernelParams = [ "console=tty1" ];
boot.postBootCommands =
''
mkdir -p /mnt
cp ${dummyConfiguration} /etc/nixos/configuration.nix
'';
# Some more help text.
services.getty.helpLine =
''
Log in as "root" with an empty password. ${
if config.services.xserver.enable then
"Type `start xserver' to start\nthe graphical user interface."
else ""
}
'';
# Include the firmware for various wireless cards.
networking.enableRalinkFirmware = true;
networking.enableIntel2200BGFirmware = true;
# To speed up further installation of packages, include the complete stdenv
# in the Nix store of the tarball.
tarball.storeContents = pkgs2storeContents [ pkgs.stdenv ]
++ [
{
object = config.system.build.bootStage2;
symlink = "/boot/init";
}
{
object = config.system.build.toplevel;
symlink = "/boot/system";
}
];
tarball.contents = [
{ source = kernelParams;
target = "/kernelparams.txt";
}
{ source = config.boot.kernelPackages.kernel + "/" + config.system.boot.loader.kernelFile;
target = "/boot/" + config.system.boot.loader.kernelFile;
}
{ source = nixpkgsUserConfig;
target = "/root/.nixpkgs/config.nix";
}
];
# Allow sshd to be started manually through "start sshd". It should
# not be started by default on the installation CD because the
# default root password is empty.
services.openssh.enable = true;
systemd.services.openssh.wantedBy = lib.mkOverride 50 [];
boot.loader.grub.enable = false;
boot.loader.generationsDir.enable = false;
system.boot.loader.kernelFile = "vmlinux";
nixpkgs.config = {
platform = pkgs.platforms.fuloong2f_n32;
};
}

View file

@ -1,89 +0,0 @@
Let all the files in the system tarball sit in a directory served by NFS (the
NFS root) like this in exportfs:
/home/pcroot 192.168.1.0/24(rw,no_root_squash,no_all_squash)
Run "exportfs -a" after editing /etc/exportfs, for the nfs server to be aware
of the changes.
Use a tftp server serving the root of boot/ (from the system tarball).
In order to have PXE boot, use the boot/dhcpd.conf-example file for your dhcpd
server, as it will point your PXE clients to pxelinux.0 from the tftp server.
Adapt the configuration to your network.
Adapt the pxelinux configuration (boot/pxelinux.cfg/default) to set the path to
your nfrroot. If you use ip=dhcp in the kernel, the nfs server ip will be taken
from dhcp and so you don't have to specify it.
The linux in bzImage includes network drivers for some usual cards.
QEMU Testing
---------------
You can test qemu pxe boot without having a DHCP server adapted, but having
nfsroot, like this:
qemu-system-x86_64 -tftp /home/pcroot/boot -net nic -net user,bootfile=pxelinux.0 -boot n
I don't know how to use NFS through the qemu '-net user' though.
QEMU Testing with NFS root and bridged network
-------------------------------------------------
This allows testing with qemu as any other host in your LAN.
Testing with the real dhcpd server requires setting up a bridge and having a
tap device.
tunctl -t tap0
brctl addbr br0
brctl addif br0 eth0
brctl addif tap0 eth0
ifconfig eth0 0.0.0.0 up
ifconfig tap0 0.0.0.0 up
ifconfig br0 up # With your ip configuration
Then you can run qemu:
qemu-system-x86_64 -boot n -net tap,ifname=tap0,script=no -net nic,model=e1000
Using the system-tarball-pc in a chroot
--------------------------------------------------
Installation:
mkdir nixos-chroot && cd nixos-chroot
tar xf your-system-tarball.tar.xz
mkdir sys dev proc tmp root var run
mount --bind /sys sys
mount --bind /dev dev
mount --bind /proc proc
Activate the system: look for a directory in nix/store similar to:
"/nix/store/y0d1lcj9fppli0hl3x0m0ba5g1ndjv2j-nixos-feb97bx-53f008"
Having found it, activate that nixos system *twice*:
chroot . /nix/store/SOMETHING-nixos-SOMETHING/activate
chroot . /nix/store/SOMETHING-nixos-SOMETHING/activate
This runs a 'hostname' command. Restore your old hostname with:
hostname OLDHOSTNAME
Copy your system resolv.conf to the /etc/resolv.conf inside the chroot:
cp /etc/resolv.conf etc
Then you can get an interactive shell in the nixos chroot. '*' means
to run inside the chroot interactive shell
chroot . /bin/sh
* source /etc/profile
Populate the nix database: that should be done in the init script if you
had booted this nixos. Run:
* `grep local-cmds run/current-system/init`
Then you can proceed normally subscribing to a nixos channel:
nix-channel --add https://nixos.org/channels/nixos-unstable
nix-channel --update
Testing:
nix-env -i hello
which hello
hello

View file

@ -1,163 +0,0 @@
# This module contains the basic configuration for building a NixOS
# tarball, that can directly boot, maybe using PXE or unpacking on a fs.
{ config, lib, pkgs, ... }:
with lib;
let
pkgs2storeContents = l : map (x: { object = x; symlink = "none"; }) l;
# For PXE kernel loading
pxeconfig = pkgs.writeText "pxeconfig-default" ''
default menu.c32
prompt 0
label bootlocal
menu default
localboot 0
timeout 80
TOTALTIMEOUT 9000
label nixos
MENU LABEL ^NixOS using nfsroot
KERNEL bzImage
append ip=dhcp nfsroot=/home/pcroot init=${config.system.build.toplevel}/init rw
# I don't know how to make this boot with nfsroot (using the initrd)
label nixos_initrd
MENU LABEL NixOS booting the poor ^initrd.
KERNEL bzImage
append initrd=initrd ip=dhcp nfsroot=/home/pcroot init=${config.system.build.toplevel}/init rw
label memtest
MENU LABEL ^${pkgs.memtest86.name}
KERNEL memtest
'';
dhcpdExampleConfig = pkgs.writeText "dhcpd.conf-example" ''
# Example configuration for booting PXE.
allow booting;
allow bootp;
# Adapt this to your network configuration.
option domain-name "local";
option subnet-mask 255.255.255.0;
option broadcast-address 192.168.1.255;
option domain-name-servers 192.168.1.1;
option routers 192.168.1.1;
# PXE-specific configuration directives...
# Some BIOS don't accept slashes for paths inside the tftp servers,
# and will report Access Violation if they see slashes.
filename "pxelinux.0";
# For the TFTP and NFS root server. Set the IP of your server.
next-server 192.168.1.34;
subnet 192.168.1.0 netmask 255.255.255.0 {
range 192.168.1.50 192.168.1.55;
}
'';
readme = ./system-tarball-pc-readme.txt;
in
{
imports =
[ ./system-tarball.nix
# Profiles of this basic installation.
../../profiles/all-hardware.nix
../../profiles/base.nix
../../profiles/installation-device.nix
];
# To speed up further installation of packages, include the complete stdenv
# in the Nix store of the tarball.
tarball.storeContents = pkgs2storeContents [ pkgs.stdenv ];
tarball.contents =
[ { source = config.boot.kernelPackages.kernel + "/" + config.system.boot.loader.kernelFile;
target = "/boot/" + config.system.boot.loader.kernelFile;
}
{ source = "${pkgs.syslinux}/share/syslinux/pxelinux.0";
target = "/boot/pxelinux.0";
}
{ source = "${pkgs.syslinux}/share/syslinux/menu.c32";
target = "/boot/menu.c32";
}
{ source = pxeconfig;
target = "/boot/pxelinux.cfg/default";
}
{ source = readme;
target = "/readme.txt";
}
{ source = dhcpdExampleConfig;
target = "/boot/dhcpd.conf-example";
}
{ source = "${pkgs.memtest86}/memtest.bin";
# We can't leave '.bin', because pxelinux interprets this specially,
# and it would not load the image fine.
# http://forum.canardpc.com/threads/46464-0104-when-launched-via-pxe
target = "/boot/memtest";
}
];
# Allow sshd to be started manually through "start sshd". It should
# not be started by default on the installation CD because the
# default root password is empty.
services.openssh.enable = true;
systemd.services.openssh.wantedBy = lib.mkOverride 50 [];
# To be able to use the systemTarball to catch troubles.
boot.crashDump = {
enable = true;
kernelPackages = pkgs.linuxKernel.packages.linux_3_4;
};
# No grub for the tarball.
boot.loader.grub.enable = false;
/* fake entry, just to have a happy stage-1. Users
may boot without having stage-1 though */
fileSystems.fake =
{ mountPoint = "/";
device = "/dev/something";
};
nixpkgs.config = {
packageOverrides = p: {
linux_3_4 = p.linux_3_4.override {
extraConfig = ''
# Enable drivers in kernel for most NICs.
E1000 y
# E1000E y
# ATH5K y
8139TOO y
NE2K_PCI y
ATL1 y
ATL1E y
ATL1C y
VORTEX y
VIA_RHINE y
R8169 y
# Enable nfs root boot
UNIX y # http://www.linux-mips.org/archives/linux-mips/2006-11/msg00113.html
IP_PNP y
IP_PNP_DHCP y
FSCACHE y
NFS_FS y
NFS_FSCACHE y
ROOT_NFS y
# Enable devtmpfs
DEVTMPFS y
DEVTMPFS_MOUNT y
'';
};
};
};
}

View file

@ -1,172 +0,0 @@
# This module contains the basic configuration for building a NixOS
# tarball for the sheevaplug.
{ config, lib, pkgs, ... }:
with lib;
let
# A dummy /etc/nixos/configuration.nix in the booted CD that
# rebuilds the CD's configuration (and allows the configuration to
# be modified, of course, providing a true live CD). Problem is
# that we don't really know how the CD was built - the Nix
# expression language doesn't allow us to query the expression being
# evaluated. So we'll just hope for the best.
dummyConfiguration = pkgs.writeText "configuration.nix"
''
{ config, pkgs, ... }:
{
# Add your own options below and run "nixos-rebuild switch".
# E.g.,
# services.openssh.enable = true;
}
'';
pkgs2storeContents = l : map (x: { object = x; symlink = "none"; }) l;
# A clue for the kernel loading
kernelParams = pkgs.writeText "kernel-params.txt" ''
Kernel Parameters:
init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
'';
in
{
imports = [ ./system-tarball.nix ];
# Disable some other stuff we don't need.
security.sudo.enable = false;
# Include only the en_US locale. This saves 75 MiB or so compared to
# the full glibcLocales package.
i18n.supportedLocales = ["en_US.UTF-8/UTF-8" "en_US/ISO-8859-1"];
# Include some utilities that are useful for installing or repairing
# the system.
environment.systemPackages =
[ pkgs.w3m # needed for the manual anyway
pkgs.ddrescue
pkgs.ccrypt
pkgs.cryptsetup # needed for dm-crypt volumes
# Some networking tools.
pkgs.sshfs-fuse
pkgs.socat
pkgs.screen
pkgs.wpa_supplicant # !!! should use the wpa module
# Hardware-related tools.
pkgs.sdparm
pkgs.hdparm
pkgs.dmraid
# Tools to create / manipulate filesystems.
pkgs.btrfs-progs
# Some compression/archiver tools.
pkgs.unzip
pkgs.zip
pkgs.xz
pkgs.dar # disk archiver
# Some editors.
pkgs.nvi
pkgs.bvi # binary editor
pkgs.joe
];
boot.loader.grub.enable = false;
boot.loader.generationsDir.enable = false;
system.boot.loader.kernelFile = "uImage";
boot.initrd.availableKernelModules =
[ "mvsdio" "reiserfs" "ext3" "ums-cypress" "rtc_mv" "ext4" ];
boot.postBootCommands = lib.mkIf (!boot.initrd.systemd.enable)
''
mkdir -p /mnt
cp ${dummyConfiguration} /etc/nixos/configuration.nix
'';
boot.initrd.extraUtilsCommands = lib.mkIf (!boot.initrd.systemd.enable)
''
copy_bin_and_libs ${pkgs.util-linux}/sbin/hwclock
'';
boot.initrd.postDeviceCommands = lib.mkIf (!boot.initrd.systemd.enable)
''
hwclock -s
'';
boot.kernelParams =
[
"selinux=0"
"console=tty1"
# "console=ttyS0,115200n8" # serial console
];
boot.kernelPackages = pkgs.linuxKernel.packages.linux_3_4;
boot.supportedFilesystems = [ "reiserfs" ];
/* fake entry, just to have a happy stage-1. Users
may boot without having stage-1 though */
fileSystems.fake =
{ mountPoint = "/";
device = "/dev/something";
};
services.getty = {
# Some more help text.
helpLine = ''
Log in as "root" with an empty password. ${
if config.services.xserver.enable then
"Type `start xserver' to start\nthe graphical user interface."
else ""
}
'';
};
# Setting vesa, we don't get the nvidia driver, which can't work in arm.
services.xserver.videoDrivers = [ "vesa" ];
documentation.nixos.enable = false;
# Include the firmware for various wireless cards.
networking.enableRalinkFirmware = true;
networking.enableIntel2200BGFirmware = true;
# To speed up further installation of packages, include the complete stdenv
# in the Nix store of the tarball.
tarball.storeContents = pkgs2storeContents [ pkgs.stdenv ];
tarball.contents = [
{ source = kernelParams;
target = "/kernelparams.txt";
}
{ source = config.boot.kernelPackages.kernel + "/" + config.system.boot.loader.kernelFile;
target = "/boot/" + config.system.boot.loader.kernelFile;
}
{ source = pkgs.ubootSheevaplug;
target = "/boot/uboot";
}
];
# Allow sshd to be started manually through "start sshd". It should
# not be started by default on the installation CD because the
# default root password is empty.
services.openssh.enable = true;
systemd.services.openssh.wantedBy = lib.mkOverride 50 [];
# cpufrequtils fails to build on non-pc
powerManagement.enable = false;
nixpkgs.config = {
platform = pkgs.platforms.sheevaplug;
};
}

View file

@ -1,93 +0,0 @@
# This module creates a bootable ISO image containing the given NixOS
# configuration. The derivation for the ISO image will be placed in
# config.system.build.tarball.
{ config, lib, pkgs, ... }:
with lib;
let
versionFile = pkgs.writeText "nixos-label" config.system.nixos.label;
in
{
options = {
tarball.contents = mkOption {
example = literalExpression ''
[ { source = pkgs.memtest86 + "/memtest.bin";
target = "boot/memtest.bin";
}
]
'';
description = ''
This option lists files to be copied to fixed locations in the
generated ISO image.
'';
};
tarball.storeContents = mkOption {
example = literalExpression "[ pkgs.stdenv ]";
description = ''
This option lists additional derivations to be included in the
Nix store in the generated ISO image.
'';
};
};
config = {
# In stage 1 of the boot, mount the CD/DVD as the root FS by label
# so that we don't need to know its device.
fileSystems = { };
# boot.initrd.availableKernelModules = [ "mvsdio" "reiserfs" "ext3" "ext4" ];
# boot.initrd.kernelModules = [ "rtc_mv" ];
# Closures to be copied to the Nix store on the CD, namely the init
# script and the top-level system configuration directory.
tarball.storeContents =
[ { object = config.system.build.toplevel;
symlink = "/run/current-system";
}
];
# Individual files to be included on the CD, outside of the Nix
# store on the CD.
tarball.contents =
[ { source = config.system.build.initialRamdisk + "/" + config.system.boot.loader.initrdFile;
target = "/boot/" + config.system.boot.loader.initrdFile;
}
{ source = versionFile;
target = "/nixos-version.txt";
}
];
# Create the tarball
system.build.tarball = import ../../../lib/make-system-tarball.nix {
inherit (pkgs) stdenv closureInfo pixz;
inherit (config.tarball) contents storeContents;
};
boot.postBootCommands =
''
# After booting, register the contents of the Nix store on the
# CD in the Nix database in the tmpfs.
if [ -f /nix-path-registration ]; then
${config.nix.package.out}/bin/nix-store --load-db < /nix-path-registration &&
rm /nix-path-registration
fi
# nixos-rebuild also requires a "system" profile and an
# /etc/NIXOS tag.
touch /etc/NIXOS
${config.nix.package.out}/bin/nix-env -p /nix/var/nix/profiles/system --set /run/current-system
'';
};
}

View file

@ -81,7 +81,7 @@ with lib;
# Create the initrd # Create the initrd
system.build.netbootRamdisk = pkgs.makeInitrd { system.build.netbootRamdisk = pkgs.makeInitrdNG {
inherit (config.boot.initrd) compressor; inherit (config.boot.initrd) compressor;
prepend = [ "${config.system.build.initialRamdisk}/initrd" ]; prepend = [ "${config.system.build.initialRamdisk}/initrd" ];

View file

@ -1,14 +1,15 @@
getVersion() { getVersion() {
local dir="$1" local dir="$1"
rev= rev=
if [ -e "$dir/.git" ]; then gitDir="$dir/.git"
if [ -e "$gitDir" ]; then
if [ -z "$(type -P git)" ]; then if [ -z "$(type -P git)" ]; then
echo "warning: Git not found; cannot figure out revision of $dir" >&2 echo "warning: Git not found; cannot figure out revision of $dir" >&2
return return
fi fi
cd "$dir" cd "$dir"
rev=$(git rev-parse --short HEAD) rev=$(git --git-dir="$gitDir" rev-parse --short HEAD)
if git describe --always --dirty | grep -q dirty; then if git --git-dir="$gitDir" describe --always --dirty | grep -q dirty; then
rev+=M rev+=M
fi fi
fi fi

View file

@ -300,6 +300,12 @@ if ($virt eq "oracle") {
push @attrs, "virtualisation.virtualbox.guest.enable = true;" push @attrs, "virtualisation.virtualbox.guest.enable = true;"
} }
# Check if we're a Parallels guest. If so, enable the guest additions.
# It is blocked by https://github.com/systemd/systemd/pull/23859
if ($virt eq "parallels") {
push @attrs, "hardware.parallels.enable = true;";
push @attrs, "nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ \"prl-tools\" ];";
}
# Likewise for QEMU. # Likewise for QEMU.
if ($virt eq "qemu" || $virt eq "kvm" || $virt eq "bochs") { if ($virt eq "qemu" || $virt eq "kvm" || $virt eq "bochs") {

View file

@ -3,7 +3,7 @@
# IMPORTANT! # IMPORTANT!
# We only add static uids and gids for services where it is not feasible # We only add static uids and gids for services where it is not feasible
# to change uids/gids on service start, in example a service with a lot of # to change uids/gids on service start, for example a service with a lot of
# files. Please also check if the service is applicable for systemd's # files. Please also check if the service is applicable for systemd's
# DynamicUser option and does not need a uid/gid allocation at all. # DynamicUser option and does not need a uid/gid allocation at all.
# Systemd can also change ownership of service directories using the # Systemd can also change ownership of service directories using the

View file

@ -10,6 +10,7 @@
./config/xdg/mime.nix ./config/xdg/mime.nix
./config/xdg/portal.nix ./config/xdg/portal.nix
./config/xdg/portals/wlr.nix ./config/xdg/portals/wlr.nix
./config/xdg/portals/lxqt.nix
./config/appstream.nix ./config/appstream.nix
./config/console.nix ./config/console.nix
./config/xdg/sounds.nix ./config/xdg/sounds.nix
@ -217,6 +218,7 @@
./programs/sway.nix ./programs/sway.nix
./programs/system-config-printer.nix ./programs/system-config-printer.nix
./programs/thefuck.nix ./programs/thefuck.nix
./programs/thunar.nix
./programs/tmux.nix ./programs/tmux.nix
./programs/traceroute.nix ./programs/traceroute.nix
./programs/tsm-client.nix ./programs/tsm-client.nix
@ -229,6 +231,7 @@
./programs/weylus.nix ./programs/weylus.nix
./programs/wireshark.nix ./programs/wireshark.nix
./programs/wshowkeys.nix ./programs/wshowkeys.nix
./programs/xfconf.nix
./programs/xfs_quota.nix ./programs/xfs_quota.nix
./programs/xonsh.nix ./programs/xonsh.nix
./programs/xss-lock.nix ./programs/xss-lock.nix
@ -350,6 +353,7 @@
./services/databases/cockroachdb.nix ./services/databases/cockroachdb.nix
./services/databases/couchdb.nix ./services/databases/couchdb.nix
./services/databases/dragonflydb.nix ./services/databases/dragonflydb.nix
./services/databases/dgraph.nix
./services/databases/firebird.nix ./services/databases/firebird.nix
./services/databases/foundationdb.nix ./services/databases/foundationdb.nix
./services/databases/hbase.nix ./services/databases/hbase.nix
@ -612,6 +616,7 @@
./services/misc/plex.nix ./services/misc/plex.nix
./services/misc/plikd.nix ./services/misc/plikd.nix
./services/misc/podgrab.nix ./services/misc/podgrab.nix
./services/misc/polaris.nix
./services/misc/prowlarr.nix ./services/misc/prowlarr.nix
./services/misc/tautulli.nix ./services/misc/tautulli.nix
./services/misc/pinnwand.nix ./services/misc/pinnwand.nix
@ -824,6 +829,7 @@
./services/networking/libreswan.nix ./services/networking/libreswan.nix
./services/networking/lldpd.nix ./services/networking/lldpd.nix
./services/networking/logmein-hamachi.nix ./services/networking/logmein-hamachi.nix
./services/networking/lokinet.nix
./services/networking/lxd-image-server.nix ./services/networking/lxd-image-server.nix
./services/networking/magic-wormhole-mailbox-server.nix ./services/networking/magic-wormhole-mailbox-server.nix
./services/networking/matterbridge.nix ./services/networking/matterbridge.nix
@ -1028,6 +1034,7 @@
./services/torrent/peerflix.nix ./services/torrent/peerflix.nix
./services/torrent/rtorrent.nix ./services/torrent/rtorrent.nix
./services/torrent/transmission.nix ./services/torrent/transmission.nix
./services/tracing/tempo.nix
./services/ttys/getty.nix ./services/ttys/getty.nix
./services/ttys/gpm.nix ./services/ttys/gpm.nix
./services/ttys/kmscon.nix ./services/ttys/kmscon.nix
@ -1043,7 +1050,6 @@
./services/web-apps/code-server.nix ./services/web-apps/code-server.nix
./services/web-apps/baget.nix ./services/web-apps/baget.nix
./services/web-apps/convos.nix ./services/web-apps/convos.nix
./services/web-apps/cryptpad.nix
./services/web-apps/dex.nix ./services/web-apps/dex.nix
./services/web-apps/discourse.nix ./services/web-apps/discourse.nix
./services/web-apps/documize.nix ./services/web-apps/documize.nix
@ -1055,6 +1061,7 @@
./services/web-apps/gerrit.nix ./services/web-apps/gerrit.nix
./services/web-apps/gotify-server.nix ./services/web-apps/gotify-server.nix
./services/web-apps/grocy.nix ./services/web-apps/grocy.nix
./services/web-apps/healthchecks.nix
./services/web-apps/hedgedoc.nix ./services/web-apps/hedgedoc.nix
./services/web-apps/hledger-web.nix ./services/web-apps/hledger-web.nix
./services/web-apps/icingaweb2/icingaweb2.nix ./services/web-apps/icingaweb2/icingaweb2.nix
@ -1078,6 +1085,7 @@
./services/web-apps/nexus.nix ./services/web-apps/nexus.nix
./services/web-apps/nifi.nix ./services/web-apps/nifi.nix
./services/web-apps/node-red.nix ./services/web-apps/node-red.nix
./services/web-apps/phylactery.nix
./services/web-apps/pict-rs.nix ./services/web-apps/pict-rs.nix
./services/web-apps/peertube.nix ./services/web-apps/peertube.nix
./services/web-apps/plantuml-server.nix ./services/web-apps/plantuml-server.nix
@ -1269,7 +1277,6 @@
./virtualisation/parallels-guest.nix ./virtualisation/parallels-guest.nix
./virtualisation/podman/default.nix ./virtualisation/podman/default.nix
./virtualisation/qemu-guest-agent.nix ./virtualisation/qemu-guest-agent.nix
./virtualisation/railcar.nix
./virtualisation/spice-usb-redirection.nix ./virtualisation/spice-usb-redirection.nix
./virtualisation/virtualbox-guest.nix ./virtualisation/virtualbox-guest.nix
./virtualisation/virtualbox-host.nix ./virtualisation/virtualbox-host.nix

View file

@ -1,31 +1,9 @@
{ config, lib, pkgs, ... }: { lib, ... }:
with lib; with lib;
{ {
meta.maintainers = [ maintainers.romildo ]; imports = [
(mkRemovedOptionModule [ "programs" "qt5ct" "enable" ] "Use qt5.platformTheme = \"qt5ct\" instead.")
###### interface ];
options = {
programs.qt5ct = {
enable = mkOption {
default = false;
type = types.bool;
description = ''
Whether to enable the Qt5 Configuration Tool (qt5ct), a
program that allows users to configure Qt5 settings (theme,
font, icons, etc.) under desktop environments or window
manager without Qt integration.
Official home page: <link xlink:href="https://sourceforge.net/projects/qt5ct/">https://sourceforge.net/projects/qt5ct/</link>
'';
};
};
};
###### implementation
config = mkIf config.programs.qt5ct.enable {
environment.variables.QT_QPA_PLATFORMTHEME = "qt5ct";
environment.systemPackages = with pkgs; [ libsForQt5.qt5ct ];
};
} }

View file

@ -0,0 +1,45 @@
{ config, lib, pkgs, ... }:
with lib;
let cfg = config.programs.thunar;
in {
meta = {
maintainers = teams.xfce.members;
};
options = {
programs.thunar = {
enable = mkEnableOption "Thunar, the Xfce file manager";
plugins = mkOption {
default = [];
type = types.listOf types.package;
description = "List of thunar plugins to install.";
example = literalExpression "with pkgs.xfce; [ thunar-archive-plugin thunar-volman ]";
};
};
};
config = mkIf cfg.enable (
let package = pkgs.xfce.thunar.override { thunarPlugins = cfg.plugins; };
in {
environment.systemPackages = [
package
];
services.dbus.packages = [
package
];
systemd.packages = [
package
];
programs.xfconf.enable = true;
}
);
}

View file

@ -0,0 +1,27 @@
{ config, lib, pkgs, ... }:
with lib;
let cfg = config.programs.xfconf;
in {
meta = {
maintainers = teams.xfce.members;
};
options = {
programs.xfconf = {
enable = mkEnableOption "Xfconf, the Xfce configuration storage system";
};
};
config = mkIf cfg.enable {
environment.systemPackages = [
pkgs.xfce.xfconf
];
services.dbus.packages = [
pkgs.xfce.xfconf
];
};
}

View file

@ -68,6 +68,7 @@ with lib;
prey-bash-client is deprecated upstream prey-bash-client is deprecated upstream
'') '')
(mkRemovedOptionModule [ "services" "quagga" ] "the corresponding package has been removed from nixpkgs") (mkRemovedOptionModule [ "services" "quagga" ] "the corresponding package has been removed from nixpkgs")
(mkRemovedOptionModule [ "services" "railcar" ] "the corresponding package has been removed from nixpkgs")
(mkRemovedOptionModule [ "services" "seeks" ] "") (mkRemovedOptionModule [ "services" "seeks" ] "")
(mkRemovedOptionModule [ "services" "ssmtp" ] '' (mkRemovedOptionModule [ "services" "ssmtp" ] ''
The ssmtp package and the corresponding module have been removed due to The ssmtp package and the corresponding module have been removed due to
@ -98,6 +99,7 @@ with lib;
(mkRemovedOptionModule [ "services" "virtuoso" ] "The corresponding package was removed from nixpkgs.") (mkRemovedOptionModule [ "services" "virtuoso" ] "The corresponding package was removed from nixpkgs.")
(mkRemovedOptionModule [ "services" "openfire" ] "The corresponding package was removed from nixpkgs.") (mkRemovedOptionModule [ "services" "openfire" ] "The corresponding package was removed from nixpkgs.")
(mkRemovedOptionModule [ "services" "riak" ] "The corresponding package was removed from nixpkgs.") (mkRemovedOptionModule [ "services" "riak" ] "The corresponding package was removed from nixpkgs.")
(mkRemovedOptionModule [ "services" "cryptpad" ] "The corresponding package was removed from nixpkgs.")
# Do NOT add any option renames here, see top of the file # Do NOT add any option renames here, see top of the file
]; ];

View file

@ -48,6 +48,7 @@ in {
# navidrome uses online services to download additional album metadata / covers # navidrome uses online services to download additional album metadata / covers
"${config.environment.etc."ssl/certs/ca-certificates.crt".source}:/etc/ssl/certs/ca-certificates.crt" "${config.environment.etc."ssl/certs/ca-certificates.crt".source}:/etc/ssl/certs/ca-certificates.crt"
builtins.storeDir builtins.storeDir
"/etc"
] ++ lib.optional (cfg.settings ? MusicFolder) cfg.settings.MusicFolder; ] ++ lib.optional (cfg.settings ? MusicFolder) cfg.settings.MusicFolder;
CapabilityBoundingSet = ""; CapabilityBoundingSet = "";
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ]; RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];

View file

@ -17,8 +17,8 @@ let
compressCmd = getAttr cfg.compression { compressCmd = getAttr cfg.compression {
"none" = "cat"; "none" = "cat";
"gzip" = "${pkgs.gzip}/bin/gzip -c"; "gzip" = "${pkgs.gzip}/bin/gzip -c -${toString cfg.compressionLevel}";
"zstd" = "${pkgs.zstd}/bin/zstd -c"; "zstd" = "${pkgs.zstd}/bin/zstd -c -${toString cfg.compressionLevel}";
}; };
mkSqlPath = prefix: suffix: "${cfg.location}/${db}${prefix}.sql${suffix}"; mkSqlPath = prefix: suffix: "${cfg.location}/${db}${prefix}.sql${suffix}";
@ -130,16 +130,33 @@ in {
The type of compression to use on the generated database dump. The type of compression to use on the generated database dump.
''; '';
}; };
compressionLevel = mkOption {
type = types.ints.between 1 19;
default = 6;
description = ''
The compression level used when compression is enabled.
gzip accepts levels 1 to 9. zstd accepts levels 1 to 19.
'';
};
}; };
}; };
config = mkMerge [ config = mkMerge [
{ {
assertions = [{ assertions = [
{
assertion = cfg.backupAll -> cfg.databases == []; assertion = cfg.backupAll -> cfg.databases == [];
message = "config.services.postgresqlBackup.backupAll cannot be used together with config.services.postgresqlBackup.databases"; message = "config.services.postgresqlBackup.backupAll cannot be used together with config.services.postgresqlBackup.databases";
}]; }
{
assertion = cfg.compression == "none" ||
(cfg.compression == "gzip" && cfg.compressionLevel >= 1 && cfg.compressionLevel <= 9) ||
(cfg.compression == "zstd" && cfg.compressionLevel >= 1 && cfg.compressionLevel <= 19);
message = "config.services.postgresqlBackup.compressionLevel must be set between 1 and 9 for gzip and 1 and 19 for zstd";
}
];
} }
(mkIf cfg.enable { (mkIf cfg.enable {
systemd.tmpfiles.rules = [ systemd.tmpfiles.rules = [
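
A usage sketch for the new compressionLevel option; the database name and level are illustrative:

{
  services.postgresqlBackup = {
    enable = true;
    databases = [ "hydra" ];
    # zstd accepts levels 1-19; gzip would be limited to 1-9 by the new assertion.
    compression = "zstd";
    compressionLevel = 10;
  };
}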

View file

@ -13,6 +13,13 @@ in
services.zrepl = { services.zrepl = {
enable = mkEnableOption "zrepl"; enable = mkEnableOption "zrepl";
package = mkOption {
type = types.package;
default = pkgs.zrepl;
defaultText = literalExpression "pkgs.zrepl";
description = "Which package to use for zrepl";
};
settings = mkOption { settings = mkOption {
default = { }; default = { };
description = '' description = ''
@ -30,14 +37,14 @@ in
### Implementation ### ### Implementation ###
config = mkIf cfg.enable { config = mkIf cfg.enable {
environment.systemPackages = [ pkgs.zrepl ]; environment.systemPackages = [ cfg.package ];
# zrepl looks for its config in this location by default. This # zrepl looks for its config in this location by default. This
# allows the use of e.g. `zrepl signal wakeup <job>` without having # allows the use of e.g. `zrepl signal wakeup <job>` without having
# to specify the storepath of the config. # to specify the storepath of the config.
environment.etc."zrepl/zrepl.yml".source = configFile; environment.etc."zrepl/zrepl.yml".source = configFile;
systemd.packages = [ pkgs.zrepl ]; systemd.packages = [ cfg.package ];
# Note that pkgs.zrepl copies and adapts the upstream systemd unit, and # Note that pkgs.zrepl copies and adapts the upstream systemd unit, and
# the fields defined here only override certain fields from that unit. # the fields defined here only override certain fields from that unit.
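
A sketch of how the new package option might be used, e.g. when pinning or overriding the zrepl build; the values shown are illustrative:

{ pkgs, ... }:
{
  services.zrepl = {
    enable = true;
    # New option; defaults to pkgs.zrepl, so setting it only matters when
    # substituting another derivation that provides bin/zrepl.
    package = pkgs.zrepl;
    # settings omitted here; job definitions go under settings as documented upstream.
  };
}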

View file

@ -298,6 +298,7 @@ in
environment = env // { environment = env // {
HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-init"; HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-init";
}; };
path = [ pkgs.util-linux ];
preStart = '' preStart = ''
mkdir -p ${baseDir} mkdir -p ${baseDir}
chown hydra:hydra ${baseDir} chown hydra:hydra ${baseDir}
@ -318,11 +319,11 @@ in
${optionalString haveLocalDB '' ${optionalString haveLocalDB ''
if ! [ -e ${baseDir}/.db-created ]; then if ! [ -e ${baseDir}/.db-created ]; then
${pkgs.sudo}/bin/sudo -u ${config.services.postgresql.superUser} ${config.services.postgresql.package}/bin/createuser hydra runuser -u ${config.services.postgresql.superUser} ${config.services.postgresql.package}/bin/createuser hydra
${pkgs.sudo}/bin/sudo -u ${config.services.postgresql.superUser} ${config.services.postgresql.package}/bin/createdb -O hydra hydra runuser -u ${config.services.postgresql.superUser} ${config.services.postgresql.package}/bin/createdb -O hydra hydra
touch ${baseDir}/.db-created touch ${baseDir}/.db-created
fi fi
echo "create extension if not exists pg_trgm" | ${pkgs.sudo}/bin/sudo -u ${config.services.postgresql.superUser} -- ${config.services.postgresql.package}/bin/psql hydra echo "create extension if not exists pg_trgm" | runuser -u ${config.services.postgresql.superUser} -- ${config.services.postgresql.package}/bin/psql hydra
''} ''}
if [ ! -e ${cfg.gcRootsDir} ]; then if [ ! -e ${cfg.gcRootsDir} ]; then

View file

@ -0,0 +1,148 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.dgraph;
settingsFormat = pkgs.formats.json {};
configFile = settingsFormat.generate "config.json" cfg.settings;
dgraphWithNode = pkgs.runCommand "dgraph" {
nativeBuildInputs = [ pkgs.makeWrapper ];
}
''
mkdir -p $out/bin
makeWrapper ${cfg.package}/bin/dgraph $out/bin/dgraph \
--set PATH '${lib.makeBinPath [ pkgs.nodejs ]}:$PATH' \
'';
securityOptions = {
NoNewPrivileges = true;
AmbientCapabilities = "";
CapabilityBoundingSet = "";
DeviceAllow = "";
LockPersonality = true;
PrivateTmp = true;
PrivateDevices = true;
PrivateUsers = true;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
RemoveIPC = true;
RestrictNamespaces = true;
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallArchitectures = "native";
SystemCallErrorNumber = "EPERM";
SystemCallFilter = [
"@system-service"
"~@cpu-emulation" "~@debug" "~@keyring" "~@memlock" "~@obsolete" "~@privileged" "~@setuid"
];
};
in
{
options = {
services.dgraph = {
enable = mkEnableOption "Dgraph native GraphQL database with a graph backend";
package = lib.mkPackageOption pkgs "dgraph" { };
settings = mkOption {
type = settingsFormat.type;
default = {};
description = ''
Contents of the dgraph config. For more details see https://dgraph.io/docs/deploy/config
'';
};
alpha = {
host = mkOption {
type = types.str;
default = "localhost";
description = ''
The host which dgraph alpha will be run on.
'';
};
port = mkOption {
type = types.port;
default = 7080;
description = ''
The port which to run dgraph alpha on.
'';
};
};
zero = {
host = mkOption {
type = types.str;
default = "localhost";
description = ''
The host which dgraph zero will be run on.
'';
};
port = mkOption {
type = types.port;
default = 5080;
description = ''
The port which to run dgraph zero on.
'';
};
};
};
};
config = mkIf cfg.enable {
services.dgraph.settings = {
badger.compression = mkDefault "zstd:3";
};
systemd.services.dgraph-zero = {
description = "Dgraph native GraphQL database with a graph backend. Zero controls node clustering";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
StateDirectory = "dgraph-zero";
WorkingDirectory = "/var/lib/dgraph-zero";
DynamicUser = true;
ExecStart = "${cfg.package}/bin/dgraph zero --my ${cfg.zero.host}:${toString cfg.zero.port}";
Restart = "on-failure";
} // securityOptions;
};
systemd.services.dgraph-alpha = {
description = "Dgraph native GraphQL database with a graph backend. Alpha serves data";
after = [ "network.target" "dgraph-zero.service" ];
requires = [ "dgraph-zero.service" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
StateDirectory = "dgraph-alpha";
WorkingDirectory = "/var/lib/dgraph-alpha";
DynamicUser = true;
ExecStart = "${dgraphWithNode}/bin/dgraph alpha --config ${configFile} --my ${cfg.alpha.host}:${toString cfg.alpha.port} --zero ${cfg.zero.host}:${toString cfg.zero.port}";
ExecStop = ''
${pkgs.curl}/bin/curl --data "mutation { shutdown { response { message code } } }" \
--header 'Content-Type: application/graphql' \
-X POST \
http://localhost:8080/admin
'';
Restart = "on-failure";
} // securityOptions;
};
};
meta.maintainers = with lib.maintainers; [ happysalada ];
}
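
A minimal sketch enabling the new Dgraph module; the hosts and ports spelled out below are simply the module defaults:

{
  services.dgraph = {
    enable = true;
    # Module defaults, written out only for clarity.
    zero  = { host = "localhost"; port = 5080; };
    alpha = { host = "localhost"; port = 7080; };
    # Freeform settings end up in the generated config.json;
    # this mirrors the badger compression default set by the module.
    settings.badger.compression = "zstd:3";
  };
}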

View file

@ -166,7 +166,11 @@ in {
save = mkOption { save = mkOption {
type = with types; listOf (listOf int); type = with types; listOf (listOf int);
default = [ [900 1] [300 10] [60 10000] ]; default = [ [900 1] [300 10] [60 10000] ];
description = "The schedule in which data is persisted to disk, represented as a list of lists where the first element represent the amount of seconds and the second the number of changes."; description = mdDoc ''
The schedule in which data is persisted to disk, represented as a list of lists where the first element represents the number of seconds and the second the number of changes.
If set to the empty list (`[]`) then RDB persistence will be disabled (useful if you are using AOF or don't want any persistence).
'';
}; };
slaveOf = mkOption { slaveOf = mkOption {
@ -268,7 +272,11 @@ in {
syslog-enabled = config.syslog; syslog-enabled = config.syslog;
databases = config.databases; databases = config.databases;
maxclients = config.maxclients; maxclients = config.maxclients;
save = map (d: "${toString (builtins.elemAt d 0)} ${toString (builtins.elemAt d 1)}") config.save; save = if config.save == []
then ''""'' # Disable saving with `save = ""`
else map
(d: "${toString (builtins.elemAt d 0)} ${toString (builtins.elemAt d 1)}")
config.save;
dbfilename = "dump.rdb"; dbfilename = "dump.rdb";
dir = "/var/lib/${redisName name}"; dir = "/var/lib/${redisName name}";
appendOnly = config.appendOnly; appendOnly = config.appendOnly;
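
To illustrate the new empty-list behaviour, a sketch assuming the per-server submodule interface (services.redis.servers.&lt;name&gt;) that this file configures; the server name is illustrative:

{
  services.redis.servers."cache" = {
    enable = true;
    # An empty list now renders `save ""`, disabling RDB snapshots entirely
    # (useful when relying on AOF persistence or no persistence at all).
    save = [ ];
    appendOnly = true;
  };
}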

View file

@ -50,6 +50,12 @@ in
default_access.properties["enable-flatpak-portal"] = false default_access.properties["enable-flatpak-portal"] = false
''; '';
}; };
environment.etc."wireplumber/bluetooth.lua.d/80-systemwide.lua" = lib.mkIf config.services.pipewire.systemWide {
text = ''
-- When running system-wide, logind-integration needs to be disabled.
bluez_monitor.properties["with-logind"] = false
'';
};
systemd.packages = [ cfg.package ]; systemd.packages = [ cfg.package ];

View file

@ -143,6 +143,9 @@ in {
language = "python"; language = "python";
logo32 = "''${env.sitePackages}/ipykernel/resources/logo-32x32.png"; logo32 = "''${env.sitePackages}/ipykernel/resources/logo-32x32.png";
logo64 = "''${env.sitePackages}/ipykernel/resources/logo-64x64.png"; logo64 = "''${env.sitePackages}/ipykernel/resources/logo-64x64.png";
extraPaths = {
"cool.txt" = pkgs.writeText "cool" "cool content";
};
}; };
} }
''; '';

View file

@ -56,5 +56,14 @@ with lib;
Path to 64x64 logo png. Path to 64x64 logo png.
''; '';
}; };
extraPaths = mkOption {
type = types.attrsOf types.path;
default = { };
example = literalExpression ''"{ examples = ''${env.sitePack}/IRkernel/kernelspec/kernel.js"; }'';
description = ''
Extra paths to link in kernel directory
'';
};
}; };
} }
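
A sketch of the new extraPaths option in use, assuming a kernel named "python3" is already defined as in the module example above; the linked files are illustrative:

{ pkgs, ... }:
{
  # Links two extra files into the kernel directory next to kernel.json.
  services.jupyter.kernels.python3.extraPaths = {
    "cool.txt" = pkgs.writeText "cool" "cool content";
    "examples" = ./notebooks/examples;  # hypothetical local path
  };
}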

View file

@ -389,10 +389,9 @@ in {
"mysql.service" "mysql.service"
"postgresql.service" "postgresql.service"
]; ];
reloadTriggers = [ reloadTriggers = lib.optional (cfg.config != null) configFile
configFile ++ lib.optional (cfg.lovelaceConfig != null) lovelaceConfigFile;
lovelaceConfigFile
];
preStart = let preStart = let
copyConfig = if cfg.configWritable then '' copyConfig = if cfg.configWritable then ''
cp --no-preserve=mode ${configFile} "${cfg.configDir}/configuration.yaml" cp --no-preserve=mode ${configFile} "${cfg.configDir}/configuration.yaml"

View file

@ -167,22 +167,23 @@ let
sed -e "s/\bsu\s.*/su $user $group/" \ sed -e "s/\bsu\s.*/su $user $group/" \
-e "s/\b\(create\s\+[0-9]*\s*\|createolddir\s\+[0-9]*\s\+\).*/\1$user $group/" \ -e "s/\b\(create\s\+[0-9]*\s*\|createolddir\s\+[0-9]*\s\+\).*/\1$user $group/" \
-e "1imissingok" -e "s/\bnomissingok\b//" \ -e "1imissingok" -e "s/\bnomissingok\b//" \
$out > /tmp/logrotate.conf $out > logrotate.conf
# Since this makes for very verbose builds only show real error. # Since this makes for very verbose builds only show real error.
# There is no way to control log level, but logrotate hardcodes # There is no way to control log level, but logrotate hardcodes
# 'error:' at common log level, so we can use grep, taking care # 'error:' at common log level, so we can use grep, taking care
# to keep error codes # to keep error codes
set -o pipefail set -o pipefail
if ! ${pkgs.buildPackages.logrotate}/sbin/logrotate --debug /tmp/logrotate.conf 2>&1 \ if ! ${pkgs.buildPackages.logrotate}/sbin/logrotate -s logrotate.status \
| ( ! grep "error:" ) > /tmp/logrotate-error; then --debug logrotate.conf 2>&1 \
| ( ! grep "error:" ) > logrotate-error; then
echo "Logrotate configuration check failed." echo "Logrotate configuration check failed."
echo "The failing configuration (after adjustments to pass tests in sandbox) was:" echo "The failing configuration (after adjustments to pass tests in sandbox) was:"
printf "%s\n" "-------" printf "%s\n" "-------"
cat /tmp/logrotate.conf cat logrotate.conf
printf "%s\n" "-------" printf "%s\n" "-------"
echo "The error reported by logrotate was as follow:" echo "The error reported by logrotate was as follow:"
printf "%s\n" "-------" printf "%s\n" "-------"
cat /tmp/logrotate-error cat logrotate-error
printf "%s\n" "-------" printf "%s\n" "-------"
echo "You can disable this check with services.logrotate.checkConfig = false," echo "You can disable this check with services.logrotate.checkConfig = false,"
echo "but if you think it should work please report this failure along with" echo "but if you think it should work please report this failure along with"
@ -193,7 +194,7 @@ let
}; };
mailOption = mailOption =
if foldr (n: a: a || n ? mail) false (attrValues cfg.settings) if foldr (n: a: a || (n.mail or false) != false) false (attrValues cfg.settings)
then "--mail=${pkgs.mailutils}/bin/mail" then "--mail=${pkgs.mailutils}/bin/mail"
else ""; else "";
in in

View file

@ -6,7 +6,7 @@ let
cfg = config.services.mailman; cfg = config.services.mailman;
inherit (pkgs.mailmanPackages.buildEnvs { withHyperkitty = cfg.hyperkitty.enable; }) inherit (pkgs.mailmanPackages.buildEnvs { withHyperkitty = cfg.hyperkitty.enable; withLDAP = cfg.ldap.enable; })
mailmanEnv webEnv; mailmanEnv webEnv;
withPostgresql = config.services.postgresql.enable; withPostgresql = config.services.postgresql.enable;
@ -87,6 +87,114 @@ in {
description = "Enable Mailman on this host. Requires an active MTA on the host (e.g. Postfix)."; description = "Enable Mailman on this host. Requires an active MTA on the host (e.g. Postfix).";
}; };
ldap = {
enable = mkEnableOption "LDAP auth";
serverUri = mkOption {
type = types.str;
example = "ldaps://ldap.host";
description = ''
LDAP host to connect against.
'';
};
bindDn = mkOption {
type = types.str;
example = "cn=root,dc=nixos,dc=org";
description = ''
Service account to bind against.
'';
};
bindPasswordFile = mkOption {
type = types.str;
example = "/run/secrets/ldap-bind";
description = ''
Path to the file containing the bind password of the service account
defined by <xref linkend="opt-services.mailman.ldap.bindDn" />.
'';
};
superUserGroup = mkOption {
type = types.nullOr types.str;
default = null;
example = "cn=admin,ou=groups,dc=nixos,dc=org";
description = ''
Group where a user must be a member of to gain superuser rights.
'';
};
userSearch = {
query = mkOption {
type = types.str;
example = "(&(objectClass=inetOrgPerson)(|(uid=%(user)s)(mail=%(user)s)))";
description = ''
Query to find a user in the LDAP database.
'';
};
ou = mkOption {
type = types.str;
example = "ou=users,dc=nixos,dc=org";
description = ''
Organizational unit to look up a user.
'';
};
};
groupSearch = {
type = mkOption {
type = types.enum [
"posixGroup" "groupOfNames" "memberDNGroup" "nestedMemberDNGroup" "nestedGroupOfNames"
"groupOfUniqueNames" "nestedGroupOfUniqueNames" "activeDirectoryGroup" "nestedActiveDirectoryGroup"
"organizationalRoleGroup" "nestedOrganizationalRoleGroup"
];
default = "posixGroup";
apply = v: "${toUpper (substring 0 1 v)}${substring 1 (stringLength v) v}Type";
description = ''
Type of group to perform a group search against.
'';
};
query = mkOption {
type = types.str;
example = "(objectClass=groupOfNames)";
description = ''
Query to find a group associated to a user in the LDAP database.
'';
};
ou = mkOption {
type = types.str;
example = "ou=groups,dc=nixos,dc=org";
description = ''
Organizational unit to look up a group.
'';
};
};
attrMap = {
username = mkOption {
default = "uid";
type = types.str;
description = ''
LDAP-attribute that corresponds to the <literal>username</literal>-attribute in mailman.
'';
};
firstName = mkOption {
default = "givenName";
type = types.str;
description = ''
LDAP-attribute that corresponds to the <literal>firstName</literal>-attribute in mailman.
'';
};
lastName = mkOption {
default = "sn";
type = types.str;
description = ''
LDAP-attribute that corresponds to the <literal>lastName</literal>-attribute in mailman.
'';
};
email = mkOption {
default = "mail";
type = types.str;
description = ''
LDAP-attribute that corresponds to the <literal>email</literal>-attribute in mailman.
'';
};
};
};
enablePostfix = mkOption { enablePostfix = mkOption {
type = types.bool; type = types.bool;
default = true; default = true;
@ -274,6 +382,34 @@ in {
with open('/var/lib/mailman-web/settings_local.json') as f: with open('/var/lib/mailman-web/settings_local.json') as f:
globals().update(json.load(f)) globals().update(json.load(f))
${optionalString (cfg.ldap.enable) ''
import ldap
from django_auth_ldap.config import LDAPSearch, ${cfg.ldap.groupSearch.type}
AUTH_LDAP_SERVER_URI = "${cfg.ldap.serverUri}"
AUTH_LDAP_BIND_DN = "${cfg.ldap.bindDn}"
with open("${cfg.ldap.bindPasswordFile}") as f:
AUTH_LDAP_BIND_PASSWORD = f.read().rstrip('\n')
AUTH_LDAP_USER_SEARCH = LDAPSearch("${cfg.ldap.userSearch.ou}",
ldap.SCOPE_SUBTREE, "${cfg.ldap.userSearch.query}")
AUTH_LDAP_GROUP_TYPE = ${cfg.ldap.groupSearch.type}()
AUTH_LDAP_GROUP_SEARCH = LDAPSearch("${cfg.ldap.groupSearch.ou}",
ldap.SCOPE_SUBTREE, "${cfg.ldap.groupSearch.query}")
AUTH_LDAP_USER_ATTR_MAP = {
${concatStrings (flip mapAttrsToList cfg.ldap.attrMap (key: value: ''
"${key}": "${value}",
''))}
}
${optionalString (cfg.ldap.superUserGroup != null) ''
AUTH_LDAP_USER_FLAGS_BY_GROUP = {
"is_superuser": "${cfg.ldap.superUserGroup}"
}
''}
AUTHENTICATION_BACKENDS = (
"django_auth_ldap.backend.LDAPBackend",
"django.contrib.auth.backends.ModelBackend"
)
''}
''; '';
services.nginx = mkIf (cfg.serve.enable && cfg.webHosts != []) { services.nginx = mkIf (cfg.serve.enable && cfg.webHosts != []) {
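
A usage sketch for the new LDAP options; all DNs, hosts and file paths are illustrative, and siteOwner is an assumed pre-existing option of this module:

{
  services.mailman = {
    enable = true;
    siteOwner = "postmaster@example.org";  # assumed pre-existing option
    ldap = {
      enable = true;
      serverUri = "ldaps://ldap.example.org";
      bindDn = "cn=mailman,ou=services,dc=example,dc=org";
      bindPasswordFile = "/run/secrets/mailman-ldap-bind";
      superUserGroup = "cn=mailman-admins,ou=groups,dc=example,dc=org";
      userSearch = {
        ou = "ou=users,dc=example,dc=org";
        query = "(&(objectClass=inetOrgPerson)(|(uid=%(user)s)(mail=%(user)s)))";
      };
      groupSearch = {
        type = "groupOfNames";
        ou = "ou=groups,dc=example,dc=org";
        query = "(objectClass=groupOfNames)";
      };
    };
  };
}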

View file

@ -33,21 +33,26 @@
<link xlink:href="https://github.com/matrix-org/synapse#synapse-installation"> <link xlink:href="https://github.com/matrix-org/synapse#synapse-installation">
installation instructions of Synapse </link>. installation instructions of Synapse </link>.
<programlisting> <programlisting>
{ pkgs, lib, ... }: { pkgs, lib, config, ... }:
let let
fqdn = fqdn = "${config.networking.hostName}.${config.networking.domain}";
let clientConfig = {
join = hostName: domain: hostName + lib.optionalString (domain != null) ".${domain}"; "m.homeserver".base_url = "https://${fqdn}";
in join config.networking.hostName config.networking.domain; "m.identity_server" = {};
in {
networking = {
<link linkend="opt-networking.hostName">hostName</link> = "myhostname";
<link linkend="opt-networking.domain">domain</link> = "example.org";
}; };
<link linkend="opt-networking.firewall.allowedTCPPorts">networking.firewall.allowedTCPPorts</link> = [ 80 443 ]; serverConfig."m.server" = "${config.services.matrix-synapse.settings.server_name}:443";
mkWellKnown = data: ''
add_header Content-Type application/json;
add_header Access-Control-Allow-Origin *;
return 200 '${builtins.toJSON data}';
'';
in {
<xref linkend="opt-networking.hostName" /> = "myhostname";
<xref linkend="opt-networking.domain" /> = "example.org";
<xref linkend="opt-networking.firewall.allowedTCPPorts" /> = [ 80 443 ];
<link linkend="opt-services.postgresql.enable">services.postgresql.enable</link> = true; <xref linkend="opt-services.postgresql.enable" /> = true;
<link linkend="opt-services.postgresql.initialScript">services.postgresql.initialScript</link> = pkgs.writeText "synapse-init.sql" '' <xref linkend="opt-services.postgresql.initialScript" /> = pkgs.writeText "synapse-init.sql" ''
CREATE ROLE "matrix-synapse" WITH LOGIN PASSWORD 'synapse'; CREATE ROLE "matrix-synapse" WITH LOGIN PASSWORD 'synapse';
CREATE DATABASE "matrix-synapse" WITH OWNER "matrix-synapse" CREATE DATABASE "matrix-synapse" WITH OWNER "matrix-synapse"
TEMPLATE template0 TEMPLATE template0
@ -57,78 +62,41 @@ in {
services.nginx = { services.nginx = {
<link linkend="opt-services.nginx.enable">enable</link> = true; <link linkend="opt-services.nginx.enable">enable</link> = true;
# only recommendedProxySettings and recommendedGzipSettings are strictly required,
# but the rest make sense as well
<link linkend="opt-services.nginx.recommendedTlsSettings">recommendedTlsSettings</link> = true; <link linkend="opt-services.nginx.recommendedTlsSettings">recommendedTlsSettings</link> = true;
<link linkend="opt-services.nginx.recommendedOptimisation">recommendedOptimisation</link> = true; <link linkend="opt-services.nginx.recommendedOptimisation">recommendedOptimisation</link> = true;
<link linkend="opt-services.nginx.recommendedGzipSettings">recommendedGzipSettings</link> = true; <link linkend="opt-services.nginx.recommendedGzipSettings">recommendedGzipSettings</link> = true;
<link linkend="opt-services.nginx.recommendedProxySettings">recommendedProxySettings</link> = true; <link linkend="opt-services.nginx.recommendedProxySettings">recommendedProxySettings</link> = true;
<link linkend="opt-services.nginx.virtualHosts">virtualHosts</link> = { <link linkend="opt-services.nginx.virtualHosts">virtualHosts</link> = {
# This host section can be placed on a different host than the rest, "${config.networking.domain}" = { <co xml:id='ex-matrix-synapse-dns' />
# i.e. to delegate from the host being accessible as ${config.networking.domain}
# to another host actually running the Matrix homeserver.
"${config.networking.domain}" = {
<link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link> = true; <link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link> = true;
<link linkend="opt-services.nginx.virtualHosts._name_.forceSSL">forceSSL</link> = true; <link linkend="opt-services.nginx.virtualHosts._name_.forceSSL">forceSSL</link> = true;
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.extraConfig">locations."= /.well-known/matrix/server".extraConfig</link> = mkWellKnown serverConfig; <co xml:id='ex-matrix-synapse-well-known-server' />
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.extraConfig">locations."= /.well-known/matrix/server".extraConfig</link> = <link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.extraConfig">locations."= /.well-known/matrix/client".extraConfig</link> = mkWellKnown clientConfig; <co xml:id='ex-matrix-synapse-well-known-client' />
let
# use 443 instead of the default 8448 port to unite
# the client-server and server-server port for simplicity
server = { "m.server" = "${fqdn}:443"; };
in ''
add_header Content-Type application/json;
return 200 '${builtins.toJSON server}';
'';
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.extraConfig">locations."= /.well-known/matrix/client".extraConfig</link> =
let
client = {
"m.homeserver" = { "base_url" = "https://${fqdn}"; };
"m.identity_server" = { "base_url" = "https://vector.im"; };
}; };
# ACAO required to allow element-web on any URL to request this json file "${fqdn}" = {
in ''
add_header Content-Type application/json;
add_header Access-Control-Allow-Origin *;
return 200 '${builtins.toJSON client}';
'';
};
# Reverse proxy for Matrix client-server and server-server communication
${fqdn} = {
<link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link> = true; <link linkend="opt-services.nginx.virtualHosts._name_.enableACME">enableACME</link> = true;
<link linkend="opt-services.nginx.virtualHosts._name_.forceSSL">forceSSL</link> = true; <link linkend="opt-services.nginx.virtualHosts._name_.forceSSL">forceSSL</link> = true;
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.extraConfig">locations."/".extraConfig</link> = '' <co xml:id='ex-matrix-synapse-rev-default' />
# Or do a redirect instead of the 404, or whatever is appropriate for you.
# But do not put a Matrix Web client here! See the Element web section below.
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.extraConfig">locations."/".extraConfig</link> = ''
return 404; return 404;
''; '';
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.proxyPass">locations."/_matrix".proxyPass</link> = "http://[::1]:8008"; <co xml:id='ex-matrix-synapse-rev-proxy-pass' />
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.proxyPass">locations."/_synapse/client".proxyPass</link> = "http://[::1]:8008"; <co xml:id='ex-matrix-synapse-rev-client' />
};
};
};
# forward all Matrix API calls to the synapse Matrix homeserver
locations."/_matrix" = {
<link linkend="opt-services.nginx.virtualHosts._name_.locations._name_.proxyPass">proxyPass</link> = "http://[::1]:8008"; # without a trailing /
};
};
};
};
services.matrix-synapse = { services.matrix-synapse = {
<link linkend="opt-services.matrix-synapse.enable">enable</link> = true; <link linkend="opt-services.matrix-synapse.enable">enable</link> = true;
<link linkend="opt-services.matrix-synapse.settings.server_name">server_name</link> = config.networking.domain; <link linkend="opt-services.matrix-synapse.settings.server_name">settings.server_name</link> = config.networking.domain;
<link linkend="opt-services.matrix-synapse.settings.listeners">listeners</link> = [ <link linkend="opt-services.matrix-synapse.settings.listeners">settings.listeners</link> = [
{ { <link linkend="opt-services.matrix-synapse.settings.listeners._.port">port</link> = 8008;
<link linkend="opt-services.matrix-synapse.settings.listeners._.port">port</link> = 8008;
<link linkend="opt-services.matrix-synapse.settings.listeners._.bind_addresses">bind_addresses</link> = [ "::1" ]; <link linkend="opt-services.matrix-synapse.settings.listeners._.bind_addresses">bind_addresses</link> = [ "::1" ];
<link linkend="opt-services.matrix-synapse.settings.listeners._.type">type</link> = "http"; <link linkend="opt-services.matrix-synapse.settings.listeners._.type">type</link> = "http";
<link linkend="opt-services.matrix-synapse.settings.listeners._.tls">tls</link> = false; <link linkend="opt-services.matrix-synapse.settings.listeners._.tls">tls</link> = false;
<link linkend="opt-services.matrix-synapse.settings.listeners._.x_forwarded">x_forwarded</link> = true; <link linkend="opt-services.matrix-synapse.settings.listeners._.x_forwarded">x_forwarded</link> = true;
<link linkend="opt-services.matrix-synapse.settings.listeners._.resources">resources</link> = [ { <link linkend="opt-services.matrix-synapse.settings.listeners._.resources">resources</link> = [ {
<link linkend="opt-services.matrix-synapse.settings.listeners._.resources._.names">names</link> = [ "client" ]; <link linkend="opt-services.matrix-synapse.settings.listeners._.resources._.names">names</link> = [ "client" "federation" ];
<link linkend="opt-services.matrix-synapse.settings.listeners._.resources._.compress">compress</link> = true; <link linkend="opt-services.matrix-synapse.settings.listeners._.resources._.compress">compress</link> = true;
} {
<link linkend="opt-services.matrix-synapse.settings.listeners._.resources._.names">names</link> = [ "federation" ];
<link linkend="opt-services.matrix-synapse.settings.listeners._.resources._.compress">compress</link> = false;
} ]; } ];
} }
]; ];
@ -136,7 +104,8 @@ in {
} }
</programlisting> </programlisting>
</para> </para>
<calloutlist>
<callout arearefs='ex-matrix-synapse-dns'>
<para> <para>
If the <code>A</code> and <code>AAAA</code> DNS records on If the <code>A</code> and <code>AAAA</code> DNS records on
<literal>example.org</literal> do not point on the same host as the records <literal>example.org</literal> do not point on the same host as the records
@ -149,7 +118,45 @@ in {
<literal>myotherhost.example.org</literal> by only changing the <literal>myotherhost.example.org</literal> by only changing the
<code>/.well-known</code> redirection target. <code>/.well-known</code> redirection target.
</para> </para>
</callout>
<callout arearefs='ex-matrix-synapse-well-known-server'>
<para>
This section is not needed if the <link linkend="opt-services.matrix-synapse.settings.server_name">server_name</link>
of <package>matrix-synapse</package> is equal to the domain (i.e.
<literal>example.org</literal> from <literal>@foo:example.org</literal>)
and the federation port is 8448.
Further reference can be found in the <link xlink:href="https://matrix-org.github.io/synapse/latest/delegate.html">docs
about delegation</link>.
</para>
</callout>
<callout arearefs='ex-matrix-synapse-well-known-client'>
<para>
This is usually needed for homeserver discovery (from e.g. other Matrix clients).
Further reference can be found in the <link xlink:href="https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixclient">upstream docs</link>
</para>
</callout>
<callout arearefs='ex-matrix-synapse-rev-default'>
<para>
It's also possible to do a redirect here or something else; this vhost is not
needed for Matrix. It's recommended, though, to <emphasis>not put</emphasis> Element
here; see also the <link linkend='ex-matrix-synapse-rev-default'>section about Element</link>.
</para>
</callout>
<callout arearefs='ex-matrix-synapse-rev-proxy-pass'>
<para>
Forward all Matrix API calls to the synapse Matrix homeserver. A trailing slash
<emphasis>must not</emphasis> be used here.
</para>
</callout>
<callout arearefs='ex-matrix-synapse-rev-client'>
<para>
Forward requests for e.g. SSO and password-resets.
</para>
</callout>
</calloutlist>
</section>
<section xml:id="module-services-matrix-register-users">
<title>Registering Matrix users</title>
<para> <para>
If you want to run a server with public registration by anybody, you can If you want to run a server with public registration by anybody, you can
then enable <literal><link linkend="opt-services.matrix-synapse.settings.enable_registration">services.matrix-synapse.settings.enable_registration</link> = then enable <literal><link linkend="opt-services.matrix-synapse.settings.enable_registration">services.matrix-synapse.settings.enable_registration</link> =
@ -159,7 +166,7 @@ in {
To create a new user or admin, run the following after you have set the secret To create a new user or admin, run the following after you have set the secret
and have rebuilt NixOS: and have rebuilt NixOS:
<screen> <screen>
<prompt>$ </prompt>nix run nixpkgs.matrix-synapse <prompt>$ </prompt>nix-shell -p matrix-synapse
<prompt>$ </prompt>register_new_matrix_user -k <replaceable>your-registration-shared-secret</replaceable> http://localhost:8008 <prompt>$ </prompt>register_new_matrix_user -k <replaceable>your-registration-shared-secret</replaceable> http://localhost:8008
<prompt>New user localpart: </prompt><replaceable>your-username</replaceable> <prompt>New user localpart: </prompt><replaceable>your-username</replaceable>
<prompt>Password:</prompt> <prompt>Password:</prompt>
@ -168,12 +175,51 @@ in {
Success! Success!
</screen> </screen>
In the example, this would create a user with the Matrix Identifier In the example, this would create a user with the Matrix Identifier
<literal>@your-username:example.org</literal>. Note that the registration <literal>@your-username:example.org</literal>.
secret ends up in the nix store and therefore is world-readable by any user <warning>
on your machine, so it makes sense to only temporarily activate the <para>
<link linkend="opt-services.matrix-synapse.settings.registration_shared_secret">registration_shared_secret</link> When using <xref linkend="opt-services.matrix-synapse.settings.registration_shared_secret" />, the secret
option until a better solution for NixOS is in place. will end up in the world-readable store. Instead it's recommended to deploy the secret
in an additional file like this:
<itemizedlist>
<listitem>
<para>
Create a file with the following contents:
<programlisting>registration_shared_secret: your-very-secret-secret</programlisting>
</para> </para>
</listitem>
<listitem>
<para>
Deploy the file with a secret-manager such as <link xlink:href="https://nixops.readthedocs.io/en/latest/overview.html#managing-keys"><option>deployment.keys</option></link>
from <citerefentry><refentrytitle>nixops</refentrytitle><manvolnum>1</manvolnum></citerefentry>
or <link xlink:href="https://github.com/Mic92/sops-nix/">sops-nix</link> to
e.g. <filename>/run/secrets/matrix-shared-secret</filename> and ensure that it's readable
by <package>matrix-synapse</package>.
</para>
</listitem>
<listitem>
<para>
Include the file like this in your configuration:
<programlisting>
{
<xref linkend="opt-services.matrix-synapse.extraConfigFiles" /> = [
"/run/secrets/matrix-shared-secret"
];
}
</programlisting>
</para>
</listitem>
</itemizedlist>
</para>
</warning>
</para>
<note>
<para>
It's also possible to use alternative authentication mechanisms such as
<link xlink:href="https://github.com/matrix-org/matrix-synapse-ldap3">LDAP (via <literal>matrix-synapse-ldap3</literal>)</link>
or <link xlink:href="https://matrix-org.github.io/synapse/latest/openid.html">OpenID</link>.
</para>
</note>
</section> </section>
<section xml:id="module-services-matrix-element-web"> <section xml:id="module-services-matrix-element-web">
<title>Element (formerly known as Riot) Web Client</title> <title>Element (formerly known as Riot) Web Client</title>
@ -206,10 +252,7 @@ Success!
<link linkend="opt-services.nginx.virtualHosts._name_.root">root</link> = pkgs.element-web.override { <link linkend="opt-services.nginx.virtualHosts._name_.root">root</link> = pkgs.element-web.override {
conf = { conf = {
default_server_config."m.homeserver" = { default_server_config = clientConfig; # see `clientConfig` from the snippet above.
"base_url" = "https://${fqdn}";
"server_name" = "${fqdn}";
};
}; };
}; };
}; };
@ -217,15 +260,17 @@ Success!
</programlisting> </programlisting>
</para> </para>
<note>
<para> <para>
Note that the Element developers do not recommend running Element and your Matrix The Element developers do not recommend running Element and your Matrix
homeserver on the same fully-qualified domain name for security reasons. In homeserver on the same fully-qualified domain name for security reasons. In
the example, this means that you should not reuse the the example, this means that you should not reuse the
<literal>myhostname.example.org</literal> virtualHost to also serve Element, <literal>myhostname.example.org</literal> virtualHost to also serve Element,
but instead serve it on a different subdomain, like but instead serve it on a different subdomain, like
<literal>element.example.org</literal> in the example. See the <literal>element.example.org</literal> in the example. See the
<link xlink:href="https://github.com/vector-im/riot-web#important-security-note">Element <link xlink:href="https://github.com/vector-im/element-web/tree/v1.10.0#important-security-notes">Element
Important Security Notes</link> for more information on this subject. Important Security Notes</link> for more information on this subject.
</para> </para>
</note>
</section> </section>
</chapter> </chapter>

View file

@ -45,6 +45,10 @@ in
# get the command line client on system path to make some use of the service # get the command line client on system path to make some use of the service
environment.systemPackages = [ pkgs.dict ]; environment.systemPackages = [ pkgs.dict ];
environment.etc."dict.conf".text = ''
server localhost
'';
users.users.dictd = users.users.dictd =
{ group = "dictd"; { group = "dictd";
description = "DICT.org dictd server"; description = "DICT.org dictd server";

View file

@ -2,6 +2,7 @@
let let
cfg = config.services.geoipupdate; cfg = config.services.geoipupdate;
inherit (builtins) isAttrs isString isInt isList typeOf hashString;
in in
{ {
imports = [ imports = [
@ -27,11 +28,30 @@ in
}; };
settings = lib.mkOption { settings = lib.mkOption {
example = lib.literalExpression ''
{
AccountID = 200001;
DatabaseDirectory = "/var/lib/GeoIP";
LicenseKey = { _secret = "/run/keys/maxmind_license_key"; };
Proxy = "10.0.0.10:8888";
ProxyUserPassword = { _secret = "/run/keys/proxy_pass"; };
}
'';
description = '' description = ''
<productname>geoipupdate</productname> configuration <productname>geoipupdate</productname> configuration
options. See options. See
<link xlink:href="https://github.com/maxmind/geoipupdate/blob/main/doc/GeoIP.conf.md" /> <link xlink:href="https://github.com/maxmind/geoipupdate/blob/main/doc/GeoIP.conf.md" />
for a full list of available options. for a full list of available options.
Settings containing secret data should be set to an
attribute set containing the attribute
<literal>_secret</literal> - a string pointing to a file
containing the value the option should be set to. See the
example to get a better picture of this: in the resulting
<filename>GeoIP.conf</filename> file, the
<literal>ProxyUserPassword</literal> key will be set to the
contents of the
<filename>/run/keys/proxy_pass</filename> file.
''; '';
type = lib.types.submodule { type = lib.types.submodule {
freeformType = freeformType =
@ -65,11 +85,18 @@ in
}; };
LicenseKey = lib.mkOption { LicenseKey = lib.mkOption {
type = lib.types.path; type = with lib.types; either path (attrsOf path);
description = '' description = ''
A file containing the <productname>MaxMind</productname> A file containing the
license key. <productname>MaxMind</productname> license key.
Always handled as a secret whether the value is
wrapped in a <literal>{ _secret = ...; }</literal>
attrset or not (refer to <xref
linkend="opt-services.geoipupdate.settings" /> for
details).
''; '';
apply = x: if isAttrs x then x else { _secret = x; };
}; };
DatabaseDirectory = lib.mkOption { DatabaseDirectory = lib.mkOption {
@ -102,6 +129,9 @@ in
systemd.services.geoipupdate-create-db-dir = { systemd.services.geoipupdate-create-db-dir = {
serviceConfig.Type = "oneshot"; serviceConfig.Type = "oneshot";
script = '' script = ''
set -o errexit -o pipefail -o nounset -o errtrace
shopt -s inherit_errexit
mkdir -p ${cfg.settings.DatabaseDirectory} mkdir -p ${cfg.settings.DatabaseDirectory}
chmod 0755 ${cfg.settings.DatabaseDirectory} chmod 0755 ${cfg.settings.DatabaseDirectory}
''; '';
@ -115,32 +145,41 @@ in
"network-online.target" "network-online.target"
"nss-lookup.target" "nss-lookup.target"
]; ];
path = [ pkgs.replace-secret ];
wants = [ "network-online.target" ]; wants = [ "network-online.target" ];
startAt = cfg.interval; startAt = cfg.interval;
serviceConfig = { serviceConfig = {
ExecStartPre = ExecStartPre =
let let
isSecret = v: isAttrs v && v ? _secret && isString v._secret;
geoipupdateKeyValue = lib.generators.toKeyValue { geoipupdateKeyValue = lib.generators.toKeyValue {
mkKeyValue = lib.flip lib.generators.mkKeyValueDefault " " rec { mkKeyValue = lib.flip lib.generators.mkKeyValueDefault " " rec {
mkValueString = v: with builtins; mkValueString = v:
if isInt v then toString v if isInt v then toString v
else if isString v then v else if isString v then v
else if true == v then "1" else if true == v then "1"
else if false == v then "0" else if false == v then "0"
else if isList v then lib.concatMapStringsSep " " mkValueString v else if isList v then lib.concatMapStringsSep " " mkValueString v
else if isSecret v then hashString "sha256" v._secret
else throw "unsupported type ${typeOf v}: ${(lib.generators.toPretty {}) v}"; else throw "unsupported type ${typeOf v}: ${(lib.generators.toPretty {}) v}";
}; };
}; };
secretPaths = lib.catAttrs "_secret" (lib.collect isSecret cfg.settings);
mkSecretReplacement = file: ''
replace-secret ${lib.escapeShellArgs [ (hashString "sha256" file) file "/run/geoipupdate/GeoIP.conf" ]}
'';
secretReplacements = lib.concatMapStrings mkSecretReplacement secretPaths;
geoipupdateConf = pkgs.writeText "geoipupdate.conf" (geoipupdateKeyValue cfg.settings); geoipupdateConf = pkgs.writeText "geoipupdate.conf" (geoipupdateKeyValue cfg.settings);
script = '' script = ''
set -o errexit -o pipefail -o nounset -o errtrace
shopt -s inherit_errexit
chown geoip "${cfg.settings.DatabaseDirectory}" chown geoip "${cfg.settings.DatabaseDirectory}"
cp ${geoipupdateConf} /run/geoipupdate/GeoIP.conf cp ${geoipupdateConf} /run/geoipupdate/GeoIP.conf
${pkgs.replace-secret}/bin/replace-secret '${cfg.settings.LicenseKey}' \ ${secretReplacements}
'${cfg.settings.LicenseKey}' \
/run/geoipupdate/GeoIP.conf
''; '';
in in
"+${pkgs.writeShellScript "start-pre-full-privileges" script}"; "+${pkgs.writeShellScript "start-pre-full-privileges" script}";

View file

@ -1063,7 +1063,7 @@ in {
chown ${cfg.user}:${cfg.group} ${cfg.registry.certFile} chown ${cfg.user}:${cfg.group} ${cfg.registry.certFile}
''; '';
serviceConfig = { unitConfig = {
ConditionPathExists = "!${cfg.registry.certFile}"; ConditionPathExists = "!${cfg.registry.certFile}";
}; };
}; };

View file

@ -83,7 +83,7 @@ let
}; };
in in
{ {
meta.maintainers = with maintainers; [ earvstedt Flakebi ]; meta.maintainers = with maintainers; [ erikarvstedt Flakebi ];
imports = [ imports = [
(mkRenamedOptionModule [ "services" "paperless-ng" ] [ "services" "paperless" ]) (mkRenamedOptionModule [ "services" "paperless-ng" ] [ "services" "paperless" ])

View file

@ -0,0 +1,151 @@
{ config
, pkgs
, lib
, ...}:
with lib;
let
cfg = config.services.polaris;
settingsFormat = pkgs.formats.toml {};
in
{
options = {
services.polaris = {
enable = mkEnableOption "Polaris Music Server";
package = mkPackageOption pkgs "polaris" { };
user = mkOption {
type = types.str;
default = "polaris";
description = "User account under which Polaris runs.";
};
group = mkOption {
type = types.str;
default = "polaris";
description = "Group under which Polaris is run.";
};
extraGroups = mkOption {
type = types.listOf types.str;
default = [];
description = "Polaris' auxiliary groups.";
example = literalExpression ''["media" "music"]'';
};
port = mkOption {
type = types.port;
default = 5050;
description = ''
The port which the Polaris REST api and web UI should listen to.
Note: polaris is hardcoded to listen on the address "0.0.0.0".
'';
};
settings = mkOption {
type = settingsFormat.type;
default = {};
description = ''
Contents for the TOML Polaris config, applied each start.
Although poorly documented, an example may be found here:
<link xlink:href="https://github.com/agersant/polaris/blob/374d0ca56fc0a466d797a4b252e2078607476797/test-data/config.toml">test-config.toml</link>
'';
example = literalExpression ''
{
settings.reindex_every_n_seconds = 7*24*60*60; # weekly, default is 1800
settings.album_art_pattern =
"(cover|front|folder)\.(jpeg|jpg|png|bmp|gif)";
mount_dirs = [
{
name = "NAS";
source = "/mnt/nas/music";
}
{
name = "Local";
source = "/home/my_user/Music";
}
];
}
'';
};
openFirewall = mkOption {
type = types.bool;
default = false;
description = ''
Open the configured port in the firewall.
'';
};
};
};
config = mkIf cfg.enable {
systemd.services.polaris = {
description = "Polaris Music Server";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = rec {
User = cfg.user;
Group = cfg.group;
DynamicUser = true;
SupplementaryGroups = cfg.extraGroups;
StateDirectory = "polaris";
CacheDirectory = "polaris";
ExecStart = escapeShellArgs ([
"${cfg.package}/bin/polaris"
"--foreground"
"--port" cfg.port
"--database" "/var/lib/${StateDirectory}/db.sqlite"
"--cache" "/var/cache/${CacheDirectory}"
] ++ optionals (cfg.settings != {}) [
"--config" (settingsFormat.generate "polaris-config.toml" cfg.settings)
]);
Restart = "on-failure";
# Security options:
#NoNewPrivileges = true; # implied by DynamicUser
#RemoveIPC = true; # implied by DynamicUser
AmbientCapabilities = "";
CapabilityBoundingSet = "";
DeviceAllow = "";
LockPersonality = true;
#PrivateTmp = true; # implied by DynamicUser
PrivateDevices = true;
PrivateUsers = true;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
RestrictNamespaces = true;
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
RestrictRealtime = true;
#RestrictSUIDSGID = true; # implied by DynamicUser
SystemCallArchitectures = "native";
SystemCallErrorNumber = "EPERM";
SystemCallFilter = [
"@system-service"
"~@cpu-emulation" "~@debug" "~@keyring" "~@memlock" "~@obsolete" "~@privileged" "~@setuid"
];
};
};
networking.firewall = mkIf cfg.openFirewall {
allowedTCPPorts = [ cfg.port ];
};
};
meta.maintainers = with maintainers; [ pbsds ];
}
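
A minimal host-side sketch for the new Polaris module; the mount directory is illustrative:

{
  services.polaris = {
    enable = true;
    port = 5050;           # module default, shown explicitly
    openFirewall = true;
    settings.mount_dirs = [
      { name = "Music"; source = "/srv/music"; }
    ];
  };
}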

View file

@ -11,6 +11,14 @@ in
services.radarr = { services.radarr = {
enable = mkEnableOption "Radarr"; enable = mkEnableOption "Radarr";
package = mkOption {
description = "Radarr package to use";
default = pkgs.radarr;
defaultText = literalExpression "pkgs.radarr";
example = literalExpression "pkgs.radarr";
type = types.package;
};
dataDir = mkOption { dataDir = mkOption {
type = types.str; type = types.str;
default = "/var/lib/radarr/.config/Radarr"; default = "/var/lib/radarr/.config/Radarr";
@ -51,7 +59,7 @@ in
Type = "simple"; Type = "simple";
User = cfg.user; User = cfg.user;
Group = cfg.group; Group = cfg.group;
ExecStart = "${pkgs.radarr}/bin/Radarr -nobrowser -data='${cfg.dataDir}'"; ExecStart = "${cfg.package}/bin/Radarr -nobrowser -data='${cfg.dataDir}'";
Restart = "on-failure"; Restart = "on-failure";
}; };
}; };
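
Illustrative use of the new package option, e.g. to pin Radarr to a specific or overridden build; openFirewall is an assumed pre-existing option of this module:

{ pkgs, ... }:
{
  services.radarr = {
    enable = true;
    # Any derivation providing bin/Radarr can be substituted here.
    package = pkgs.radarr;
    openFirewall = true;  # assumed pre-existing option
  };
}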

View file

@ -24,16 +24,6 @@ let
+ cfg.web.extraConfig + cfg.web.extraConfig
); );
graphiteApiConfig = pkgs.writeText "graphite-api.yaml" ''
search_index: ${dataDir}/index
${optionalString (config.time.timeZone != null) "time_zone: ${config.time.timeZone}"}
${optionalString (cfg.api.finders != []) "finders:"}
${concatMapStringsSep "\n" (f: " - " + f.moduleName) cfg.api.finders}
${optionalString (cfg.api.functions != []) "functions:"}
${concatMapStringsSep "\n" (f: " - " + f) cfg.api.functions}
${cfg.api.extraConfig}
'';
seyrenConfig = { seyrenConfig = {
SEYREN_URL = cfg.seyren.seyrenUrl; SEYREN_URL = cfg.seyren.seyrenUrl;
MONGO_URL = cfg.seyren.mongoUrl; MONGO_URL = cfg.seyren.mongoUrl;
@ -72,6 +62,8 @@ let
in { in {
imports = [ imports = [
(mkRemovedOptionModule ["services" "graphite" "api"] "")
(mkRemovedOptionModule ["services" "graphite" "beacon"] "")
(mkRemovedOptionModule ["services" "graphite" "pager"] "") (mkRemovedOptionModule ["services" "graphite" "pager"] "")
]; ];
@ -115,88 +107,6 @@ in {
}; };
}; };
api = {
enable = mkOption {
description = ''
Whether to enable graphite api. Graphite api is lightweight alternative
to graphite web, with api and without dashboard. It's advised to use
grafana as alternative dashboard and influxdb as alternative to
graphite carbon.
For more information visit
<link xlink:href="https://graphite-api.readthedocs.org/en/latest/"/>
'';
default = false;
type = types.bool;
};
finders = mkOption {
description = "List of finder plugins to load.";
default = [];
example = literalExpression "[ pkgs.python3Packages.influxgraph ]";
type = types.listOf types.package;
};
functions = mkOption {
description = "List of functions to load.";
default = [
"graphite_api.functions.SeriesFunctions"
"graphite_api.functions.PieFunctions"
];
type = types.listOf types.str;
};
listenAddress = mkOption {
description = "Graphite web service listen address.";
default = "127.0.0.1";
type = types.str;
};
port = mkOption {
description = "Graphite api service port.";
default = 8080;
type = types.int;
};
package = mkOption {
description = "Package to use for graphite api.";
default = pkgs.python3Packages.graphite_api;
defaultText = literalExpression "pkgs.python3Packages.graphite_api";
type = types.package;
};
extraConfig = mkOption {
description = "Extra configuration for graphite api.";
default = ''
whisper:
directories:
- ${dataDir}/whisper
'';
defaultText = literalExpression ''
'''
whisper:
directories:
- ''${config.${opt.dataDir}}/whisper
'''
'';
example = ''
allowed_origins:
- dashboard.example.com
cheat_times: true
influxdb:
host: localhost
port: 8086
user: influxdb
pass: influxdb
db: metrics
cache:
CACHE_TYPE: 'filesystem'
CACHE_DIR: '/tmp/graphite-api-cache'
'';
type = types.lines;
};
};
carbon = { carbon = {
config = mkOption { config = mkOption {
description = "Content of carbon configuration file."; description = "Content of carbon configuration file.";
@ -354,16 +264,6 @@ in {
''; '';
}; };
}; };
beacon = {
enable = mkEnableOption "graphite beacon";
config = mkOption {
description = "Graphite beacon configuration.";
default = {};
type = types.attrs;
};
};
}; };
###### implementation ###### implementation
@ -489,44 +389,6 @@ in {
environment.systemPackages = [ pkgs.python3Packages.graphite-web ]; environment.systemPackages = [ pkgs.python3Packages.graphite-web ];
})) }))
(mkIf cfg.api.enable {
systemd.services.graphiteApi = {
description = "Graphite Api Interface";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
environment = {
PYTHONPATH = let
aenv = pkgs.python3.buildEnv.override {
extraLibs = [ cfg.api.package pkgs.cairo pkgs.python3Packages.cffi ] ++ cfg.api.finders;
};
in "${aenv}/${pkgs.python3.sitePackages}";
GRAPHITE_API_CONFIG = graphiteApiConfig;
LD_LIBRARY_PATH = "${pkgs.cairo.out}/lib";
};
serviceConfig = {
ExecStart = ''
${pkgs.python3Packages.waitress}/bin/waitress-serve \
--host=${cfg.api.listenAddress} --port=${toString cfg.api.port} \
graphite_api.app:app
'';
User = "graphite";
Group = "graphite";
PermissionsStartOnly = true;
};
preStart = ''
if ! test -e ${dataDir}/db-created; then
mkdir -p ${dataDir}/cache/
chmod 0700 ${dataDir}/cache/
chown graphite:graphite ${cfg.dataDir}
chown -R graphite:graphite ${cfg.dataDir}/cache
touch ${dataDir}/db-created
fi
'';
};
})
(mkIf cfg.seyren.enable { (mkIf cfg.seyren.enable {
systemd.services.seyren = { systemd.services.seyren = {
description = "Graphite Alerting Dashboard"; description = "Graphite Alerting Dashboard";
@ -550,25 +412,9 @@ in {
services.mongodb.enable = mkDefault true; services.mongodb.enable = mkDefault true;
}) })
(mkIf cfg.beacon.enable {
systemd.services.graphite-beacon = {
description = "Grpahite Beacon Alerting Daemon";
wantedBy = [ "multi-user.target" ];
serviceConfig = {
ExecStart = ''
${pkgs.python3Packages.graphite_beacon}/bin/graphite-beacon \
--config=${pkgs.writeText "graphite-beacon.json" (builtins.toJSON cfg.beacon.config)}
'';
User = "graphite";
Group = "graphite";
};
};
})
(mkIf ( (mkIf (
cfg.carbon.enableCache || cfg.carbon.enableAggregator || cfg.carbon.enableRelay || cfg.carbon.enableCache || cfg.carbon.enableAggregator || cfg.carbon.enableRelay ||
cfg.web.enable || cfg.api.enable || cfg.web.enable || cfg.seyren.enable
cfg.seyren.enable || cfg.beacon.enable
) { ) {
users.users.graphite = { users.users.graphite = {
uid = config.ids.uids.graphite; uid = config.ids.uids.graphite;

View file

@ -3,7 +3,19 @@
let let
cfg = config.services.parsedmarc; cfg = config.services.parsedmarc;
opt = options.services.parsedmarc; opt = options.services.parsedmarc;
ini = pkgs.formats.ini {}; isSecret = v: isAttrs v && v ? _secret && isString v._secret;
ini = pkgs.formats.ini {
mkKeyValue = lib.flip lib.generators.mkKeyValueDefault "=" rec {
mkValueString = v:
if isInt v then toString v
else if isString v then v
else if true == v then "True"
else if false == v then "False"
else if isSecret v then hashString "sha256" v._secret
else throw "unsupported type ${typeOf v}: ${(lib.generators.toPretty {}) v}";
};
};
inherit (builtins) elem isAttrs isString isInt isList typeOf hashString;
in in
{ {
options.services.parsedmarc = { options.services.parsedmarc = {
@ -107,11 +119,35 @@ in
}; };
settings = lib.mkOption { settings = lib.mkOption {
example = lib.literalExpression ''
{
imap = {
host = "imap.example.com";
user = "alice@example.com";
password = { _secret = "/run/keys/imap_password" };
watch = true;
};
splunk_hec = {
url = "https://splunkhec.example.com";
token = { _secret = "/run/keys/splunk_token" };
index = "email";
};
}
'';
description = '' description = ''
Configuration parameters to set in Configuration parameters to set in
<filename>parsedmarc.ini</filename>. For a full list of <filename>parsedmarc.ini</filename>. For a full list of
available parameters, see available parameters, see
<link xlink:href="https://domainaware.github.io/parsedmarc/#configuration-file" />. <link xlink:href="https://domainaware.github.io/parsedmarc/#configuration-file" />.
Settings containing secret data should be set to an attribute
set containing the attribute <literal>_secret</literal> - a
string pointing to a file containing the value the option
should be set to. See the example to get a better picture of
this: in the resulting <filename>parsedmarc.ini</filename>
file, the <literal>splunk_hec.token</literal> key will be set
to the contents of the
<filename>/run/keys/splunk_token</filename> file.
''; '';
type = lib.types.submodule { type = lib.types.submodule {
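
The secret handling here mirrors the geoipupdate change above: the generated parsedmarc.ini contains sha256 placeholders for any _secret values, and replace-secret substitutes the real file contents at service start. A settings sketch following the option example (hosts and key paths are illustrative):

{
  services.parsedmarc = {
    enable = true;
    settings.imap = {
      host = "imap.example.com";
      user = "dmarc@example.com";
      # Kept outside the Nix store; only its hash appears in parsedmarc.ini
      # until ExecStartPre replaces it.
      password = { _secret = "/run/keys/imap_password"; };
      watch = true;
    };
  };
}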
@ -170,11 +206,18 @@ in
}; };
password = lib.mkOption { password = lib.mkOption {
type = with lib.types; nullOr path; type = with lib.types; nullOr (either path (attrsOf path));
default = null; default = null;
description = '' description = ''
The path to a file containing the IMAP server password. The IMAP server password.
Always handled as a secret whether the value is
wrapped in a <literal>{ _secret = ...; }</literal>
attrset or not (refer to <xref
linkend="opt-services.parsedmarc.settings" /> for
details).
''; '';
apply = x: if isAttrs x || x == null then x else { _secret = x; };
}; };
watch = lib.mkOption { watch = lib.mkOption {
@ -228,11 +271,18 @@ in
}; };
password = lib.mkOption { password = lib.mkOption {
type = with lib.types; nullOr path; type = with lib.types; nullOr (either path (attrsOf path));
default = null; default = null;
description = '' description = ''
The path to a file containing the SMTP server password. The SMTP server password.
Always handled as a secret whether the value is
wrapped in a <literal>{ _secret = ...; }</literal>
attrset or not (refer to <xref
linkend="opt-services.parsedmarc.settings" /> for
details).
''; '';
apply = x: if isAttrs x || x == null then x else { _secret = x; };
}; };
from = lib.mkOption { from = lib.mkOption {
@ -274,12 +324,19 @@ in
}; };
password = lib.mkOption { password = lib.mkOption {
type = with lib.types; nullOr path; type = with lib.types; nullOr (either path (attrsOf path));
default = null; default = null;
description = '' description = ''
The path to a file containing the password to use when The password to use when connecting to Elasticsearch,
connecting to Elasticsearch, if required. if required.
Always handled as a secret whether the value is
wrapped in a <literal>{ _secret = ...; }</literal>
attrset or not (refer to <xref
linkend="opt-services.parsedmarc.settings" /> for
details).
''; '';
apply = x: if isAttrs x || x == null then x else { _secret = x; };
}; };
ssl = lib.mkOption { ssl = lib.mkOption {
@ -299,63 +356,6 @@ in
''; '';
}; };
}; };
kafka = {
hosts = lib.mkOption {
default = [];
type = with lib.types; listOf str;
apply = x: if x == [] then null else lib.concatStringsSep "," x;
description = ''
A list of Apache Kafka hosts to publish parsed reports
to.
'';
};
user = lib.mkOption {
type = with lib.types; nullOr str;
default = null;
description = ''
Username to use when connecting to Kafka, if
required.
'';
};
password = lib.mkOption {
type = with lib.types; nullOr path;
default = null;
description = ''
The path to a file containing the password to use when
connecting to Kafka, if required.
'';
};
ssl = lib.mkOption {
type = with lib.types; nullOr bool;
default = null;
description = ''
Whether to use an encrypted SSL/TLS connection.
'';
};
aggregate_topic = lib.mkOption {
type = with lib.types; nullOr str;
default = null;
example = "aggregate";
description = ''
The Kafka topic to publish aggregate reports on.
'';
};
forensic_topic = lib.mkOption {
type = with lib.types; nullOr str;
default = null;
example = "forensic";
description = ''
The Kafka topic to publish forensic reports on.
'';
};
};
}; };
}; };
@@ -404,21 +404,14 @@ in
enable = cfg.provision.grafana.datasource || cfg.provision.grafana.dashboard; enable = cfg.provision.grafana.datasource || cfg.provision.grafana.dashboard;
datasources = datasources =
let let
pkgVer = lib.getVersion config.services.elasticsearch.package; esVersion = lib.getVersion config.services.elasticsearch.package;
esVersion =
if lib.versionOlder pkgVer "7" then
"60"
else if lib.versionOlder pkgVer "8" then
"70"
else
throw "When provisioning parsedmarc grafana datasources: unknown Elasticsearch version.";
in in
lib.mkIf cfg.provision.grafana.datasource [ lib.mkIf cfg.provision.grafana.datasource [
{ {
name = "dmarc-ag"; name = "dmarc-ag";
type = "elasticsearch"; type = "elasticsearch";
access = "proxy"; access = "proxy";
url = "localhost:9200"; url = "http://localhost:9200";
jsonData = { jsonData = {
timeField = "date_range"; timeField = "date_range";
inherit esVersion; inherit esVersion;
@@ -428,7 +421,7 @@ in
name = "dmarc-fo"; name = "dmarc-fo";
type = "elasticsearch"; type = "elasticsearch";
access = "proxy"; access = "proxy";
url = "localhost:9200"; url = "http://localhost:9200";
jsonData = { jsonData = {
timeField = "date_range"; timeField = "date_range";
inherit esVersion; inherit esVersion;
@@ -467,12 +460,17 @@ in
# lists, empty attrsets and null. This makes it possible to # lists, empty attrsets and null. This makes it possible to
# list interesting options in `settings` without them always # list interesting options in `settings` without them always
# ending up in the resulting config. # ending up in the resulting config.
filteredConfig = lib.converge (lib.filterAttrsRecursive (_: v: ! builtins.elem v [ null [] {} ])) cfg.settings; filteredConfig = lib.converge (lib.filterAttrsRecursive (_: v: ! elem v [ null [] {} ])) cfg.settings;
# Extract secrets (attributes set to an attrset with a
# "_secret" key) from the settings and generate the commands
# to run to perform the secret replacements.
secretPaths = lib.catAttrs "_secret" (lib.collect isSecret filteredConfig);
parsedmarcConfig = ini.generate "parsedmarc.ini" filteredConfig; parsedmarcConfig = ini.generate "parsedmarc.ini" filteredConfig;
mkSecretReplacement = file: mkSecretReplacement = file: ''
lib.optionalString (file != null) '' replace-secret ${lib.escapeShellArgs [ (hashString "sha256" file) file "/run/parsedmarc/parsedmarc.ini" ]}
replace-secret '${file}' '${file}' /run/parsedmarc/parsedmarc.ini
''; '';
secretReplacements = lib.concatMapStrings mkSecretReplacement secretPaths;
in in
{ {
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
@@ -487,10 +485,7 @@ in
umask u=rwx,g=,o= umask u=rwx,g=,o=
cp ${parsedmarcConfig} /run/parsedmarc/parsedmarc.ini cp ${parsedmarcConfig} /run/parsedmarc/parsedmarc.ini
chown parsedmarc:parsedmarc /run/parsedmarc/parsedmarc.ini chown parsedmarc:parsedmarc /run/parsedmarc/parsedmarc.ini
${mkSecretReplacement cfg.settings.smtp.password} ${secretReplacements}
${mkSecretReplacement cfg.settings.imap.password}
${mkSecretReplacement cfg.settings.elasticsearch.password}
${mkSecretReplacement cfg.settings.kafka.password}
'' + lib.optionalString cfg.provision.localMail.enable '' '' + lib.optionalString cfg.provision.localMail.enable ''
openssl rand -hex 64 >/run/parsedmarc/dmarc_user_passwd openssl rand -hex 64 >/run/parsedmarc/dmarc_user_passwd
replace-secret '@imap-password@' '/run/parsedmarc/dmarc_user_passwd' /run/parsedmarc/parsedmarc.ini replace-secret '@imap-password@' '/run/parsedmarc/dmarc_user_passwd' /run/parsedmarc/parsedmarc.ini
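The secret handling above relies on a small convention: any settings value of the form { _secret = "/path/to/file"; } stays out of the Nix store and is spliced into /run/parsedmarc/parsedmarc.ini when the service starts. A minimal sketch of how such paths can be gathered with lib.collect and lib.catAttrs, e.g. in nix repl (the predicate and the example values here are illustrative, not the module's exact code):

  let
    lib = (import <nixpkgs> { }).lib;

    # Example settings in the same shape as the option example above.
    settings = {
      imap = {
        host = "imap.example.com";
        password = { _secret = "/run/keys/imap_password"; };
      };
      splunk_hec = {
        token = { _secret = "/run/keys/splunk_token"; };
      };
    };

    # A value counts as a secret if it is an attrset carrying a `_secret` string.
    isSecret = v: lib.isAttrs v && v ? _secret && lib.isString v._secret;
  in
    # Recursively collect all secret attrsets, then pull out the file paths.
    lib.catAttrs "_secret" (lib.collect isSecret settings)
    # => [ "/run/keys/imap_password" "/run/keys/splunk_token" ]

Each collected path then yields one replace-secret call in the unit's start-up script, substituting a placeholder in the generated INI file with the contents of the secret file.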

View file

@@ -44,6 +44,8 @@ in
]; ];
# The timex collector needs to access clock APIs # The timex collector needs to access clock APIs
ProtectClock = any (collector: collector == "timex") cfg.disabledCollectors; ProtectClock = any (collector: collector == "timex") cfg.disabledCollectors;
# Allow space monitoring under /home
ProtectHome = "read-only";
}; };
}; };
} }

View file

@@ -283,7 +283,7 @@ in
User = cfg.user; User = cfg.user;
Group = cfg.group; Group = cfg.group;
StateDirectory = ""; StateDirectory = "";
ReadWritePaths = [ "" cfg.dataDir ]; ReadWritePaths = optionals (!cfg.autoMount) [ "" cfg.dataDir ];
} // optionalAttrs (cfg.serviceFdlimit != null) { LimitNOFILE = cfg.serviceFdlimit; }; } // optionalAttrs (cfg.serviceFdlimit != null) { LimitNOFILE = cfg.serviceFdlimit; };
} // optionalAttrs (!cfg.startWhenNeeded) { } // optionalAttrs (!cfg.startWhenNeeded) {
wantedBy = [ "default.target" ]; wantedBy = [ "default.target" ];

View file

@@ -13,7 +13,7 @@ let
foreground=YES foreground=YES
use=${cfg.use} use=${cfg.use}
login=${cfg.username} login=${cfg.username}
password=${lib.optionalString (cfg.protocol == "nsupdate") "/run/${RuntimeDirectory}/ddclient.key"} password=${if cfg.protocol == "nsupdate" then "/run/${RuntimeDirectory}/ddclient.key" else "@password_placeholder@"}
protocol=${cfg.protocol} protocol=${cfg.protocol}
${lib.optionalString (cfg.script != "") "script=${cfg.script}"} ${lib.optionalString (cfg.script != "") "script=${cfg.script}"}
${lib.optionalString (cfg.server != "") "server=${cfg.server}"} ${lib.optionalString (cfg.server != "") "server=${cfg.server}"}
@@ -33,10 +33,9 @@ let
${lib.optionalString (cfg.configFile == null) (if (cfg.protocol == "nsupdate") then '' ${lib.optionalString (cfg.configFile == null) (if (cfg.protocol == "nsupdate") then ''
install ${cfg.passwordFile} /run/${RuntimeDirectory}/ddclient.key install ${cfg.passwordFile} /run/${RuntimeDirectory}/ddclient.key
'' else if (cfg.passwordFile != null) then '' '' else if (cfg.passwordFile != null) then ''
password=$(printf "%q" "$(head -n 1 "${cfg.passwordFile}")") "${pkgs.replace-secret}/bin/replace-secret" "@password_placeholder@" "${cfg.passwordFile}" "/run/${RuntimeDirectory}/ddclient.conf"
sed -i "s|^password=$|password=$password|" /run/${RuntimeDirectory}/ddclient.conf
'' else '' '' else ''
sed -i '/^password=$/d' /run/${RuntimeDirectory}/ddclient.conf sed -i '/^password=@password_placeholder@$/d' /run/${RuntimeDirectory}/ddclient.conf
'')} '')}
''; '';
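The change above moves ddclient to the same placeholder-plus-replace-secret pattern: the config written to the Nix store only ever contains @password_placeholder@, and the real password is copied in from cfg.passwordFile at service start. A rough sketch of the pattern in isolation (the arguments and paths are illustrative, not the module's exact code):

  { pkgs, cfg }:  # hypothetical arguments for this sketch
  let
    runtimeDir = "ddclient";

    # World-readable template in the Nix store; it only carries a placeholder.
    configTemplate = pkgs.writeText "ddclient.conf" ''
      login=${cfg.username}
      password=@password_placeholder@
    '';
  in
  # Shell fragment to run before the daemon starts (e.g. as preStart):
  ''
    install -m 600 ${configTemplate} /run/${runtimeDir}/ddclient.conf
    ${pkgs.replace-secret}/bin/replace-secret \
      '@password_placeholder@' '${cfg.passwordFile}' \
      /run/${runtimeDir}/ddclient.conf
  ''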

View file

@@ -215,7 +215,7 @@ in
# dhcpcd. So do a "systemctl restart" instead. # dhcpcd. So do a "systemctl restart" instead.
stopIfChanged = false; stopIfChanged = false;
path = [ dhcpcd pkgs.nettools pkgs.openresolv ]; path = [ dhcpcd pkgs.nettools config.networking.resolvconf.package ];
unitConfig.ConditionCapability = "CAP_NET_ADMIN"; unitConfig.ConditionCapability = "CAP_NET_ADMIN";

View file

@@ -158,6 +158,10 @@ let
(sec "addressbook") (sec "addressbook")
(strOpt "defaulturl" cfg.addressbook.defaulturl) (strOpt "defaulturl" cfg.addressbook.defaulturl)
] ++ (optionalEmptyList "subscriptions" cfg.addressbook.subscriptions) ] ++ (optionalEmptyList "subscriptions" cfg.addressbook.subscriptions)
++ [
(sec "meshnets")
(boolOpt "yggdrasil" cfg.yggdrasil.enable)
] ++ (optionalNullString "yggaddress" cfg.yggdrasil.address)
++ (flip map ++ (flip map
(collect (proto: proto ? port && proto ? address) cfg.proto) (collect (proto: proto ? port && proto ? address) cfg.proto)
(proto: let protoOpts = [ (proto: let protoOpts = [
@@ -546,6 +550,17 @@ in
''; '';
}; };
yggdrasil.enable = mkEnableOption "Yggdrasil";
yggdrasil.address = mkOption {
type = with types; nullOr str;
default = null;
description = ''
Your local yggdrasil address. Specify it if you want to bind your router to a
particular address.
'';
};
proto.http = (mkEndpointOpt "http" "127.0.0.1" 7070) // { proto.http = (mkEndpointOpt "http" "127.0.0.1" 7070) // {
auth = mkEnableOption "Webconsole authentication"; auth = mkEnableOption "Webconsole authentication";
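For context, the new meshnets options above could be used roughly like this (the Yggdrasil address is an illustrative placeholder):

  {
    services.i2pd = {
      enable = true;
      # Enable Yggdrasil support in the router (the meshnets section above).
      yggdrasil.enable = true;
      # Optionally bind the router to one specific local Yggdrasil address.
      yggdrasil.address = "324:71e:281a:9ed3::ace";
    };
  }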

View file

@@ -0,0 +1,157 @@
{ config, lib, pkgs, ... }:
let
cfg = config.services.lokinet;
dataDir = "/var/lib/lokinet";
settingsFormat = pkgs.formats.ini { listsAsDuplicateKeys = true; };
configFile = settingsFormat.generate "lokinet.ini" (lib.filterAttrsRecursive (n: v: v != null) cfg.settings);
in with lib; {
options.services.lokinet = {
enable = mkEnableOption "Lokinet daemon";
package = mkOption {
type = types.package;
default = pkgs.lokinet;
defaultText = literalExpression "pkgs.lokinet";
description = "Lokinet package to use.";
};
useLocally = mkOption {
type = types.bool;
default = false;
example = true;
description = "Whether to use Lokinet locally.";
};
settings = mkOption {
type = with types;
submodule {
freeformType = settingsFormat.type;
options = {
dns = {
bind = mkOption {
type = str;
default = "127.3.2.1";
description = "Address to bind to for handling DNS requests.";
};
upstream = mkOption {
type = listOf str;
default = [ "9.9.9.10" ];
example = [ "1.1.1.1" "8.8.8.8" ];
description = ''
Upstream resolver(s) to use as fallback for non-loki addresses.
Multiple values accepted.
'';
};
};
network = {
exit = mkOption {
type = bool;
default = false;
description = ''
Whether to act as an exit node. Beware that this
increases demand on the server and may pose liability concerns.
Enable at your own risk.
'';
};
exit-node = mkOption {
type = nullOr (listOf str);
default = null;
example = ''
exit-node = [ "example.loki" ]; # maps all exit traffic to example.loki
exit-node = [ "example.loki:100.0.0.0/24" ]; # maps 100.0.0.0/24 to example.loki
'';
description = ''
Specify a `.loki` address and an optional IP range to use as an exit broker.
See <link xlink:href="http://probably.loki/wiki/index.php?title=Exit_Nodes"/> for
a list of exit nodes.
'';
};
keyfile = mkOption {
type = nullOr str;
default = null;
example = "snappkey.private";
description = ''
The private key file used to persist your address. If not specified, the address will be ephemeral.
The keyfile is generated automatically if the specified file does not exist.
'';
};
};
};
};
default = { };
example = literalExpression ''
{
dns = {
bind = "127.3.2.1";
upstream = [ "1.1.1.1" "8.8.8.8" ];
};
network.exit-node = [ "example.loki" "example2.loki" ];
}
'';
description = ''
Configuration for Lokinet.
Currently, the best way to view the available settings is by
generating a config file using `lokinet -g`.
'';
};
};
config = mkIf cfg.enable {
networking.resolvconf.extraConfig = mkIf cfg.useLocally ''
name_servers="${cfg.settings.dns.bind}"
'';
systemd.services.lokinet = {
description = "Lokinet";
after = [ "network-online.target" "network.target" ];
wants = [ "network-online.target" "network.target" ];
wantedBy = [ "multi-user.target" ];
preStart = ''
ln -sf ${cfg.package}/share/bootstrap.signed ${dataDir}
${pkgs.coreutils}/bin/install -m 600 ${configFile} ${dataDir}/lokinet.ini
${optionalString (cfg.settings.network.keyfile != null) ''
${pkgs.crudini}/bin/crudini --set ${dataDir}/lokinet.ini network keyfile "${dataDir}/${cfg.settings.network.keyfile}"
''}
'';
serviceConfig = {
DynamicUser = true;
StateDirectory = "lokinet";
AmbientCapabilities = [ "CAP_NET_ADMIN" "CAP_NET_BIND_SERVICE" ];
ExecStart = "${cfg.package}/bin/lokinet ${dataDir}/lokinet.ini";
Restart = "always";
RestartSec = "5s";
# hardening
LockPersonality = true;
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
PrivateTmp = true;
PrivateMounts = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectSystem = "strict";
ReadWritePaths = "/dev/net/tun";
RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" "AF_NETLINK" ];
RestrictNamespaces = true;
RestrictRealtime = true;
RestrictSUIDSGID = true;
};
};
environment.systemPackages = [ cfg.package ];
};
}
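A hedged usage sketch for the new module (the exit node name and resolver addresses are illustrative):

  {
    services.lokinet = {
      enable = true;
      # Resolve .loki names on this host by pointing resolvconf at Lokinet's DNS.
      useLocally = true;
      settings = {
        dns.upstream = [ "9.9.9.10" "1.1.1.1" ];
        # Optional: route traffic through a (hypothetical) exit broker.
        network.exit-node = [ "exit.example.loki" ];
      };
    };
  }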

View file

@@ -511,8 +511,13 @@ in
dataDir = mkOption { dataDir = mkOption {
type = types.path; type = types.path;
description = "Directory where Prosody stores its data";
default = "/var/lib/prosody"; default = "/var/lib/prosody";
description = ''
The prosody home directory used to store all data. If left as the default value
this directory will automatically be created before the prosody server starts; otherwise
you are responsible for ensuring the directory exists with appropriate ownership
and permissions.
'';
}; };
disco_items = mkOption { disco_items = mkOption {
@@ -524,13 +529,29 @@ in
user = mkOption { user = mkOption {
type = types.str; type = types.str;
default = "prosody"; default = "prosody";
description = "User account under which prosody runs."; description = ''
User account under which prosody runs.
<note><para>
If left as the default value this user will automatically be created
on system activation; otherwise you are responsible for
ensuring the user exists before the prosody service starts.
</para></note>
'';
}; };
group = mkOption { group = mkOption {
type = types.str; type = types.str;
default = "prosody"; default = "prosody";
description = "Group account under which prosody runs."; description = ''
Group account under which prosody runs.
<note><para>
If left as the default value this group will automatically be created
on system activation; otherwise you are responsible for
ensuring the group exists before the prosody service starts.
</para></note>
'';
}; };
allowRegistration = mkOption { allowRegistration = mkOption {
@@ -839,9 +860,8 @@ in
users.users.prosody = mkIf (cfg.user == "prosody") { users.users.prosody = mkIf (cfg.user == "prosody") {
uid = config.ids.uids.prosody; uid = config.ids.uids.prosody;
description = "Prosody user"; description = "Prosody user";
createHome = true;
inherit (cfg) group; inherit (cfg) group;
home = "${cfg.dataDir}"; home = cfg.dataDir;
}; };
users.groups.prosody = mkIf (cfg.group == "prosody") { users.groups.prosody = mkIf (cfg.group == "prosody") {
@@ -854,7 +874,8 @@ in
wants = [ "network-online.target" ]; wants = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
restartTriggers = [ config.environment.etc."prosody/prosody.cfg.lua".source ]; restartTriggers = [ config.environment.etc."prosody/prosody.cfg.lua".source ];
serviceConfig = { serviceConfig = mkMerge [
{
User = cfg.user; User = cfg.user;
Group = cfg.group; Group = cfg.group;
Type = "forking"; Type = "forking";
@@ -875,7 +896,11 @@ in
RestrictNamespaces = true; RestrictNamespaces = true;
RestrictRealtime = true; RestrictRealtime = true;
RestrictSUIDSGID = true; RestrictSUIDSGID = true;
}; }
(mkIf (cfg.dataDir == "/var/lib/prosody") {
StateDirectory = "prosody";
})
];
}; };
}; };
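Because StateDirectory is now only set when dataDir is left at its default of /var/lib/prosody, a custom data directory has to be provided by the operator. One way that could look (the path and the tmpfiles rule are illustrative; the module does not do this for you):

  {
    services.prosody = {
      enable = true;
      # Non-default location: the module will not create this directory.
      dataDir = "/persist/prosody";
      # ...virtual hosts, certificates and other required settings omitted.
    };

    # Ensure the directory exists with the right ownership before prosody starts.
    systemd.tmpfiles.rules = [
      "d /persist/prosody 0750 prosody prosody -"
    ];
  }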

View file

@@ -16,9 +16,9 @@ in
###### interface ###### interface
options = { options.services.radvd = {
services.radvd.enable = mkOption { enable = mkOption {
type = types.bool; type = types.bool;
default = false; default = false;
description = description =
@@ -32,7 +32,16 @@ in
''; '';
}; };
services.radvd.config = mkOption { package = mkOption {
type = types.package;
default = pkgs.radvd;
defaultText = literalExpression "pkgs.radvd";
description = ''
The radvd package to use.
'';
};
config = mkOption {
type = types.lines; type = types.lines;
example = example =
'' ''
@@ -67,7 +76,7 @@ in
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
after = [ "network.target" ]; after = [ "network.target" ];
serviceConfig = serviceConfig =
{ ExecStart = "@${pkgs.radvd}/bin/radvd radvd -n -u radvd -C ${confFile}"; { ExecStart = "@${cfg.package}/bin/radvd radvd -n -u radvd -C ${confFile}";
Restart = "always"; Restart = "always";
}; };
}; };
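With the new package option, a patched or alternative radvd build can be supplied without redefining the unit. A sketch (the patch file is hypothetical):

  { pkgs, ... }:
  {
    services.radvd = {
      enable = true;
      # Swap in a locally patched build instead of the default pkgs.radvd.
      package = pkgs.radvd.overrideAttrs (old: {
        patches = (old.patches or [ ]) ++ [ ./radvd-local-fix.patch ];
      });
      config = ''
        interface eth0 {
          AdvSendAdvert on;
          prefix 2001:db8::/64 { };
        };
      '';
    };
  }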

View file

@@ -179,8 +179,8 @@ in {
description = mdDoc '' description = mdDoc ''
Folders which should be shared by Syncthing. Folders which should be shared by Syncthing.
Note that you can still add devices manually, but those changes Note that you can still add folders manually, but those changes
will be reverted on restart if [overrideDevices](#opt-services.syncthing.overrideDevices) will be reverted on restart if [overrideFolders](#opt-services.syncthing.overrideFolders)
is enabled. is enabled.
''; '';
example = literalExpression '' example = literalExpression ''

View file

@@ -47,7 +47,7 @@ in {
systemd.services.tailscaled = { systemd.services.tailscaled = {
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
path = [ path = [
pkgs.openresolv # for configuring DNS in some configs config.networking.resolvconf.package # for configuring DNS in some configs
pkgs.procps # for collecting running services (opt-in feature) pkgs.procps # for collecting running services (opt-in feature)
pkgs.glibc # for `getent` to look up user shells pkgs.glibc # for `getent` to look up user shells
]; ];

View file

@@ -273,7 +273,7 @@ let
after = [ "network.target" "network-online.target" ]; after = [ "network.target" "network-online.target" ];
wantedBy = optional values.autostart "multi-user.target"; wantedBy = optional values.autostart "multi-user.target";
environment.DEVICE = name; environment.DEVICE = name;
path = [ pkgs.kmod pkgs.wireguard-tools ]; path = [ pkgs.kmod pkgs.wireguard-tools config.networking.resolvconf.package ];
serviceConfig = { serviceConfig = {
Type = "oneshot"; Type = "oneshot";
@@ -332,5 +332,11 @@ in {
# breaks the wg-quick routing because wireguard packets leave with a fwmark from wireguard. # breaks the wg-quick routing because wireguard packets leave with a fwmark from wireguard.
networking.firewall.checkReversePath = false; networking.firewall.checkReversePath = false;
systemd.services = mapAttrs' generateUnit cfg.interfaces; systemd.services = mapAttrs' generateUnit cfg.interfaces;
# Prevent networkd from clearing the rules set by wg-quick when restarted (e.g. when waking up from suspend).
systemd.network.config.networkConfig.ManageForeignRoutingPolicyRules = mkDefault false;
# WireGuard interfaces should be ignored in determining whether the network is online.
systemd.network.wait-online.ignoredInterfaces = builtins.attrNames cfg.interfaces;
}; };
} }
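To make the last two additions concrete: with a configuration such as the sketch below (key, address and endpoint are placeholders), builtins.attrNames cfg.interfaces evaluates to [ "wg0" ], so systemd-networkd leaves the wg-quick routing policy rules alone and wg0 no longer holds up network-online.target.

  {
    networking.wg-quick.interfaces.wg0 = {
      address = [ "10.100.0.2/24" ];
      privateKeyFile = "/run/keys/wg0.key";          # illustrative path
      peers = [{
        publicKey = "base64-public-key-goes-here";   # placeholder
        allowedIPs = [ "0.0.0.0/0" ];
        endpoint = "vpn.example.com:51820";
      }];
    };
    # With that, the module effectively sets:
    #   systemd.network.config.networkConfig.ManageForeignRoutingPolicyRules = false;
    #   systemd.network.wait-online.ignoredInterfaces = [ "wg0" ];
  }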

View file

@@ -6,7 +6,7 @@ let
cfg = config.services.privacyidea; cfg = config.services.privacyidea;
opt = options.services.privacyidea; opt = options.services.privacyidea;
uwsgi = pkgs.uwsgi.override { plugins = [ "python3" ]; }; uwsgi = pkgs.uwsgi.override { plugins = [ "python3" ]; python3 = pkgs.python39; };
python = uwsgi.python3; python = uwsgi.python3;
penv = python.withPackages (const [ pkgs.privacyidea ]); penv = python.withPackages (const [ pkgs.privacyidea ]);
logCfg = pkgs.writeText "privacyidea-log.cfg" '' logCfg = pkgs.writeText "privacyidea-log.cfg" ''

View file

@@ -7,6 +7,8 @@ let
opt = options.services.vault; opt = options.services.vault;
configFile = pkgs.writeText "vault.hcl" '' configFile = pkgs.writeText "vault.hcl" ''
# Vault in dev mode will refuse to start if its configuration sets a listener
${lib.optionalString (!cfg.dev) ''
listener "tcp" { listener "tcp" {
address = "${cfg.address}" address = "${cfg.address}"
${if (cfg.tlsCertFile == null || cfg.tlsKeyFile == null) then '' ${if (cfg.tlsCertFile == null || cfg.tlsKeyFile == null) then ''
@@ -17,6 +19,7 @@ let
''} ''}
${cfg.listenerExtraConfig} ${cfg.listenerExtraConfig}
} }
''}
storage "${cfg.storageBackend}" { storage "${cfg.storageBackend}" {
${optionalString (cfg.storagePath != null) ''path = "${cfg.storagePath}"''} ${optionalString (cfg.storagePath != null) ''path = "${cfg.storagePath}"''}
${optionalString (cfg.storageConfig != null) cfg.storageConfig} ${optionalString (cfg.storageConfig != null) cfg.storageConfig}
@@ -30,8 +33,10 @@ let
''; '';
allConfigPaths = [configFile] ++ cfg.extraSettingsPaths; allConfigPaths = [configFile] ++ cfg.extraSettingsPaths;
configOptions = escapeShellArgs
configOptions = escapeShellArgs (concatMap (p: ["-config" p]) allConfigPaths); (lib.optional cfg.dev "-dev" ++
lib.optional (cfg.dev && cfg.devRootTokenID != null) "-dev-root-token-id=${cfg.devRootTokenID}"
++ (concatMap (p: ["-config" p]) allConfigPaths));
in in
@@ -47,6 +52,22 @@ in
description = "This option specifies the vault package to use."; description = "This option specifies the vault package to use.";
}; };
dev = mkOption {
type = types.bool;
default = false;
description = ''
In this mode, Vault runs in-memory and starts unsealed. This option is not meant for production use but for development and testing, i.e. for NixOS tests.
'';
};
devRootTokenID = mkOption {
type = types.nullOr types.str;
default = null;
description = ''
Initial root token. This only applies when <option>services.vault.dev</option> is true.
'';
};
address = mkOption { address = mkOption {
type = types.str; type = types.str;
default = "127.0.0.1:8200"; default = "127.0.0.1:8200";
@@ -186,6 +207,9 @@ in
Group = "vault"; Group = "vault";
ExecStart = "${cfg.package}/bin/vault server ${configOptions}"; ExecStart = "${cfg.package}/bin/vault server ${configOptions}";
ExecReload = "${pkgs.coreutils}/bin/kill -SIGHUP $MAINPID"; ExecReload = "${pkgs.coreutils}/bin/kill -SIGHUP $MAINPID";
StateDirectory = "vault";
# In `dev` mode vault will put its token here
Environment = lib.optional (cfg.dev) "HOME=/var/lib/vault";
PrivateDevices = true; PrivateDevices = true;
PrivateTmp = true; PrivateTmp = true;
ProtectSystem = "full"; ProtectSystem = "full";
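A hedged sketch of the new dev-mode options in use, e.g. inside a NixOS test (the token is a throwaway value; dev mode must never be used in production):

  {
    services.vault = {
      enable = true;
      # Runs in-memory, starts unsealed, and skips the TCP listener stanza above.
      dev = true;
      devRootTokenID = "nixos-test-root-token";
    };
    # The resulting invocation is roughly:
    #   vault server -dev -dev-root-token-id=nixos-test-root-token -config <generated vault.hcl>
  }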

Some files were not shown because too many files have changed in this diff.