Project import generated by Copybara.

GitOrigin-RevId: a64e169e396460d6b5763a1de1dd197df8421688
Commit 410b979fe2 (parent 83405b6dd2), authored by Default email on 2023-03-24 01:07:29 +01:00.
1450 changed files with 32158 additions and 15248 deletions.


@@ -238,7 +238,7 @@ pkgs/development/python-modules/buildcatrust/ @ajs124 @lukegb @mweinelt
 # VsCode Extensions
 /pkgs/applications/editors/vscode @superherointj
-/pkgs/applications/editors/vscode/extensions @jonringer
+/pkgs/applications/editors/vscode/extensions @jonringer @superherointj
 # Prometheus exporter modules and tests
 /nixos/modules/services/monitoring/prometheus/exporters.nix @WilliButz
@@ -310,3 +310,8 @@ pkgs/development/python-modules/buildcatrust/ @ajs124 @lukegb @mweinelt
 /pkgs/build-support/node/build-npm-package @winterqt
 /pkgs/build-support/node/fetch-npm-deps @winterqt
 /doc/languages-frameworks/javascript.section.md @winterqt
+
+# OCaml
+/pkgs/build-support/ocaml @romildo @superherointj @ulrikstrid
+/pkgs/development/compilers/ocaml @romildo @superherointj @ulrikstrid
+/pkgs/development/ocaml-modules @romildo @superherointj @ulrikstrid


@@ -1,7 +1,7 @@
 name: "Check that maintainer list is sorted"
 on:
-  pull_request:
+  pull_request_target:
     paths:
       - 'maintainers/maintainer-list.nix'
 permissions:
@@ -13,7 +13,10 @@ jobs:
     if: github.repository_owner == 'NixOS'
     steps:
     - uses: actions/checkout@v3
-    - uses: cachix/install-nix-action@v19
+      with:
+        # pull_request_target checks out the base branch by default
+        ref: refs/pull/${{ github.event.pull_request.number }}/merge
+    - uses: cachix/install-nix-action@v20
       with:
         # explicitly enable sandbox
        extra_nix_config: sandbox = true


@@ -14,7 +14,7 @@ nixpkgs follows the [official elixir deprecation schedule](https://hexdocs.pm/el
 All BEAM-related expressions are available via the top-level `beam` attribute, which includes:

-- `interpreters`: a set of compilers running on the BEAM, including multiple Erlang/OTP versions (`beam.interpreters.erlangR22`, etc), Elixir (`beam.interpreters.elixir`) and LFE (Lisp Flavoured Erlang) (`beam.interpreters.lfe`).
+- `interpreters`: a set of compilers running on the BEAM, including multiple Erlang/OTP versions (`beam.interpreters.erlang_22`, etc), Elixir (`beam.interpreters.elixir`) and LFE (Lisp Flavoured Erlang) (`beam.interpreters.lfe`).

 - `packages`: a set of package builders (Mix and rebar3), each compiled with a specific Erlang/OTP version, e.g. `beam.packages.erlang22`.
@@ -22,7 +22,7 @@ The default Erlang compiler, defined by `beam.interpreters.erlang`, is aliased a
 To create a package builder built with a custom Erlang version, use the lambda, `beam.packagesWith`, which accepts an Erlang/OTP derivation and produces a package builder similar to `beam.packages.erlang`.

-Many Erlang/OTP distributions available in `beam.interpreters` have versions with ODBC and/or Java enabled or without wx (no observer support). For example, there's `beam.interpreters.erlangR22_odbc_javac`, which corresponds to `beam.interpreters.erlangR22` and `beam.interpreters.erlangR22_nox`, which corresponds to `beam.interpreters.erlangR22`.
+Many Erlang/OTP distributions available in `beam.interpreters` have versions with ODBC and/or Java enabled or without wx (no observer support). For example, there's `beam.interpreters.erlang_22_odbc_javac`, which corresponds to `beam.interpreters.erlang_22` and `beam.interpreters.erlang_22_nox`, which corresponds to `beam.interpreters.erlang_22`.

 ## Build Tools {#build-tools}
@@ -128,7 +128,7 @@ You will need to run the build process once to fix the hash to correspond to you
 ###### FOD {#fixed-output-derivation}

-A fixed output derivation will download mix dependencies from the internet. To ensure reproducibility, a hash will be supplied. Note that mix is relatively reproducible. An FOD generating a different hash on each run hasn't been observed (as opposed to npm where the chances are relatively high). See [elixir_ls](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/beam-modules/elixir-ls/default.nix) for a usage example of FOD.
+A fixed output derivation will download mix dependencies from the internet. To ensure reproducibility, a hash will be supplied. Note that mix is relatively reproducible. An FOD generating a different hash on each run hasn't been observed (as opposed to npm where the chances are relatively high). See [elixir-ls](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/beam-modules/elixir-ls/default.nix) for a usage example of FOD.

 Practical steps
@@ -154,7 +154,7 @@ Here is how your `default.nix` file would look for a phoenix project.
 with import <nixpkgs> { };
 let
-  # beam.interpreters.erlangR23 is available if you need a particular version
+  # beam.interpreters.erlang_23 is available if you need a particular version
   packages = beam.packagesWith beam.interpreters.erlang;
   pname = "your_project";
@@ -274,7 +274,7 @@ Usually, we need to create a `shell.nix` file and do our development inside of t
 with pkgs;
 let
-  elixir = beam.packages.erlangR24.elixir_1_12;
+  elixir = beam.packages.erlang_24.elixir_1_12;
 in
 mkShell {
   buildInputs = [ elixir ];
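As a quick illustration of the renamed attributes, here is a minimal sketch of building a package set on a specific Erlang/OTP release; the choice of `erlang_24` and `rebar3` is an assumption for illustration, not part of the change above:

```nix
let
  pkgs = import <nixpkgs> { };
  # beam.packagesWith accepts any Erlang/OTP derivation from beam.interpreters
  beamPackages = pkgs.beam.packagesWith pkgs.beam.interpreters.erlang_24;
in
  # the resulting set exposes the usual builders, e.g. rebar3 and the Mix helpers
  beamPackages.rebar3
```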


@@ -88,7 +88,7 @@ To package Dotnet applications, you can use `buildDotnetModule`. This has simila
 * `runtimeDeps` is used to wrap libraries into `LD_LIBRARY_PATH`. This is how dotnet usually handles runtime dependencies.
 * `buildType` is used to change the type of build. Possible values are `Release`, `Debug`, etc. By default, this is set to `Release`.
 * `selfContainedBuild` allows to enable the [self-contained](https://docs.microsoft.com/en-us/dotnet/core/deploying/#publish-self-contained) build flag. By default, it is set to false and generated applications have a dependency on the selected dotnet runtime. If enabled, the dotnet runtime is bundled into the executable and the built app has no dependency on Dotnet.
-* `dotnet-sdk` is useful in cases where you need to change what dotnet SDK is being used.
+* `dotnet-sdk` is useful in cases where you need to change what dotnet SDK is being used. You can also set this to the result of `dotnetSdkPackages.combinePackages`, if the project uses multiple SDKs to build.
 * `dotnet-runtime` is useful in cases where you need to change what dotnet runtime is being used. This can be either a regular dotnet runtime, or an aspnetcore.
 * `dotnet-test-sdk` is useful in cases where unit tests expect a different dotnet SDK. By default, this is set to the `dotnet-sdk` attribute.
 * `testProjectFile` is useful in cases where the regular project file does not contain the unit tests. It gets restored and build, but not installed. You may need to regenerate your nuget lockfile after setting this.
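To see how the attributes listed above fit together, here is a hedged sketch of a `buildDotnetModule` call; the package itself, `projectFile`, `nugetDeps` and the specific `dotnetCorePackages` attributes are assumptions for illustration only:

```nix
{ lib, buildDotnetModule, fetchFromGitHub, dotnetCorePackages, ffmpeg }:

buildDotnetModule rec {
  pname = "example-app";   # hypothetical project
  version = "1.0.0";

  src = fetchFromGitHub {
    owner = "example";
    repo = "example-app";
    rev = "v${version}";
    hash = lib.fakeHash;   # replace with the real hash after the first build attempt
  };

  projectFile = "src/ExampleApp.csproj";
  nugetDeps = ./deps.nix;  # generated NuGet lockfile

  dotnet-sdk = dotnetCorePackages.sdk_6_0;
  dotnet-runtime = dotnetCorePackages.runtime_6_0;

  buildType = "Release";          # default; shown for completeness
  selfContainedBuild = false;     # keep the dependency on the selected runtime
  runtimeDeps = [ ffmpeg ];       # wrapped into LD_LIBRARY_PATH at run time
}
```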


@@ -411,13 +411,13 @@ rustPlatform.buildRustPackage rec {
 }
 ```

-## Compiling non-Rust packages that include Rust code {#compiling-non-rust-packages-that-include-rust-code}
+### Compiling non-Rust packages that include Rust code {#compiling-non-rust-packages-that-include-rust-code}

 Several non-Rust packages incorporate Rust code for performance- or
 security-sensitive parts. `rustPlatform` exposes several functions and
 hooks that can be used to integrate Cargo in non-Rust packages.

-### Vendoring of dependencies {#vendoring-of-dependencies}
+#### Vendoring of dependencies {#vendoring-of-dependencies}

 Since network access is not allowed in sandboxed builds, Rust crate
 dependencies need to be retrieved using a fetcher. `rustPlatform`
@@ -477,7 +477,7 @@ added. To find the correct hash, you can first use `lib.fakeSha256` or
 `lib.fakeHash` as a stub hash. Building `cargoDeps` will then inform
 you of the correct hash.

-### Hooks {#hooks}
+#### Hooks {#hooks}

 `rustPlatform` provides the following hooks to automate Cargo builds:
@@ -513,7 +513,7 @@ you of the correct hash.
 * `bindgenHook`: for crates which use `bindgen` as a build dependency, lets
   `bindgen` find `libclang` and `libclang` find the libraries in `buildInputs`.

-### Examples {#examples}
+#### Examples {#examples}

 #### Python package using `setuptools-rust` {#python-package-using-setuptools-rust}
@@ -642,7 +642,127 @@ buildPythonPackage rec {
 }
 ```

-## Setting Up `nix-shell` {#setting-up-nix-shell}
+## `buildRustCrate`: Compiling Rust crates using Nix instead of Cargo {#compiling-rust-crates-using-nix-instead-of-cargo}
+
+### Simple operation {#simple-operation}
+
+When run, `cargo build` produces a file called `Cargo.lock`,
+containing pinned versions of all dependencies. Nixpkgs contains a
+tool called `crate2Nix` (`nix-shell -p crate2nix`), which can be
+used to turn a `Cargo.lock` into a Nix expression. That Nix
+expression calls `rustc` directly (hence bypassing Cargo), and can
+be used to compile a crate and all its dependencies.
+
+See [`crate2nix`'s documentation](https://github.com/kolloch/crate2nix#known-restrictions)
+for instructions on how to use it.
+
+### Handling external dependencies {#handling-external-dependencies}
+
+Some crates require external libraries. For crates from
+[crates.io](https://crates.io), such libraries can be specified in
+`defaultCrateOverrides` package in nixpkgs itself.
+
+Starting from that file, one can add more overrides, to add features
+or build inputs by overriding the hello crate in a separate file.
+
+```nix
+with import <nixpkgs> {};
+((import ./hello.nix).hello {}).override {
+  crateOverrides = defaultCrateOverrides // {
+    hello = attrs: { buildInputs = [ openssl ]; };
+  };
+}
+```
+
+Here, `crateOverrides` is expected to be a attribute set, where the
+key is the crate name without version number and the value a function.
+The function gets all attributes passed to `buildRustCrate` as first
+argument and returns a set that contains all attribute that should be
+overwritten.
+
+For more complicated cases, such as when parts of the crate's
+derivation depend on the crate's version, the `attrs` argument of
+the override above can be read, as in the following example, which
+patches the derivation:
+
+```nix
+with import <nixpkgs> {};
+((import ./hello.nix).hello {}).override {
+  crateOverrides = defaultCrateOverrides // {
+    hello = attrs: lib.optionalAttrs (lib.versionAtLeast attrs.version "1.0") {
+      postPatch = ''
+        substituteInPlace lib/zoneinfo.rs \
+          --replace "/usr/share/zoneinfo" "${tzdata}/share/zoneinfo"
+      '';
+    };
+  };
+}
+```
+
+Another situation is when we want to override a nested
+dependency. This actually works in the exact same way, since the
+`crateOverrides` parameter is forwarded to the crate's
+dependencies. For instance, to override the build inputs for crate
+`libc` in the example above, where `libc` is a dependency of the main
+crate, we could do:
+
+```nix
+with import <nixpkgs> {};
+((import hello.nix).hello {}).override {
+  crateOverrides = defaultCrateOverrides // {
+    libc = attrs: { buildInputs = []; };
+  };
+}
+```
+
+### Options and phases configuration {#options-and-phases-configuration}
+
+Actually, the overrides introduced in the previous section are more
+general. A number of other parameters can be overridden:
+
+- The version of `rustc` used to compile the crate:
+
+  ```nix
+  (hello {}).override { rust = pkgs.rust; };
+  ```
+
+- Whether to build in release mode or debug mode (release mode by
+  default):
+
+  ```nix
+  (hello {}).override { release = false; };
+  ```
+
+- Whether to print the commands sent to `rustc` when building
+  (equivalent to `--verbose` in cargo:
+
+  ```nix
+  (hello {}).override { verbose = false; };
+  ```
+
+- Extra arguments to be passed to `rustc`:
+
+  ```nix
+  (hello {}).override { extraRustcOpts = "-Z debuginfo=2"; };
+  ```
+
+- Phases, just like in any other derivation, can be specified using
+  the following attributes: `preUnpack`, `postUnpack`, `prePatch`,
+  `patches`, `postPatch`, `preConfigure` (in the case of a Rust crate,
+  this is run before calling the "build" script), `postConfigure`
+  (after the "build" script),`preBuild`, `postBuild`, `preInstall` and
+  `postInstall`. As an example, here is how to create a new module
+  before running the build script:
+
+  ```nix
+  (hello {}).override {
+    preConfigure = ''
+      echo "pub const PATH=\"${hi.out}\";" >> src/path.rs"
+    '';
+  };
+  ```
+
+### Setting Up `nix-shell` {#setting-up-nix-shell}
+
 Oftentimes you want to develop code from within `nix-shell`. Unfortunately
 `buildRustCrate` does not support common `nix-shell` operations directly
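To complement the `buildRustCrate` material above, here is a sketch of consuming a `Cargo.nix` generated by `crate2nix`; the `rootCrate.build` attribute follows `crate2nix`'s own conventions and is an assumption here, not something defined by the section itself:

```nix
let
  pkgs = import <nixpkgs> { };
  # ./Cargo.nix is assumed to have been produced from Cargo.lock by crate2nix
  cargoNix = pkgs.callPackage ./Cargo.nix { };
in
  # build the top-level crate of the workspace together with all of its dependencies
  cargoNix.rootCrate.build
```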


@@ -101,11 +101,11 @@ To build a `stdenv` package in a [`nix-shell`](https://nixos.org/manual/nix/unst
 ```bash
 nix-shell '<nixpkgs>' -A some_package
-eval ${unpackPhase:-unpackPhase}
+eval "${unpackPhase:-unpackPhase}"
 cd $sourceRoot
-eval ${patchPhase:-patchPhase}
-eval ${configurePhase:-configurePhase}
-eval ${buildPhase:-buildPhase}
+eval "${patchPhase:-patchPhase}"
+eval "${configurePhase:-configurePhase}"
+eval "${buildPhase:-buildPhase}"
 ```

 To modify a [phase](#sec-stdenv-phases), first print it with
@@ -380,39 +380,107 @@ Values inside it are not passed to the builder, so you can change them without t
 #### `passthru.updateScript` {#var-passthru-updateScript}

-A script to be run by `maintainers/scripts/update.nix` when the package is matched. It needs to be an executable file, either on the file system:
-
-```nix
-passthru.updateScript = ./update.sh;
-```
-
-or inside the expression itself:
-
-```nix
-passthru.updateScript = writeScript "update-zoom-us" ''
-  #!/usr/bin/env nix-shell
-  #!nix-shell -i bash -p curl pcre common-updater-scripts
-
-  set -eu -o pipefail
-
-  version="$(curl -sI https://zoom.us/client/latest/zoom_x86_64.tar.xz | grep -Fi 'Location:' | pcregrep -o1 '/(([0-9]\.?)+)/')"
-  update-source-version zoom-us "$version"
-'';
-```
-
-The attribute can also contain a list, a script followed by arguments to be passed to it:
-
-```nix
-passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ];
-```
-
-The script will be run with the `UPDATE_NIX_NAME`, `UPDATE_NIX_PNAME`, `UPDATE_NIX_OLD_VERSION` and `UPDATE_NIX_ATTR_PATH` environment variables set respectively to the name, pname, old version and attribute path of the package it is supposed to update.
+A script to be run by `maintainers/scripts/update.nix` when the package is matched. The attribute can contain one of the following:
+
+- []{#var-passthru-updateScript-command} an executable file, either on the file system:
+
+  ```nix
+  passthru.updateScript = ./update.sh;
+  ```
+
+  or inside the expression itself:
+
+  ```nix
+  passthru.updateScript = writeScript "update-zoom-us" ''
+    #!/usr/bin/env nix-shell
+    #!nix-shell -i bash -p curl pcre common-updater-scripts
+
+    set -eu -o pipefail
+
+    version="$(curl -sI https://zoom.us/client/latest/zoom_x86_64.tar.xz | grep -Fi 'Location:' | pcregrep -o1 '/(([0-9]\.?)+)/')"
+    update-source-version zoom-us "$version"
+  '';
+  ```
+
+- a list, a script followed by arguments to be passed to it:
+
+  ```nix
+  passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ];
+  ```
+
+- an attribute set containing:
+  - [`command`]{#var-passthru-updateScript-set-command} a string or list in the [format expected by `passthru.updateScript`](#var-passthru-updateScript-command).
+  - [`attrPath`]{#var-passthru-updateScript-set-attrPath} (optional) a string containing the canonical attribute path for the package. If present, it will be passed to the update script instead of the attribute path on which the package was discovered during Nixpkgs traversal.
+  - [`supportedFeatures`]{#var-passthru-updateScript-set-supportedFeatures} (optional) a list of the [extra features](#var-passthru-updateScript-supported-features) the script supports.
+
+  ```nix
+  passthru.updateScript = {
+    command = [ ../../update.sh pname ];
+    attrPath = pname;
+    supportedFeatures = [ … ];
+  };
+  ```
+
+##### How update scripts are executed? {#var-passthru-updateScript-execution}
+
+Update scripts are to be invoked by `maintainers/scripts/update.nix` script. You can run `nix-shell maintainers/scripts/update.nix` in the root of Nixpkgs repository for information on how to use it. `update.nix` offers several modes for selecting packages to update (e.g. select by attribute path, traverse Nixpkgs and filter by maintainer, etc.), and it will execute update scripts for all matched packages that have an `updateScript` attribute.
+
+Each update script will be passed the following environment variables:
+
+- [`UPDATE_NIX_NAME`]{#var-passthru-updateScript-env-UPDATE_NIX_NAME} content of the `name` attribute of the updated package.
+- [`UPDATE_NIX_PNAME`]{#var-passthru-updateScript-env-UPDATE_NIX_PNAME} content of the `pname` attribute of the updated package.
+- [`UPDATE_NIX_OLD_VERSION`]{#var-passthru-updateScript-env-UPDATE_NIX_OLD_VERSION} content of the `version` attribute of the updated package.
+- [`UPDATE_NIX_ATTR_PATH`]{#var-passthru-updateScript-env-UPDATE_NIX_ATTR_PATH} attribute path the `update.nix` discovered the package on (or the [canonical `attrPath`](#var-passthru-updateScript-set-attrPath) when available). Example: `pantheon.elementary-terminal`

 ::: {.note}
-The script will be usually run from the root of the Nixpkgs repository but you should not rely on that. Also note that the update scripts will be run in parallel by default; you should avoid running `git commit` or any other commands that cannot handle that.
+An update script will be usually run from the root of the Nixpkgs repository but you should not rely on that. Also note that `update.nix` executes update scripts in parallel by default so you should avoid running `git commit` or any other commands that cannot handle that.
 :::

-For information about how to run the updates, execute `nix-shell maintainers/scripts/update.nix`.
+::: {.tip}
+While update scripts should not create commits themselves, `maintainers/scripts/update.nix` supports automatically creating commits when running it with `--argstr commit true`. If you need to customize commit message, you can have the update script implement [`commit`](#var-passthru-updateScript-commit) feature.
+:::
+
+##### Supported features {#var-passthru-updateScript-supported-features}
+
+###### `commit` {#var-passthru-updateScript-commit}
+
+This feature allows update scripts to *ask* `update.nix` to create Git commits.
+
+When support of this feature is declared, whenever the update script exits with `0` return status, it is expected to print a JSON list containing an object described below for each updated attribute to standard output.
+
+When `update.nix` is run with `--argstr commit true` arguments, it will create a separate commit for each of the objects. An empty list can be returned when the script did not update any files, for example, when the package is already at the latest version.
+
+The commit object contains the following values:
+
+- [`attrPath`]{#var-passthru-updateScript-commit-attrPath} a string containing attribute path.
+- [`oldVersion`]{#var-passthru-updateScript-commit-oldVersion} a string containing old version.
+- [`newVersion`]{#var-passthru-updateScript-commit-newVersion} a string containing new version.
+- [`files`]{#var-passthru-updateScript-commit-files} a non-empty list of file paths (as strings) to add to the commit.
+- [`commitBody`]{#var-passthru-updateScript-commit-commitBody} (optional) a string with extra content to be appended to the default commit message (useful for adding changelog links).
+- [`commitMessage`]{#var-passthru-updateScript-commit-commitMessage} (optional) a string to use instead of the default commit message.
+
+If the returned array contains exactly one object (e.g. `[{}]`), all values are optional and will be determined automatically.
+
+```{=docbook}
+<example>
+<title>Standard output of an update script using commit feature</title>
+```
+
+```json
+[
+  {
+    "attrPath": "volume_key",
+    "oldVersion": "0.3.11",
+    "newVersion": "0.3.12",
+    "files": [
+      "/path/to/nixpkgs/pkgs/development/libraries/volume-key/default.nix"
+    ]
+  }
+]
+```
+
+```{=docbook}
+</example>
+```

 ### Recursive attributes in `mkDerivation` {#mkderivation-recursive-attributes}
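Tying the pieces above together, a minimal sketch of an update script that consumes the environment variables and declares the `commit` feature might look like this (the upstream version query is hypothetical):

```nix
passthru.updateScript = {
  command = writeScript "update-example" ''
    #!/usr/bin/env nix-shell
    #!nix-shell -i bash -p curl common-updater-scripts

    set -eu -o pipefail

    # Exported by maintainers/scripts/update.nix for every script it runs.
    oldVersion="$UPDATE_NIX_OLD_VERSION"
    attrPath="$UPDATE_NIX_ATTR_PATH"

    # Hypothetical endpoint returning the latest upstream version.
    newVersion="$(curl -s https://example.org/latest-version)"

    if [ "$newVersion" = "$oldVersion" ]; then
      # Nothing changed; an empty list tells update.nix not to commit anything.
      echo "[]"
      exit 0
    fi

    update-source-version "$attrPath" "$newVersion"

    # A single object with all fields omitted lets update.nix fill them in.
    echo '[{}]'
  '';
  supportedFeatures = [ "commit" ];
};
```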


@@ -422,7 +422,7 @@ ${expr "" v}
       (if v then "True" else "False")
     else if isFunction v then
       abort "generators.toDhall: cannot convert a function to Dhall"
-    else if isNull v then
+    else if v == null then
       abort "generators.toDhall: cannot convert a null to Dhall"
     else
       builtins.toJSON v;


@@ -1416,6 +1416,17 @@
     githubId = 5193600;
     name = "Atkins Chang";
   };
+  atkrad = {
+    name = "Mohammad Abdolirad";
+    email = "m.abdolirad@gmail.com";
+    github = "atkrad";
+    githubId = 351364;
+    keys = [
+      {
+        fingerprint = "0380 F2F8 DF7A BA1A E7DB D84A 1935 1496 62CA FDB8";
+      }
+    ];
+  };
   atnnn = {
     email = "etienne@atnnn.com";
     github = "AtnNn";
@@ -1458,6 +1469,15 @@
     githubId = 12958979;
     name = "Mika Naylor";
   };
+  autumnal = {
+    name = "Sven Friedrich";
+    email = "sven@autumnal.de";
+    github = "sevenautumns";
+    githubId = 20627275;
+    keys = [{
+      fingerprint = "6A2E 7FDD 1037 11A8 B996 E28E B051 064E 2FCA B71B";
+    }];
+  };
   avakhrenev = {
     email = "avakhrenev@gmail.com";
     github = "avakhrenev";
@@ -4124,6 +4144,12 @@
     githubId = 6689924;
     name = "David Terry";
   };
+  dylanmtaylor = {
+    email = "dylan@dylanmtaylor.com";
+    github = "dylanmtaylor";
+    githubId = 277927;
+    name = "Dylan Taylor";
+  };
   dysinger = {
     email = "tim@dysinger.net";
     github = "dysinger";
@@ -4658,6 +4684,12 @@
     github = "ethindp";
     githubId = 8030501;
   };
+  ethinx = {
+    email = "eth2net@gmail.com";
+    github = "ethinx";
+    githubId = 965612;
+    name = "York Wong";
+  };
   Etjean = {
     email = "et.jean@outlook.fr";
     github = "Etjean";
@@ -6521,6 +6553,16 @@
       fingerprint = "6C2B 55D4 4E04 8266 6B7D DA1A 422E 9EDA E015 7170";
     }];
   };
+  infinitivewitch = {
+    name = "Infinitive Witch";
+    email = "infinitivewitch@disroot.org";
+    matrix = "@infinitivewitch:fedora.im";
+    github = "infinitivewitch";
+    githubId = 128256833;
+    keys = [{
+      fingerprint = "CF3D F4AD C7BD 1FDB A88B E4B3 CA2D 43DA 939D 94FB";
+    }];
+  };
   ingenieroariel = {
     email = "ariel@nunez.co";
     github = "ingenieroariel";
@@ -7706,6 +7748,13 @@
     githubId = 87115;
     name = "Wael Nasreddine";
   };
+  kalebpace = {
+    email = "kaleb.pace@pm.me";
+    matrix = "@kalebpace:matrix.org";
+    github = "kalebpace";
+    githubId = 5586615;
+    name = "Kaleb Pace";
+  };
   kalekseev = {
     email = "mail@kalekseev.com";
     github = "kalekseev";
@@ -7791,7 +7840,6 @@
     name = "Claudius Holeksa";
   };
   ken-matsui = {
-    email = "nix@kmatsui.me";
     github = "ken-matsui";
     githubId = 26405363;
     name = "Ken Matsui";
@@ -8174,6 +8222,12 @@
     githubId = 735008;
     name = "Louis Taylor";
   };
+  kranurag7 = {
+    email = "contact.anurag7@gmail.com";
+    github = "kranurag7";
+    githubId = 81210977;
+    name = "Anurag";
+  };
   kranzes = {
     email = "personal@ilanjoselevich.com";
     github = "Kranzes";
@@ -9156,6 +9210,11 @@
     githubId = 115218;
     name = "Felix Richter";
   };
+  MakiseKurisu = {
+    github = "MakiseKurisu";
+    githubId = 2321672;
+    name = "Makise Kurisu";
+  };
   malbarbo = {
     email = "malbarbo@gmail.com";
     github = "malbarbo";
@@ -9242,6 +9301,12 @@
     github = "marius851000";
     githubId = 22586596;
   };
+  markbeep = {
+    email = "mrkswrn@gmail.com";
+    github = "markbeep";
+    githubId = 20665331;
+    name = "Mark";
+  };
   markus1189 = {
     email = "markus1189@gmail.com";
     github = "markus1189";
@@ -12707,6 +12772,11 @@
     githubId = 12279531;
     name = "Ricardo Guevara";
   };
+  rhendric = {
+    name = "Ryan Hendrickson";
+    github = "rhendric";
+    githubId = 1570964;
+  };
   rhoriguchi = {
     email = "ryan.horiguchi@gmail.com";
     github = "rhoriguchi";
@@ -14278,6 +14348,16 @@
     githubId = 22163194;
     name = "Stel Abrego";
   };
+  stepbrobd = {
+    name = "StepBroBD";
+    github = "StepBroBD";
+    githubId = 81826728;
+    email = "Hi@StepBroBD.com";
+    matrix = "@stepbrobd:matrix.org";
+    keys = [{
+      fingerprint = "5D8B FA8B 286A C2EF 6EE4 8598 F742 B72C 8926 1A51";
+    }];
+  };
   stephank = {
     email = "nix@stephank.nl";
     matrix = "@skochen:matrix.org";
@@ -15662,6 +15742,12 @@
     github = "deviant";
     githubId = 68829907;
   };
+  vaci = {
+    email = "vaci@vaci.org";
+    github = "vaci";
+    githubId = 6882568;
+    name = "Vaci";
+  };
   vaibhavsagar = {
     email = "vaibhavsagar@gmail.com";
     matrix = "@vaibhavsagar:matrix.org";
@@ -16714,6 +16800,12 @@
     githubId = 908716;
     name = "Zach Coyle";
   };
+  Zaechus = {
+    email = "zaechus@proton.me";
+    github = "Zaechus";
+    githubId = 19353212;
+    name = "Maxwell Anderson";
+  };
   zagy = {
     email = "cz@flyingcircus.io";
     github = "zagy";


@@ -127,8 +127,7 @@ echo "$urllist" | xargs wget $wgetargs -nH -r -c --no-parent && {
   # TODO fetch only missing tar.xz files
   echo "fetching $filecount tar.xz files ..."
-  urllist="$(echo "$filelist" | while read file; do echo "$BASE_URL/$file"; done)"
-  echo "$urllist" | xargs wget $wgetargs -nH -r -c --no-parent
+  echo "$filelist" | xargs wget $wgetargs -nH -r -c --no-parent

   echo "generating sha256 files ..."
   find . -type f -name '*.tar.xz' | while read src; do


@@ -3,7 +3,7 @@ let
   pkgs = import ../../.. {};
   inherit (pkgs) lib;
   getDeps = _: pkg: {
-    deps = builtins.filter (x: !isNull x) (map (x: x.pname or null) (pkg.propagatedBuildInputs or []));
+    deps = builtins.filter (x: x != null) (map (x: x.pname or null) (pkg.propagatedBuildInputs or []));
     broken = (pkg.meta.hydraPlatforms or [null]) == [];
   };
 in


@@ -0,0 +1,468 @@
#!/usr/bin/env nix-shell
#!nix-shell update-octave-shell.nix -i python3
"""
Update an Octave package expression by passing in the `.nix` file, or the directory containing it.
You can pass in multiple files or paths.
You'll likely want to use
``
$ ./update-octave-libraries ../../pkgs/development/octave-modules/**/default.nix
``
to update all non-pinned libraries in that folder.
"""
import argparse
import os
import pathlib
import re
import requests
import yaml
from concurrent.futures import ThreadPoolExecutor as Pool
from packaging.version import Version as _Version
from packaging.version import InvalidVersion
from packaging.specifiers import SpecifierSet
import collections
import subprocess
import tempfile
INDEX = "https://raw.githubusercontent.com/gnu-octave/packages/main/packages"
"""url of Octave packages' source on GitHub"""
EXTENSIONS = ['tar.gz', 'tar.bz2', 'tar', 'zip']
"""Permitted file extensions. These are evaluated from left to right and the first occurance is returned."""
PRERELEASES = False
GIT = "git"
NIXPGKS_ROOT = subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).decode('utf-8').strip()
import logging
logging.basicConfig(level=logging.INFO)
class Version(_Version, collections.abc.Sequence):
def __init__(self, version):
super().__init__(version)
# We cannot use `str(Version(0.04.21))` because that becomes `0.4.21`
# https://github.com/avian2/unidecode/issues/13#issuecomment-354538882
self.raw_version = version
def __getitem__(self, i):
return self._version.release[i]
def __len__(self):
return len(self._version.release)
def __iter__(self):
yield from self._version.release
def _get_values(attribute, text):
"""Match attribute in text and return all matches.
:returns: List of matches.
"""
regex = '{}\s+=\s+"(.*)";'.format(attribute)
regex = re.compile(regex)
values = regex.findall(text)
return values
def _get_unique_value(attribute, text):
"""Match attribute in text and return unique match.
:returns: Single match.
"""
values = _get_values(attribute, text)
n = len(values)
if n > 1:
raise ValueError("found too many values for {}".format(attribute))
elif n == 1:
return values[0]
else:
raise ValueError("no value found for {}".format(attribute))
def _get_line_and_value(attribute, text):
"""Match attribute in text. Return the line and the value of the attribute."""
regex = '({}\s+=\s+"(.*)";)'.format(attribute)
regex = re.compile(regex)
value = regex.findall(text)
n = len(value)
if n > 1:
raise ValueError("found too many values for {}".format(attribute))
elif n == 1:
return value[0]
else:
raise ValueError("no value found for {}".format(attribute))
def _replace_value(attribute, value, text):
"""Search and replace value of attribute in text."""
old_line, old_value = _get_line_and_value(attribute, text)
new_line = old_line.replace(old_value, value)
new_text = text.replace(old_line, new_line)
return new_text
def _fetch_page(url):
r = requests.get(url)
if r.status_code == requests.codes.ok:
return list(yaml.safe_load_all(r.content))[0]
else:
raise ValueError("request for {} failed".format(url))
def _fetch_github(url):
headers = {}
token = os.environ.get('GITHUB_API_TOKEN')
if token:
headers["Authorization"] = f"token {token}"
r = requests.get(url, headers=headers)
if r.status_code == requests.codes.ok:
return r.json()
else:
raise ValueError("request for {} failed".format(url))
SEMVER = {
'major' : 0,
'minor' : 1,
'patch' : 2,
}
def _determine_latest_version(current_version, target, versions):
"""Determine latest version, given `target`, returning the more recent version.
"""
current_version = Version(current_version)
def _parse_versions(versions):
for v in versions:
try:
yield Version(v)
except InvalidVersion:
pass
versions = _parse_versions(versions)
index = SEMVER[target]
ceiling = list(current_version[0:index])
if len(ceiling) == 0:
ceiling = None
else:
ceiling[-1]+=1
ceiling = Version(".".join(map(str, ceiling)))
# We do not want prereleases
versions = SpecifierSet(prereleases=PRERELEASES).filter(versions)
if ceiling is not None:
versions = SpecifierSet(f"<{ceiling}").filter(versions)
return (max(sorted(versions))).raw_version
def _get_latest_version_octave_packages(package, extension, current_version, target):
"""Get latest version and hash from Octave Packages."""
url = "{}/{}.yaml".format(INDEX, package)
yaml = _fetch_page(url)
versions = list(map(lambda pv: pv['id'], yaml['versions']))
version = _determine_latest_version(current_version, target, versions)
try:
releases = [v if v['id'] == version else None for v in yaml['versions']]
except KeyError as e:
raise KeyError('Could not find version {} for {}'.format(version, package)) from e
for release in releases:
if release['url'].endswith(extension):
sha256 = release['sha256']
break
else:
sha256 = None
return version, sha256, None
def _get_latest_version_github(package, extension, current_version, target):
def strip_prefix(tag):
return re.sub("^[^0-9]*", "", tag)
def get_prefix(string):
matches = re.findall(r"^([^0-9]*)", string)
return next(iter(matches), "")
# when invoked as an updateScript, UPDATE_NIX_ATTR_PATH will be set
# this allows us to work with packages which live outside of octave-modules
attr_path = os.environ.get("UPDATE_NIX_ATTR_PATH", f"octavePackages.{package}")
try:
homepage = subprocess.check_output(
["nix", "eval", "-f", f"{NIXPGKS_ROOT}/default.nix", "--raw", f"{attr_path}.src.meta.homepage"])\
.decode('utf-8')
except Exception as e:
raise ValueError(f"Unable to determine homepage: {e}")
owner_repo = homepage[len("https://github.com/"):] # remove prefix
owner, repo = owner_repo.split("/")
url = f"https://api.github.com/repos/{owner}/{repo}/releases"
all_releases = _fetch_github(url)
releases = list(filter(lambda x: not x['prerelease'], all_releases))
if len(releases) == 0:
raise ValueError(f"{homepage} does not contain any stable releases")
versions = map(lambda x: strip_prefix(x['tag_name']), releases)
version = _determine_latest_version(current_version, target, versions)
release = next(filter(lambda x: strip_prefix(x['tag_name']) == version, releases))
prefix = get_prefix(release['tag_name'])
try:
sha256 = subprocess.check_output(["nix-prefetch-url", "--type", "sha256", "--unpack", f"{release['tarball_url']}"], stderr=subprocess.DEVNULL)\
.decode('utf-8').strip()
except:
# this may fail if they have both a branch and a tag of the same name, attempt tag name
tag_url = str(release['tarball_url']).replace("tarball","tarball/refs/tags")
sha256 = subprocess.check_output(["nix-prefetch-url", "--type", "sha256", "--unpack", tag_url], stderr=subprocess.DEVNULL)\
.decode('utf-8').strip()
return version, sha256, prefix
def _get_latest_version_git(package, extension, current_version, target):
"""NOTE: Unimplemented!"""
# attr_path = os.environ.get("UPDATE_NIX_ATTR_PATH", f"octavePackages.{package}")
# try:
# download_url = subprocess.check_output(
# ["nix", "--extra-experimental-features", "nix-command", "eval", "-f", f"{NIXPGKS_ROOT}/default.nix", "--raw", f"{attr_path}.src.url"])\
# .decode('utf-8')
# except Exception as e:
# raise ValueError(f"Unable to determine download link: {e}")
# with tempfile.TemporaryDirectory(prefix=attr_path) as new_clone_location:
# subprocess.run(["git", "clone", download_url, new_clone_location])
# newest_commit = subprocess.check_output(
# ["git" "rev-parse" "$(git branch -r)" "|" "tail" "-n" "1"]).decode('utf-8')
pass
FETCHERS = {
'fetchFromGitHub' : _get_latest_version_github,
'fetchurl' : _get_latest_version_octave_packages,
'fetchgit' : _get_latest_version_git,
}
DEFAULT_SETUPTOOLS_EXTENSION = 'tar.gz'
FORMATS = {
'setuptools' : DEFAULT_SETUPTOOLS_EXTENSION,
}
def _determine_fetcher(text):
# Count occurrences of fetchers.
nfetchers = sum(text.count('src = {}'.format(fetcher)) for fetcher in FETCHERS.keys())
if nfetchers == 0:
raise ValueError("no fetcher.")
elif nfetchers > 1:
raise ValueError("multiple fetchers.")
else:
# Then we check which fetcher to use.
for fetcher in FETCHERS.keys():
if 'src = {}'.format(fetcher) in text:
return fetcher
def _determine_extension(text, fetcher):
"""Determine what extension is used in the expression.
If we use:
- fetchPypi, we check if format is specified.
- fetchurl, we determine the extension from the url.
- fetchFromGitHub we simply use `.tar.gz`.
"""
if fetcher == 'fetchurl':
url = _get_unique_value('url', text)
extension = os.path.splitext(url)[1]
elif fetcher == 'fetchFromGitHub' or fetcher == 'fetchgit':
if "fetchSubmodules" in text:
raise ValueError("fetchFromGitHub fetcher doesn't support submodules")
extension = "tar.gz"
return extension
def _update_package(path, target):
# Read the expression
with open(path, 'r') as f:
text = f.read()
# Determine pname. Many files have more than one pname
pnames = _get_values('pname', text)
# Determine version.
version = _get_unique_value('version', text)
# First we check how many fetchers are mentioned.
fetcher = _determine_fetcher(text)
extension = _determine_extension(text, fetcher)
# Attempt a fetch using each pname, e.g. backports-zoneinfo vs backports.zoneinfo
successful_fetch = False
for pname in pnames:
if fetcher == "fetchgit":
logging.warning(f"You must update {pname} MANUALLY!")
return { 'path': path, 'target': target, 'pname': pname,
'old_version': version, 'new_version': version }
try:
new_version, new_sha256, prefix = FETCHERS[fetcher](pname, extension, version, target)
successful_fetch = True
break
except ValueError:
continue
if not successful_fetch:
raise ValueError(f"Unable to find correct package using these pnames: {pnames}")
if new_version == version:
logging.info("Path {}: no update available for {}.".format(path, pname))
return False
elif Version(new_version) <= Version(version):
raise ValueError("downgrade for {}.".format(pname))
if not new_sha256:
raise ValueError("no file available for {}.".format(pname))
text = _replace_value('version', new_version, text)
# hashes from pypi are 16-bit encoded sha256's, normalize it to sri to avoid merge conflicts
# sri hashes have been the default format since nix 2.4+
sri_hash = subprocess.check_output(["nix", "--extra-experimental-features", "nix-command", "hash", "to-sri", "--type", "sha256", new_sha256]).decode('utf-8').strip()
# fetchers can specify a sha256, or a sri hash
try:
text = _replace_value('sha256', sri_hash, text)
except ValueError:
text = _replace_value('hash', sri_hash, text)
if fetcher == 'fetchFromGitHub':
# in the case of fetchFromGitHub, it's common to see `rev = version;` or `rev = "v${version}";`
# in which no string value is meant to be substituted. However, we can just overwrite the previous value.
regex = '(rev\s+=\s+[^;]*;)'
regex = re.compile(regex)
matches = regex.findall(text)
n = len(matches)
if n == 0:
raise ValueError("Unable to find rev value for {}.".format(pname))
else:
# forcefully rewrite rev, incase tagging conventions changed for a release
match = matches[0]
text = text.replace(match, f'rev = "refs/tags/{prefix}${{version}}";')
# incase there's no prefix, just rewrite without interpolation
text = text.replace('"${version}";', 'version;')
with open(path, 'w') as f:
f.write(text)
logging.info("Path {}: updated {} from {} to {}".format(path, pname, version, new_version))
result = {
'path' : path,
'target': target,
'pname': pname,
'old_version' : version,
'new_version' : new_version,
#'fetcher' : fetcher,
}
return result
def _update(path, target):
# We need to read and modify a Nix expression.
if os.path.isdir(path):
path = os.path.join(path, 'default.nix')
# If a default.nix does not exist, we quit.
if not os.path.isfile(path):
logging.info("Path {}: does not exist.".format(path))
return False
# If file is not a Nix expression, we quit.
if not path.endswith(".nix"):
logging.info("Path {}: does not end with `.nix`.".format(path))
return False
try:
return _update_package(path, target)
except ValueError as e:
logging.warning("Path {}: {}".format(path, e))
return False
def _commit(path, pname, old_version, new_version, pkgs_prefix="octave: ", **kwargs):
"""Commit result.
"""
msg = f'{pkgs_prefix}{pname}: {old_version} -> {new_version}'
try:
subprocess.check_call([GIT, 'add', path])
subprocess.check_call([GIT, 'commit', '-m', msg])
except subprocess.CalledProcessError as e:
subprocess.check_call([GIT, 'checkout', path])
raise subprocess.CalledProcessError(f'Could not commit {path}') from e
return True
def main():
epilog = """
environment variables:
GITHUB_API_TOKEN\tGitHub API token used when updating github packages
"""
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog)
parser.add_argument('package', type=str, nargs='+')
parser.add_argument('--target', type=str, choices=SEMVER.keys(), default='major')
parser.add_argument('--commit', action='store_true', help='Create a commit for each package update')
parser.add_argument('--use-pkgs-prefix', action='store_true', help='Use octavePackages.${pname}: instead of octave: ${pname}: when making commits')
args = parser.parse_args()
target = args.target
packages = list(map(os.path.abspath, args.package))
logging.info("Updating packages...")
# Use threads to update packages concurrently
with Pool() as p:
results = list(filter(bool, p.map(lambda pkg: _update(pkg, target), packages)))
logging.info("Finished updating packages.")
commit_options = {}
if args.use_pkgs_prefix:
logging.info("Using octavePackages. prefix for commits")
commit_options["pkgs_prefix"] = "octavePackages."
# Commits are created sequentially.
if args.commit:
logging.info("Committing updates...")
# list forces evaluation
list(map(lambda x: _commit(**x, **commit_options), results))
logging.info("Finished committing updates")
count = len(results)
logging.info("{} package(s) updated".format(count))
if __name__ == '__main__':
main()


@@ -0,0 +1,12 @@
+{ nixpkgs ? import ../.. { }
+}:
+with nixpkgs;
+let
+  pyEnv = python3.withPackages(ps: with ps; [ packaging requests toolz pyyaml ]);
+in
+mkShell {
+  packages = [
+    pyEnv
+    nix-prefetch-scripts
+  ];
+}

View file

@@ -166,7 +166,11 @@ let
         --manpage-urls ${manpageUrls} \
         --revision ${lib.escapeShellArg revision} \
         ./manual.md \
-        ./manual-combined.xml
+        ./manual-combined-pre.xml
+
+      ${pkgs.libxslt.bin}/bin/xsltproc \
+        -o manual-combined.xml ${./../../lib/make-options-doc/postprocess-option-descriptions.xsl} \
+        manual-combined-pre.xml

       ${linterFunctions}


@@ -65,7 +65,7 @@ In addition to numerous new and upgraded packages, this release has the followin
 - [ArchiSteamFarm](https://github.com/JustArchiNET/ArchiSteamFarm), a C# application with primary purpose of idling Steam cards from multiple accounts simultaneously. Available as [services.archisteamfarm](#opt-services.archisteamfarm.enable).
-- [BaGet](https://loic-sharma.github.io/BaGet/), a lightweight NuGet and symbol server. Available at [services.baget](#opt-services.baget.enable).
+- [BaGet](https://loic-sharma.github.io/BaGet/), a lightweight NuGet and symbol server. Available at services.baget.
 - [bird-lg](https://github.com/xddxdd/bird-lg-go), a BGP looking glass for Bird Routing. Available as [services.bird-lg](#opt-services.bird-lg.package).


@ -47,6 +47,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- [goeland](https://github.com/slurdge/goeland), an alternative to rss2email written in golang with many filters. Available as [services.goeland](#opt-services.goeland.enable). - [goeland](https://github.com/slurdge/goeland), an alternative to rss2email written in golang with many filters. Available as [services.goeland](#opt-services.goeland.enable).
- [alertmanager-irc-relay](https://github.com/google/alertmanager-irc-relay), a Prometheus Alertmanager IRC Relay. Available as [services.prometheus.alertmanagerIrcRelay](options.html#opt-services.prometheus.alertmanagerIrcRelay.enable).
- [tts](https://github.com/coqui-ai/TTS), a battle-tested deep learning toolkit for Text-to-Speech. Mutiple servers may be configured below [services.tts.servers](#opt-services.tts.servers). - [tts](https://github.com/coqui-ai/TTS), a battle-tested deep learning toolkit for Text-to-Speech. Mutiple servers may be configured below [services.tts.servers](#opt-services.tts.servers).
- [atuin](https://github.com/ellie/atuin), a sync server for shell history. Available as [services.atuin](#opt-services.atuin.enable). - [atuin](https://github.com/ellie/atuin), a sync server for shell history. Available as [services.atuin](#opt-services.atuin.enable).
@ -57,6 +59,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- [QDMR](https://dm3mat.darc.de/qdmr/), a GUI application and command line tool for programming DMR radios [programs.qdmr](#opt-programs.qdmr.enable) - [QDMR](https://dm3mat.darc.de/qdmr/), a GUI application and command line tool for programming DMR radios [programs.qdmr](#opt-programs.qdmr.enable)
- [keyd](https://github.com/rvaiya/keyd), a key remapping daemon for linux. Available as [services.keyd](#opt-services.keyd.enable).
- [v2rayA](https://v2raya.org), a Linux web GUI client of Project V which supports V2Ray, Xray, SS, SSR, Trojan and Pingtunnel. Available as [services.v2raya](options.html#opt-services.v2raya.enable). - [v2rayA](https://v2raya.org), a Linux web GUI client of Project V which supports V2Ray, Xray, SS, SSR, Trojan and Pingtunnel. Available as [services.v2raya](options.html#opt-services.v2raya.enable).
- [ulogd](https://www.netfilter.org/projects/ulogd/index.html), a userspace logging daemon for netfilter/iptables related logging. Available as [services.ulogd](options.html#opt-services.ulogd.enable). - [ulogd](https://www.netfilter.org/projects/ulogd/index.html), a userspace logging daemon for netfilter/iptables related logging. Available as [services.ulogd](options.html#opt-services.ulogd.enable).
@ -65,6 +69,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- [photoprism](https://photoprism.app/), a AI-Powered Photos App for the Decentralized Web. Available as [services.photoprism](options.html#opt-services.photoprism.enable). - [photoprism](https://photoprism.app/), a AI-Powered Photos App for the Decentralized Web. Available as [services.photoprism](options.html#opt-services.photoprism.enable).
- [peroxide](https://github.com/ljanyst/peroxide), a fork of the official [ProtonMail bridge](https://github.com/ProtonMail/proton-bridge) that aims to be similar to [Hydroxide](https://github.com/emersion/hydroxide). Available as [services.peroxide](#opt-services.peroxide.enable).
- [autosuspend](https://github.com/languitar/autosuspend), a python daemon that suspends a system if certain conditions are met, or not met. - [autosuspend](https://github.com/languitar/autosuspend), a python daemon that suspends a system if certain conditions are met, or not met.
- [sharing](https://github.com/parvardegr/sharing), a command-line tool to share directories and files from the CLI to iOS and Android devices without the need of an extra client app. Available as [programs.sharing](#opt-programs.sharing.enable). - [sharing](https://github.com/parvardegr/sharing), a command-line tool to share directories and files from the CLI to iOS and Android devices without the need of an extra client app. Available as [programs.sharing](#opt-programs.sharing.enable).
@ -75,6 +81,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- [woodpecker-server](https://woodpecker-ci.org/), a simple CI engine with great extensibility. Available as [services.woodpecker-server](#opt-services.woodpecker-server.enable). - [woodpecker-server](https://woodpecker-ci.org/), a simple CI engine with great extensibility. Available as [services.woodpecker-server](#opt-services.woodpecker-server.enable).
- [ReGreet](https://github.com/rharish101/ReGreet), a clean and customizable greeter for greetd. Available as [programs.regreet](#opt-programs.regreet.enable).
## Backward Incompatibilities {#sec-release-23.05-incompatibilities} ## Backward Incompatibilities {#sec-release-23.05-incompatibilities}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. --> <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@ -116,16 +124,22 @@ In addition to numerous new and upgraded packages, this release has the followin
- `tut` has been updated from 1.0.34 to 2.0.0, and now uses the TOML format for the configuration file instead of INI. Additional information can be found [here](https://github.com/RasmusLindroth/tut/releases/tag/2.0.0). - `tut` has been updated from 1.0.34 to 2.0.0, and now uses the TOML format for the configuration file instead of INI. Additional information can be found [here](https://github.com/RasmusLindroth/tut/releases/tag/2.0.0).
- `i3status-rust` has been updated from 0.22.0 to 0.30.5, and this brings many changes to its configuration format. Additional information can be found [here](https://github.com/greshake/i3status-rust/blob/v0.30.0/NEWS.md).
- The `wordpress` derivation no longer contains any builtin plugins or themes. If you need them you have to add them back to prevent your site from breaking. You can find them in `wordpressPackages.{plugins,themes}`. - The `wordpress` derivation no longer contains any builtin plugins or themes. If you need them you have to add them back to prevent your site from breaking. You can find them in `wordpressPackages.{plugins,themes}`.
- `llvmPackages_rocm.llvm` will not contain `clang` or `compiler-rt`. `llvmPackages_rocm.clang` will not contain `llvm`. `llvmPackages_rocm.clangNoCompilerRt` has been removed in favor of using `llvmPackages_rocm.clang-unwrapped`. - `llvmPackages_rocm.llvm` will not contain `clang` or `compiler-rt`. `llvmPackages_rocm.clang` will not contain `llvm`. `llvmPackages_rocm.clangNoCompilerRt` has been removed in favor of using `llvmPackages_rocm.clang-unwrapped`.
- `services.xserver.desktopManager.plasma5.excludePackages` has been moved to `environment.plasma5.excludePackages`, for consistency with other Desktop Environments
- The EC2 image module previously detected and automatically mounted ext3-formatted instance store devices and partitions in stage-1 (initramfs), storing `/tmp` on the first discovered device. This behaviour, which only catered to very specific use cases and could not be disabled, has been removed. Users relying on this should provide their own implementation, and probably use ext4 and perform the mount in stage-2. - The EC2 image module previously detected and automatically mounted ext3-formatted instance store devices and partitions in stage-1 (initramfs), storing `/tmp` on the first discovered device. This behaviour, which only catered to very specific use cases and could not be disabled, has been removed. Users relying on this should provide their own implementation, and probably use ext4 and perform the mount in stage-2.
- `teleport` has been upgraded from major version 10 to major version 12. Please see upstream [upgrade instructions](https://goteleport.com/docs/setup/operations/upgrading/) and release notes for versions [11](https://goteleport.com/docs/changelog/#1100) and [12](https://goteleport.com/docs/changelog/#1201). Note that Teleport does not officially support upgrades across more than one major version at a time. If you're running Teleport server components, it is recommended to first upgrade to an intermediate 11.x version by setting `services.teleport.package = pkgs.teleport_11`. Afterwards, this option can be removed to upgrade to the default version (12). - `teleport` has been upgraded from major version 10 to major version 12. Please see upstream [upgrade instructions](https://goteleport.com/docs/setup/operations/upgrading/) and release notes for versions [11](https://goteleport.com/docs/changelog/#1100) and [12](https://goteleport.com/docs/changelog/#1201). Note that Teleport does not officially support upgrades across more than one major version at a time. If you're running Teleport server components, it is recommended to first upgrade to an intermediate 11.x version by setting `services.teleport.package = pkgs.teleport_11`. Afterwards, this option can be removed to upgrade to the default version (12).
- The EC2 image module previously detected and activated swap-formatted instance store devices and partitions in stage-1 (initramfs). This behaviour has been removed. Users relying on this should provide their own implementation. - The EC2 image module previously detected and activated swap-formatted instance store devices and partitions in stage-1 (initramfs). This behaviour has been removed. Users relying on this should provide their own implementation.
- `fail2ban` has been updated to 1.0.2, which has a few breaking changes compared to 0.11.2 ([changelog for 1.0.1](https://github.com/fail2ban/fail2ban/blob/1.0.1/ChangeLog), [changelog for 1.0.2](https://github.com/fail2ban/fail2ban/blob/1.0.2/ChangeLog))
- Calling `makeSetupHook` without passing a `name` argument is deprecated. - Calling `makeSetupHook` without passing a `name` argument is deprecated.
- `lib.systems.examples.ghcjs` and consequently `pkgsCross.ghcjs` now use the target triplet `javascript-unknown-ghcjs` instead of `js-unknown-ghcjs`. This has been done to match an [upstream decision](https://gitlab.haskell.org/ghc/ghc/-/commit/6636b670233522f01d002c9b97827d00289dbf5c) to follow Cabal's platform naming more closely. Nixpkgs will also reject `js` as an architecture name. - `lib.systems.examples.ghcjs` and consequently `pkgsCross.ghcjs` now use the target triplet `javascript-unknown-ghcjs` instead of `js-unknown-ghcjs`. This has been done to match an [upstream decision](https://gitlab.haskell.org/ghc/ghc/-/commit/6636b670233522f01d002c9b97827d00289dbf5c) to follow Cabal's platform naming more closely. Nixpkgs will also reject `js` as an architecture name.
@ -148,6 +162,12 @@ In addition to numerous new and upgraded packages, this release has the followin
- Deprecated `xlibsWrapper` transitional package has been removed in favour of direct use of its constitutents: `xorg.libX11`, `freetype` and others. - Deprecated `xlibsWrapper` transitional package has been removed in favour of direct use of its constitutents: `xorg.libX11`, `freetype` and others.
- The latest available version of Nextcloud is v26 (available as `pkgs.nextcloud26`), which uses PHP 8.2 as its interpreter by default. The installation logic is as follows:
- If `system.stateVersion` is >=23.05, `pkgs.nextcloud26` will be installed by default.
- If `system.stateVersion` is >=22.11, `pkgs.nextcloud25` will be installed by default.
- Please note that a direct upgrade from v24 (or older) to v26 is not possible: Nextcloud prohibits skipping major versions while upgrading. Upgrade one major version at a time, e.g. to v25 first, by declaring [`services.nextcloud.package = pkgs.nextcloud25;`](options.html#opt-services.nextcloud.package) (see the sketch after this list).
- It's recommended to use the latest version available (i.e. v26) and to specify that using `services.nextcloud.package`.
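A minimal sketch of the recommended two-step pin, assuming the host is currently on v24 or older:

```nix
# First pin v25 and let the v25 upgrade complete ...
services.nextcloud.package = pkgs.nextcloud25;
# ... then change the pin to pkgs.nextcloud26 (or drop it entirely
# on system.stateVersion >= 23.05, where v26 is the default).
```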
- .NET 5.0 was removed because it is end-of-life; use a newer, supported .NET version. See https://dotnet.microsoft.com/en-us/platform/support/policy/dotnet-core - .NET 5.0 was removed because it is end-of-life; use a newer, supported .NET version. See https://dotnet.microsoft.com/en-us/platform/support/policy/dotnet-core
- The iputils package, which is installed by default, no longer provides the - The iputils package, which is installed by default, no longer provides the
@ -159,6 +179,14 @@ In addition to numerous new and upgraded packages, this release has the followin
- conntrack helper autodetection has been removed from kernels 6.0 and up upstream, and an assertion was added to ensure things don't silently stop working. Migrate your configuration to assign helpers explicitly or use an older LTS kernel branch as a temporary workaround. - conntrack helper autodetection has been removed from kernels 6.0 and up upstream, and an assertion was added to ensure things don't silently stop working. Migrate your configuration to assign helpers explicitly or use an older LTS kernel branch as a temporary workaround.
- The catch-all `hardware.video.hidpi.enable` option was removed. Users on high-density displays may want to (a configuration sketch follows this list):
- Set `services.xserver.upscaleDefaultCursor` to upscale the default X11 cursor for higher resolutions
- Adjust settings under `fonts.fontconfig` according to preference
- Adjust `console.font` according to preference, though the kernel will generally choose a reasonably sized font
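A configuration sketch for the options above; the Terminus console font path is just one possible choice:

```nix
{
  # Upscale the default X11 cursor on high-resolution displays.
  services.xserver.upscaleDefaultCursor = true;
  # Optional: a larger console font (the kernel default is often sufficient).
  console.font = "${pkgs.terminus_font}/share/consolefonts/ter-v32n.psf.gz";
  # Tune font rendering to personal preference, e.g.:
  fonts.fontconfig.antialias = true;
}
```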
- The `baget` package and module was removed due to being unmaintained.
## Other Notable Changes {#sec-release-23.05-notable-changes} ## Other Notable Changes {#sec-release-23.05-notable-changes}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. --> <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@ -202,6 +230,8 @@ In addition to numerous new and upgraded packages, this release has the followin
The `{aclUse,superUser,disableActions}` attributes have been renamed; `pluginsConfig` now also accepts an attribute set of booleans; passing plain PHP is deprecated. The `{aclUse,superUser,disableActions}` attributes have been renamed; `pluginsConfig` now also accepts an attribute set of booleans; passing plain PHP is deprecated.
Same applies to `acl` which now also accepts structured settings. Same applies to `acl` which now also accepts structured settings.
- The `zsh` package changes the way to set environment variables on NixOS systems where `programs.zsh.enable` equals `false`. It now sources `/etc/set-environment` when reading the system-level `zshenv` file. Before, it sourced `/etc/profile` when reading the system-level `zprofile` file.
- The `wordpress` service now takes configuration via the `services.wordpress.sites.<name>.settings` attribute set; `extraConfig` is still available to append additional text to `wp-config.php`. - The `wordpress` service now takes configuration via the `services.wordpress.sites.<name>.settings` attribute set; `extraConfig` is still available to append additional text to `wp-config.php`.
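Relating to the `wordpress` change above, a minimal sketch for a hypothetical site named `blog`; the keys under `settings` are ordinary `wp-config.php` constants, not module-defined options:

```nix
services.wordpress.sites."blog" = {
  settings = {
    WP_DEFAULT_THEME = "twentytwentythree";
    WP_DEBUG = false;
  };
  # extraConfig still appends raw PHP to wp-config.php:
  extraConfig = ''
    define('WP_POST_REVISIONS', 5);
  '';
};
```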
- To reduce closure size, the `nixos/modules/profiles/minimal.nix` profile disables installation of documentation and manuals. It also disables the `logrotate` and `udisks2` services. - To reduce closure size, the `nixos/modules/profiles/minimal.nix` profile disables installation of documentation and manuals. It also disables the `logrotate` and `udisks2` services.
@ -217,6 +247,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- `mastodon` now supports connection to a remote `PostgreSQL` database. - `mastodon` now supports connection to a remote `PostgreSQL` database.
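A sketch of what a remote database connection might look like; the `services.mastodon.database.*` option names below are assumptions and should be checked against the module documentation:

```nix
services.mastodon.database = {
  createLocally = false;           # assumed option: skip provisioning local PostgreSQL
  host = "db.example.internal";    # hypothetical remote host
  port = 5432;
  name = "mastodon";
  user = "mastodon";
  passwordFile = "/run/secrets/mastodon-db-password";
};
```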
- `nextcloud` has an option to enable SSE-C in S3.
- `services.peertube` now requires you to specify the secret file `secrets.secretsFile`. It can be generated by running `openssl rand -hex 32`. - `services.peertube` now requires you to specify the secret file `secrets.secretsFile`. It can be generated by running `openssl rand -hex 32`.
Before upgrading, read the release notes for PeerTube: Before upgrading, read the release notes for PeerTube:
- [Release v5.0.0](https://github.com/Chocobozzz/PeerTube/releases/tag/v5.0.0) - [Release v5.0.0](https://github.com/Chocobozzz/PeerTube/releases/tag/v5.0.0)
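Relating to the `services.peertube` change above, a minimal sketch; the secret path is only an example:

```nix
# Create the secret once, e.g.: openssl rand -hex 32 > /var/lib/peertube/secret
services.peertube.secrets.secretsFile = "/var/lib/peertube/secret";
```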
@ -296,8 +328,6 @@ In addition to numerous new and upgraded packages, this release has the followin
- [Xastir](https://xastir.org/index.php/Main_Page) can now access AX.25 interfaces via the `libax25` package. - [Xastir](https://xastir.org/index.php/Main_Page) can now access AX.25 interfaces via the `libax25` package.
- `tvbrowser-bin` was removed, and now `tvbrowser` is built from source.
- `nixos-version` now accepts `--configuration-revision` to display more information about the current generation revision - `nixos-version` now accepts `--configuration-revision` to display more information about the current generation revision
- The option `services.nomad.extraSettingsPlugins` has been fixed to allow more than one plugin in the path. - The option `services.nomad.extraSettingsPlugins` has been fixed to allow more than one plugin in the path.

View file

@ -24,7 +24,7 @@ in rec {
} }
'' ''
name=${shellEscape name} name=${shellEscape name}
mkdir -p "$out/$(dirname "$name")" mkdir -p "$out/$(dirname -- "$name")"
echo -n "$text" > "$out/$name" echo -n "$text" > "$out/$name"
'' ''
else else

View file

@ -179,7 +179,6 @@ class Driver:
start_command=cmd, start_command=cmd,
name=name, name=name,
keep_vm_state=args.get("keep_vm_state", False), keep_vm_state=args.get("keep_vm_state", False),
allow_reboot=args.get("allow_reboot", False),
) )
def serial_stdout_on(self) -> None: def serial_stdout_on(self) -> None:

View file

@ -144,7 +144,7 @@ class StartCommand:
self, self,
monitor_socket_path: Path, monitor_socket_path: Path,
shell_socket_path: Path, shell_socket_path: Path,
allow_reboot: bool = False, # TODO: unused, legacy? allow_reboot: bool = False,
) -> str: ) -> str:
display_opts = "" display_opts = ""
display_available = any(x in os.environ for x in ["DISPLAY", "WAYLAND_DISPLAY"]) display_available = any(x in os.environ for x in ["DISPLAY", "WAYLAND_DISPLAY"])
@ -152,16 +152,14 @@ class StartCommand:
display_opts += " -nographic" display_opts += " -nographic"
# qemu options # qemu options
qemu_opts = "" qemu_opts = (
qemu_opts += (
""
if allow_reboot
else " -no-reboot"
" -device virtio-serial" " -device virtio-serial"
" -device virtconsole,chardev=shell" " -device virtconsole,chardev=shell"
" -device virtio-rng-pci" " -device virtio-rng-pci"
" -serial stdio" " -serial stdio"
) )
if not allow_reboot:
qemu_opts += " -no-reboot"
# TODO: qemu script already captures this env variable, legacy? # TODO: qemu script already captures this env variable, legacy?
qemu_opts += " " + os.environ.get("QEMU_OPTS", "") qemu_opts += " " + os.environ.get("QEMU_OPTS", "")
@ -195,9 +193,10 @@ class StartCommand:
shared_dir: Path, shared_dir: Path,
monitor_socket_path: Path, monitor_socket_path: Path,
shell_socket_path: Path, shell_socket_path: Path,
allow_reboot: bool,
) -> subprocess.Popen: ) -> subprocess.Popen:
return subprocess.Popen( return subprocess.Popen(
self.cmd(monitor_socket_path, shell_socket_path), self.cmd(monitor_socket_path, shell_socket_path, allow_reboot),
stdin=subprocess.PIPE, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, stderr=subprocess.STDOUT,
@ -312,7 +311,6 @@ class Machine:
start_command: StartCommand start_command: StartCommand
keep_vm_state: bool keep_vm_state: bool
allow_reboot: bool
process: Optional[subprocess.Popen] process: Optional[subprocess.Popen]
pid: Optional[int] pid: Optional[int]
@ -337,13 +335,11 @@ class Machine:
start_command: StartCommand, start_command: StartCommand,
name: str = "machine", name: str = "machine",
keep_vm_state: bool = False, keep_vm_state: bool = False,
allow_reboot: bool = False,
callbacks: Optional[List[Callable]] = None, callbacks: Optional[List[Callable]] = None,
) -> None: ) -> None:
self.out_dir = out_dir self.out_dir = out_dir
self.tmp_dir = tmp_dir self.tmp_dir = tmp_dir
self.keep_vm_state = keep_vm_state self.keep_vm_state = keep_vm_state
self.allow_reboot = allow_reboot
self.name = name self.name = name
self.start_command = start_command self.start_command = start_command
self.callbacks = callbacks if callbacks is not None else [] self.callbacks = callbacks if callbacks is not None else []
@ -741,9 +737,10 @@ class Machine:
self.connected = True self.connected = True
def screenshot(self, filename: str) -> None: def screenshot(self, filename: str) -> None:
word_pattern = re.compile(r"^\w+$") if "." not in filename:
if word_pattern.match(filename): filename += ".png"
filename = os.path.join(self.out_dir, f"{filename}.png") if "/" not in filename:
filename = os.path.join(self.out_dir, filename)
tmp = f"{filename}.ppm" tmp = f"{filename}.ppm"
with self.nested( with self.nested(
@ -874,7 +871,7 @@ class Machine:
self.process.stdin.write(chars.encode()) self.process.stdin.write(chars.encode())
self.process.stdin.flush() self.process.stdin.flush()
def start(self) -> None: def start(self, allow_reboot: bool = False) -> None:
if self.booted: if self.booted:
return return
@ -898,6 +895,7 @@ class Machine:
self.shared_dir, self.shared_dir,
self.monitor_path, self.monitor_path,
self.shell_path, self.shell_path,
allow_reboot,
) )
self.monitor, _ = monitor_socket.accept() self.monitor, _ = monitor_socket.accept()
self.shell, _ = shell_socket.accept() self.shell, _ = shell_socket.accept()
@ -946,6 +944,15 @@ class Machine:
self.send_monitor_command("quit") self.send_monitor_command("quit")
self.wait_for_shutdown() self.wait_for_shutdown()
def reboot(self) -> None:
"""Press Ctrl+Alt+Delete in the guest.
Prepares the machine to be reconnected, which is useful if the
machine was started with `allow_reboot = True`.
"""
self.send_key("ctrl-alt-delete")
self.connected = False
def wait_for_x(self) -> None: def wait_for_x(self) -> None:
"""Wait until it is possible to connect to the X server. Note that """Wait until it is possible to connect to the X server. Note that
testing the existence of /tmp/.X11-unix/X0 is insufficient. testing the existence of /tmp/.X11-unix/X0 is insufficient.

View file

@ -21,7 +21,7 @@ let
# Sadly, systemd-vconsole-setup doesn't support binary keymaps. # Sadly, systemd-vconsole-setup doesn't support binary keymaps.
vconsoleConf = pkgs.writeText "vconsole.conf" '' vconsoleConf = pkgs.writeText "vconsole.conf" ''
KEYMAP=${cfg.keyMap} KEYMAP=${cfg.keyMap}
FONT=${cfg.font} ${optionalString (cfg.font != null) "FONT=${cfg.font}"}
''; '';
consoleEnv = kbd: pkgs.buildEnv { consoleEnv = kbd: pkgs.buildEnv {
@ -45,14 +45,19 @@ in
}; };
font = mkOption { font = mkOption {
type = with types; either str path; type = with types; nullOr (either str path);
default = "Lat2-Terminus16"; default = null;
example = "LatArCyrHeb-16"; example = "LatArCyrHeb-16";
description = mdDoc '' description = mdDoc ''
The font used for the virtual consoles. Leave empty to use The font used for the virtual consoles.
whatever the {command}`setfont` program considers the Can be `null`, a font name, or a path to a PSF font file.
default font.
Can be either a font name or a path to a PSF font file. Use `null` to let the kernel choose a built-in font.
The default is 8x16, and, as of Linux 5.3, Terminus 32 bold for display
resolutions of 2560x1080 and higher.
These fonts cover the [IBM437][] character set.
[IBM437]: https://en.wikipedia.org/wiki/Code_page_437
''; '';
}; };
@ -151,7 +156,7 @@ in
printf "\033%%${if isUnicode then "G" else "@"}" >> /dev/console printf "\033%%${if isUnicode then "G" else "@"}" >> /dev/console
loadkmap < ${optimizedKeymap} loadkmap < ${optimizedKeymap}
${optionalString cfg.earlySetup '' ${optionalString (cfg.earlySetup && cfg.font != null) ''
setfont -C /dev/console $extraUtils/share/consolefonts/font.psf setfont -C /dev/console $extraUtils/share/consolefonts/font.psf
''} ''}
''); '');
@ -168,7 +173,7 @@ in
"${config.boot.initrd.systemd.package.kbd}/bin/setfont" "${config.boot.initrd.systemd.package.kbd}/bin/setfont"
"${config.boot.initrd.systemd.package.kbd}/bin/loadkeys" "${config.boot.initrd.systemd.package.kbd}/bin/loadkeys"
"${config.boot.initrd.systemd.package.kbd.gzip}/bin/gzip" # Fonts and keyboard layouts are compressed "${config.boot.initrd.systemd.package.kbd.gzip}/bin/gzip" # Fonts and keyboard layouts are compressed
] ++ optionals (hasPrefix builtins.storeDir cfg.font) [ ] ++ optionals (cfg.font != null && hasPrefix builtins.storeDir cfg.font) [
"${cfg.font}" "${cfg.font}"
] ++ optionals (hasPrefix builtins.storeDir cfg.keyMap) [ ] ++ optionals (hasPrefix builtins.storeDir cfg.keyMap) [
"${cfg.keyMap}" "${cfg.keyMap}"
@ -195,7 +200,7 @@ in
]; ];
}) })
(mkIf (cfg.earlySetup && !config.boot.initrd.systemd.enable) { (mkIf (cfg.earlySetup && cfg.font != null && !config.boot.initrd.systemd.enable) {
boot.initrd.extraUtilsCommands = '' boot.initrd.extraUtilsCommands = ''
mkdir -p $out/share/consolefonts mkdir -p $out/share/consolefonts
${if substring 0 1 cfg.font == "/" then '' ${if substring 0 1 cfg.font == "/" then ''

View file

@ -7,6 +7,19 @@ This module generates a package containing configuration files and link it in /e
Fontconfig reads files in folder name / file name order, so the number prepended to the configuration file name decides the order of parsing. Fontconfig reads files in folder name / file name order, so the number prepended to the configuration file name decides the order of parsing.
Low number means high priority. Low number means high priority.
NOTE: Please take extreme care when adjusting the default settings of this module.
People care a lot, and I mean A LOT, about their font rendering, and you will be
The Person That Broke It if it changes in a way people don't like.
See prior art:
- https://github.com/NixOS/nixpkgs/pull/194594
- https://github.com/NixOS/nixpkgs/pull/222236
- https://github.com/NixOS/nixpkgs/pull/222689
And do not repeat our mistakes.
- @K900, March 2023
*/ */
{ config, pkgs, lib, ... }: { config, pkgs, lib, ... }:
@ -218,6 +231,8 @@ let
paths = cfg.confPackages; paths = cfg.confPackages;
ignoreCollisions = true; ignoreCollisions = true;
}; };
fontconfigNote = "Consider manually configuring fonts.fontconfig according to personal preference.";
in in
{ {
imports = [ imports = [
@ -229,6 +244,8 @@ in
(mkRemovedOptionModule [ "fonts" "fontconfig" "forceAutohint" ] "") (mkRemovedOptionModule [ "fonts" "fontconfig" "forceAutohint" ] "")
(mkRemovedOptionModule [ "fonts" "fontconfig" "renderMonoTTFAsBitmap" ] "") (mkRemovedOptionModule [ "fonts" "fontconfig" "renderMonoTTFAsBitmap" ] "")
(mkRemovedOptionModule [ "fonts" "fontconfig" "dpi" ] "Use display server-specific options") (mkRemovedOptionModule [ "fonts" "fontconfig" "dpi" ] "Use display server-specific options")
(mkRemovedOptionModule [ "hardware" "video" "hidpi" "enable" ] fontconfigNote)
(mkRemovedOptionModule [ "fonts" "optimizeForVeryHighDPI" ] fontconfigNote)
] ++ lib.forEach [ "enable" "substitutions" "preset" ] ] ++ lib.forEach [ "enable" "substitutions" "preset" ]
(opt: lib.mkRemovedOptionModule [ "fonts" "fontconfig" "ultimate" "${opt}" ] '' (opt: lib.mkRemovedOptionModule [ "fonts" "fontconfig" "ultimate" "${opt}" ] ''
The fonts.fontconfig.ultimate module and configuration is obsolete. The fonts.fontconfig.ultimate module and configuration is obsolete.

View file

@ -3,29 +3,7 @@
with lib; with lib;
let let
# A scalable variant of the X11 "core" cursor cfg = config.fonts;
#
# If not running a fancy desktop environment, the cursor is likely set to
# the default `cursor.pcf` bitmap font. This is 17px wide, so it's very
# small and almost invisible on 4K displays.
fontcursormisc_hidpi = pkgs.xorg.fontxfree86type1.overrideAttrs (old:
let
# The scaling constant is 230/96: the scalable `left_ptr` glyph at
# about 23 points is rendered as 17px, on a 96dpi display.
# Note: the XLFD font size is in decipoints.
size = 2.39583 * config.services.xserver.dpi;
sizeString = builtins.head (builtins.split "\\." (toString size));
in
{
postInstall = ''
alias='cursor -xfree86-cursor-medium-r-normal--0-${sizeString}-0-0-p-0-adobe-fontspecific'
echo "$alias" > $out/lib/X11/fonts/Type1/fonts.alias
'';
});
hasHidpi =
config.hardware.video.hidpi.enable &&
config.services.xserver.dpi != null;
defaultFonts = defaultFonts =
[ pkgs.dejavu_fonts [ pkgs.dejavu_fonts
@ -35,14 +13,7 @@ let
pkgs.unifont pkgs.unifont
pkgs.noto-fonts-emoji pkgs.noto-fonts-emoji
]; ];
defaultXFonts =
[ (if hasHidpi then fontcursormisc_hidpi else pkgs.xorg.fontcursormisc)
pkgs.xorg.fontmiscmisc
];
in in
{ {
imports = [ imports = [
(mkRemovedOptionModule [ "fonts" "enableCoreFonts" ] "Use fonts.fonts = [ pkgs.corefonts ]; instead.") (mkRemovedOptionModule [ "fonts" "enableCoreFonts" ] "Use fonts.fonts = [ pkgs.corefonts ]; instead.")
@ -68,14 +39,9 @@ in
and families and reasonable coverage of Unicode. and families and reasonable coverage of Unicode.
''; '';
}; };
}; };
}; };
config = mkMerge [ config = { fonts.fonts = mkIf cfg.enableDefaultFonts defaultFonts; };
{ fonts.fonts = mkIf config.fonts.enableDefaultFonts defaultFonts; }
{ fonts.fonts = mkIf config.services.xserver.enable defaultXFonts; }
];
} }

View file

@ -82,12 +82,30 @@ in
{command}`cat /sys/class/block/zram*/comp_algorithm` {command}`cat /sys/class/block/zram*/comp_algorithm`
''; '';
}; };
writebackDevice = lib.mkOption {
default = null;
example = "/dev/zvol/tarta-zoot/swap-writeback";
type = lib.types.nullOr lib.types.path;
description = lib.mdDoc ''
Write incompressible pages to this device,
as there's no gain from keeping them in RAM.
'';
};
}; };
}; };
config = lib.mkIf cfg.enable { config = lib.mkIf cfg.enable {
assertions = [
{
assertion = cfg.writebackDevice == null || cfg.swapDevices <= 1;
message = "A single writeback device cannot be shared among multiple zram devices";
}
];
system.requiredKernelConfig = with config.lib.kernelConfig; [ system.requiredKernelConfig = with config.lib.kernelConfig; [
(isModule "ZRAM") (isModule "ZRAM")
]; ];
@ -112,6 +130,8 @@ in
zram-size = if cfg.memoryMax != null then "min(${size}, ${toString cfg.memoryMax} / 1024 / 1024)" else size; zram-size = if cfg.memoryMax != null then "min(${size}, ${toString cfg.memoryMax} / 1024 / 1024)" else size;
compression-algorithm = cfg.algorithm; compression-algorithm = cfg.algorithm;
swap-priority = cfg.priority; swap-priority = cfg.priority;
} // lib.optionalAttrs (cfg.writebackDevice != null) {
writeback-device = cfg.writebackDevice;
}; };
}) })
devices)); devices));

View file

@ -65,7 +65,7 @@ let
}; };
}; };
filterDTBs = src: if isNull cfg.filter filterDTBs = src: if cfg.filter == null
then "${src}/dtbs" then "${src}/dtbs"
else else
pkgs.runCommand "dtbs-filtered" {} '' pkgs.runCommand "dtbs-filtered" {} ''
@ -93,8 +93,8 @@ let
# Fill in `dtboFile` for each overlay if not set already. # Fill in `dtboFile` for each overlay if not set already.
# Existence of one of these is guarded by assertion below # Existence of one of these is guarded by assertion below
withDTBOs = xs: flip map xs (o: o // { dtboFile = withDTBOs = xs: flip map xs (o: o // { dtboFile =
if isNull o.dtboFile then if o.dtboFile == null then
if !isNull o.dtsFile then compileDTS o.name o.dtsFile if o.dtsFile != null then compileDTS o.name o.dtsFile
else compileDTS o.name (pkgs.writeText "dts" o.dtsText) else compileDTS o.name (pkgs.writeText "dts" o.dtsText)
else o.dtboFile; } ); else o.dtboFile; } );
@ -181,7 +181,7 @@ in
config = mkIf (cfg.enable) { config = mkIf (cfg.enable) {
assertions = let assertions = let
invalidOverlay = o: isNull o.dtsFile && isNull o.dtsText && isNull o.dtboFile; invalidOverlay = o: (o.dtsFile == null) && (o.dtsText == null) && (o.dtboFile == null);
in lib.singleton { in lib.singleton {
assertion = lib.all (o: !invalidOverlay o) cfg.overlays; assertion = lib.all (o: !invalidOverlay o) cfg.overlays;
message = '' message = ''

View file

@ -1,24 +0,0 @@
{ lib, pkgs, config, ...}:
with lib;
{
options.hardware.video.hidpi.enable = mkEnableOption (lib.mdDoc "Font/DPI configuration optimized for HiDPI displays");
config = mkIf config.hardware.video.hidpi.enable {
console.font = lib.mkDefault "${pkgs.terminus_font}/share/consolefonts/ter-v32n.psf.gz";
# Needed when typing in passwords for full disk encryption
console.earlySetup = mkDefault true;
boot.loader.systemd-boot.consoleMode = mkDefault "1";
# Grayscale anti-aliasing for fonts
fonts.fontconfig.antialias = mkDefault true;
fonts.fontconfig.subpixel = {
rgba = mkDefault "none";
lcdfilter = mkDefault "none";
};
# TODO Find reasonable defaults X11 & wayland
};
}

View file

@ -518,21 +518,6 @@ EOF
} }
} }
# For lack of a better way to determine it, guess whether we should use a
# bigger font for the console from the display mode on the first
# framebuffer. A way based on the physical size/actual DPI reported by
# the monitor would be nice, but I don't know how to do this without X :)
my $fb_modes_file = "/sys/class/graphics/fb0/modes";
if (-f $fb_modes_file && -r $fb_modes_file) {
my $modes = read_file($fb_modes_file);
$modes =~ m/([0-9]+)x([0-9]+)/;
my $console_width = $1, my $console_height = $2;
if ($console_width > 1920) {
push @attrs, "# high-resolution display";
push @attrs, 'hardware.video.hidpi.enable = lib.mkDefault true;';
}
}
# Generate the hardware configuration file. # Generate the hardware configuration file.

View file

@ -159,10 +159,7 @@ in
$desktopConfiguration $desktopConfiguration
# Configure keymap in X11 # Configure keymap in X11
# services.xserver.layout = "us"; # services.xserver.layout = "us";
# services.xserver.xkbOptions = { # services.xserver.xkbOptions = "eurosign:e,caps:escape";
# "eurosign:e";
# "caps:escape" # map caps to escape.
# };
# Enable CUPS to print documents. # Enable CUPS to print documents.
# services.printing.enable = true; # services.printing.enable = true;

View file

@ -338,7 +338,7 @@ in
lidarr = 306; lidarr = 306;
slurm = 307; slurm = 307;
kapacitor = 308; kapacitor = 308;
solr = 309; # solr = 309; removed 2023-03-16
alerta = 310; alerta = 310;
minetest = 311; minetest = 311;
rss2email = 312; rss2email = 312;
@ -648,7 +648,7 @@ in
lidarr = 306; lidarr = 306;
slurm = 307; slurm = 307;
kapacitor = 308; kapacitor = 308;
solr = 309; # solr = 309; removed 2023-03-16
alerta = 310; alerta = 310;
minetest = 311; minetest = 311;
rss2email = 312; rss2email = 312;

View file

@ -95,7 +95,6 @@
./hardware/video/bumblebee.nix ./hardware/video/bumblebee.nix
./hardware/video/capture/mwprocapture.nix ./hardware/video/capture/mwprocapture.nix
./hardware/video/displaylink.nix ./hardware/video/displaylink.nix
./hardware/video/hidpi.nix
./hardware/video/nvidia.nix ./hardware/video/nvidia.nix
./hardware/video/switcheroo-control.nix ./hardware/video/switcheroo-control.nix
./hardware/video/uvcvideo/default.nix ./hardware/video/uvcvideo/default.nix
@ -220,6 +219,7 @@
./programs/proxychains.nix ./programs/proxychains.nix
./programs/qdmr.nix ./programs/qdmr.nix
./programs/qt5ct.nix ./programs/qt5ct.nix
./programs/regreet.nix
./programs/rog-control-center.nix ./programs/rog-control-center.nix
./programs/rust-motd.nix ./programs/rust-motd.nix
./programs/screen.nix ./programs/screen.nix
@ -511,6 +511,7 @@
./services/hardware/usbmuxd.nix ./services/hardware/usbmuxd.nix
./services/hardware/usbrelayd.nix ./services/hardware/usbrelayd.nix
./services/hardware/vdr.nix ./services/hardware/vdr.nix
./services/hardware/keyd.nix
./services/home-automation/evcc.nix ./services/home-automation/evcc.nix
./services/home-automation/home-assistant.nix ./services/home-automation/home-assistant.nix
./services/home-automation/zigbee2mqtt.nix ./services/home-automation/zigbee2mqtt.nix
@ -734,6 +735,7 @@
./services/monitoring/nagios.nix ./services/monitoring/nagios.nix
./services/monitoring/netdata.nix ./services/monitoring/netdata.nix
./services/monitoring/parsedmarc.nix ./services/monitoring/parsedmarc.nix
./services/monitoring/prometheus/alertmanager-irc-relay.nix
./services/monitoring/prometheus/alertmanager.nix ./services/monitoring/prometheus/alertmanager.nix
./services/monitoring/prometheus/default.nix ./services/monitoring/prometheus/default.nix
./services/monitoring/prometheus/exporters.nix ./services/monitoring/prometheus/exporters.nix
@ -950,6 +952,7 @@
./services/networking/owamp.nix ./services/networking/owamp.nix
./services/networking/pdns-recursor.nix ./services/networking/pdns-recursor.nix
./services/networking/pdnsd.nix ./services/networking/pdnsd.nix
./services/networking/peroxide.nix
./services/networking/pixiecore.nix ./services/networking/pixiecore.nix
./services/networking/pleroma.nix ./services/networking/pleroma.nix
./services/networking/polipo.nix ./services/networking/polipo.nix
@ -1061,7 +1064,6 @@
./services/search/meilisearch.nix ./services/search/meilisearch.nix
./services/search/opensearch.nix ./services/search/opensearch.nix
./services/search/qdrant.nix ./services/search/qdrant.nix
./services/search/solr.nix
./services/security/aesmd.nix ./services/security/aesmd.nix
./services/security/certmgr.nix ./services/security/certmgr.nix
./services/security/cfssl.nix ./services/security/cfssl.nix
@ -1132,7 +1134,6 @@
./services/web-apps/atlassian/confluence.nix ./services/web-apps/atlassian/confluence.nix
./services/web-apps/atlassian/crowd.nix ./services/web-apps/atlassian/crowd.nix
./services/web-apps/atlassian/jira.nix ./services/web-apps/atlassian/jira.nix
./services/web-apps/baget.nix
./services/web-apps/bookstack.nix ./services/web-apps/bookstack.nix
./services/web-apps/calibre-web.nix ./services/web-apps/calibre-web.nix
./services/web-apps/coder.nix ./services/web-apps/coder.nix

View file

@ -17,7 +17,7 @@ in {
type = types.listOf types.str; type = types.listOf types.str;
description = lib.mdDoc "Nix top-level packages to be compiled using CCache"; description = lib.mdDoc "Nix top-level packages to be compiled using CCache";
default = []; default = [];
example = [ "wxGTK30" "ffmpeg" "libav_all" ]; example = [ "wxGTK32" "ffmpeg" "libav_all" ];
}; };
}; };

View file

@ -0,0 +1,75 @@
{ lib
, pkgs
, config
, ...
}:
let
cfg = config.programs.regreet;
settingsFormat = pkgs.formats.toml { };
in
{
options.programs.regreet = {
enable = lib.mkEnableOption null // {
description = lib.mdDoc ''
Enable ReGreet, a clean and customizable greeter for greetd.
To use ReGreet, {option}`services.greetd` has to be enabled and
{option}`services.greetd.settings.default_session` should contain the
appropriate configuration to launch
{option}`config.programs.regreet.package`. For examples, see the
[ReGreet Readme](https://github.com/rharish101/ReGreet#set-as-default-session).
A minimal configuration that launches ReGreet in {command}`cage` is
enabled by this module by default.
'';
};
package = lib.mkPackageOptionMD pkgs [ "greetd" "regreet" ] { };
settings = lib.mkOption {
type = lib.types.either lib.types.path settingsFormat.type;
default = { };
description = lib.mdDoc ''
ReGreet configuration file. Refer to
<https://github.com/rharish101/ReGreet/blob/main/regreet.sample.toml>
for options.
'';
};
extraCss = lib.mkOption {
type = lib.types.either lib.types.path lib.types.lines;
default = "";
description = lib.mdDoc ''
Extra CSS rules to apply on top of the GTK theme. Refer to
[GTK CSS Properties](https://docs.gtk.org/gtk4/css-properties.html) for
modifiable properties.
'';
};
};
config = lib.mkIf cfg.enable {
services.greetd = {
enable = lib.mkDefault true;
settings.default_session.command = lib.mkDefault "${lib.getExe pkgs.cage} -s -- ${lib.getExe cfg.package}";
};
environment.etc = {
"greetd/regreet.css" =
if lib.isPath cfg.extraCss
then {source = cfg.extraCss;}
else {text = cfg.extraCss;};
"greetd/regreet.toml".source =
if lib.isPath cfg.settings
then cfg.settings
else settingsFormat.generate "regreet.toml" cfg.settings;
};
systemd.tmpfiles.rules = let
user = config.services.greetd.settings.default_session.user;
in [
"d /var/log/regreet 0755 greeter ${user} - -"
"d /var/cache/regreet 0755 greeter ${user} - -"
];
};
}

View file

@ -9,10 +9,27 @@ let
settingsFile = settingsFormat.generate "starship.toml" cfg.settings; settingsFile = settingsFormat.generate "starship.toml" cfg.settings;
in { initOption =
if cfg.interactiveOnly then
"promptInit"
else
"shellInit";
in
{
options.programs.starship = { options.programs.starship = {
enable = mkEnableOption (lib.mdDoc "the Starship shell prompt"); enable = mkEnableOption (lib.mdDoc "the Starship shell prompt");
interactiveOnly = mkOption {
default = true;
example = false;
type = types.bool;
description = lib.mdDoc ''
Whether to enable starship only when the shell is interactive.
Some plugins require this to be set to false to function correctly.
'';
};
settings = mkOption { settings = mkOption {
inherit (settingsFormat) type; inherit (settingsFormat) type;
default = { }; default = { };
@ -25,21 +42,21 @@ in {
}; };
config = mkIf cfg.enable { config = mkIf cfg.enable {
programs.bash.promptInit = '' programs.bash.${initOption} = ''
if [[ $TERM != "dumb" && (-z $INSIDE_EMACS || $INSIDE_EMACS == "vterm") ]]; then if [[ $TERM != "dumb" && (-z $INSIDE_EMACS || $INSIDE_EMACS == "vterm") ]]; then
export STARSHIP_CONFIG=${settingsFile} export STARSHIP_CONFIG=${settingsFile}
eval "$(${pkgs.starship}/bin/starship init bash)" eval "$(${pkgs.starship}/bin/starship init bash)"
fi fi
''; '';
programs.fish.promptInit = '' programs.fish.${initOption} = ''
if test "$TERM" != "dumb" -a \( -z "$INSIDE_EMACS" -o "$INSIDE_EMACS" = "vterm" \) if test "$TERM" != "dumb" -a \( -z "$INSIDE_EMACS" -o "$INSIDE_EMACS" = "vterm" \)
set -x STARSHIP_CONFIG ${settingsFile} set -x STARSHIP_CONFIG ${settingsFile}
eval (${pkgs.starship}/bin/starship init fish) eval (${pkgs.starship}/bin/starship init fish)
end end
''; '';
programs.zsh.promptInit = '' programs.zsh.${initOption} = ''
if [[ $TERM != "dumb" && (-z $INSIDE_EMACS || $INSIDE_EMACS == "vterm") ]]; then if [[ $TERM != "dumb" && (-z $INSIDE_EMACS || $INSIDE_EMACS == "vterm") ]]; then
export STARSHIP_CONFIG=${settingsFile} export STARSHIP_CONFIG=${settingsFile}
eval "$(${pkgs.starship}/bin/starship init zsh)" eval "$(${pkgs.starship}/bin/starship init zsh)"

View file

@ -44,6 +44,7 @@ with lib;
The hidepid module was removed, since the underlying machinery The hidepid module was removed, since the underlying machinery
is broken when using cgroups-v2. is broken when using cgroups-v2.
'') '')
(mkRemovedOptionModule [ "services" "baget" "enable" ] "The baget module was removed due to the upstream package being unmaintained.")
(mkRemovedOptionModule [ "services" "beegfs" ] "The BeeGFS module has been removed") (mkRemovedOptionModule [ "services" "beegfs" ] "The BeeGFS module has been removed")
(mkRemovedOptionModule [ "services" "beegfsEnable" ] "The BeeGFS module has been removed") (mkRemovedOptionModule [ "services" "beegfsEnable" ] "The BeeGFS module has been removed")
(mkRemovedOptionModule [ "services" "cgmanager" "enable"] "cgmanager was deprecated by lxc and therefore removed from nixpkgs.") (mkRemovedOptionModule [ "services" "cgmanager" "enable"] "cgmanager was deprecated by lxc and therefore removed from nixpkgs.")

View file

@ -19,7 +19,7 @@ let
]; ];
mkArgs = rule: mkArgs = rule:
if (isNull rule.args) then "" if (rule.args == null) then ""
else if (length rule.args == 0) then "args" else if (length rule.args == 0) then "args"
else "args ${concatStringsSep " " rule.args}"; else "args ${concatStringsSep " " rule.args}";
@ -27,9 +27,9 @@ let
let let
opts = mkOpts rule; opts = mkOpts rule;
as = optionalString (!isNull rule.runAs) "as ${rule.runAs}"; as = optionalString (rule.runAs != null) "as ${rule.runAs}";
cmd = optionalString (!isNull rule.cmd) "cmd ${rule.cmd}"; cmd = optionalString (rule.cmd != null) "cmd ${rule.cmd}";
args = mkArgs rule; args = mkArgs rule;
in in
@ -75,7 +75,9 @@ in
{file}`/etc/doas.conf` file. More specific rules should {file}`/etc/doas.conf` file. More specific rules should
come after more general ones in order to yield the expected behavior. come after more general ones in order to yield the expected behavior.
You can use `mkBefore` and/or `mkAfter` to ensure You can use `mkBefore` and/or `mkAfter` to ensure
this is the case when configuration options are merged. this is the case when configuration options are merged. Be aware that
this option cannot be used to override the behaviour allowing
passwordless operation for root.
''; '';
example = literalExpression '' example = literalExpression ''
[ [
@ -224,7 +226,9 @@ in
type = with types; lines; type = with types; lines;
default = ""; default = "";
description = lib.mdDoc '' description = lib.mdDoc ''
Extra configuration text appended to {file}`doas.conf`. Extra configuration text appended to {file}`doas.conf`. Be aware that
this option cannot be used to override the behaviour allowing
passwordless operation for root.
''; '';
}; };
}; };
@ -266,14 +270,14 @@ in
# completely replace the contents of this file, use # completely replace the contents of this file, use
# `environment.etc."doas.conf"`. # `environment.etc."doas.conf"`.
# "root" is allowed to do anything.
permit nopass keepenv root
# extraRules # extraRules
${concatStringsSep "\n" (lists.flatten (map mkRule cfg.extraRules))} ${concatStringsSep "\n" (lists.flatten (map mkRule cfg.extraRules))}
# extraConfig # extraConfig
${cfg.extraConfig} ${cfg.extraConfig}
# "root" is allowed to do anything.
permit nopass keepenv root
''; '';
preferLocalBuild = true; preferLocalBuild = true;
} }

View file

@ -793,7 +793,7 @@ let
}; };
})); }));
motd = if isNull config.users.motdFile motd = if config.users.motdFile == null
then pkgs.writeText "motd" config.users.motd then pkgs.writeText "motd" config.users.motd
else config.users.motdFile; else config.users.motdFile;
@ -1233,7 +1233,7 @@ in
config = { config = {
assertions = [ assertions = [
{ {
assertion = isNull config.users.motd || isNull config.users.motdFile; assertion = config.users.motd == null || config.users.motdFile == null;
message = '' message = ''
Only one of users.motd and users.motdFile can be set. Only one of users.motd and users.motdFile can be set.
''; '';

View file

@ -303,8 +303,8 @@ in
then if (backup.paths != null) then concatStringsSep " " backup.paths else "" then if (backup.paths != null) then concatStringsSep " " backup.paths else ""
else "--files-from ${filesFromTmpFile}"; else "--files-from ${filesFromTmpFile}";
pruneCmd = optionals (builtins.length backup.pruneOpts > 0) [ pruneCmd = optionals (builtins.length backup.pruneOpts > 0) [
(resticCmd + " forget --prune --cache-dir=%C/restic-backups-${name} " + (concatStringsSep " " backup.pruneOpts)) (resticCmd + " forget --prune " + (concatStringsSep " " backup.pruneOpts))
(resticCmd + " check --cache-dir=%C/restic-backups-${name} " + (concatStringsSep " " backup.checkOpts)) (resticCmd + " check " + (concatStringsSep " " backup.checkOpts))
]; ];
# Helper functions for rclone remotes # Helper functions for rclone remotes
rcloneRemoteName = builtins.elemAt (splitString ":" backup.repository) 1; rcloneRemoteName = builtins.elemAt (splitString ":" backup.repository) 1;
@ -314,6 +314,7 @@ in
in in
nameValuePair "restic-backups-${name}" ({ nameValuePair "restic-backups-${name}" ({
environment = { environment = {
RESTIC_CACHE_DIR = "%C/restic-backups-${name}";
RESTIC_PASSWORD_FILE = backup.passwordFile; RESTIC_PASSWORD_FILE = backup.passwordFile;
RESTIC_REPOSITORY = backup.repository; RESTIC_REPOSITORY = backup.repository;
RESTIC_REPOSITORY_FILE = backup.repositoryFile; RESTIC_REPOSITORY_FILE = backup.repositoryFile;
@ -332,7 +333,7 @@ in
restartIfChanged = false; restartIfChanged = false;
serviceConfig = { serviceConfig = {
Type = "oneshot"; Type = "oneshot";
ExecStart = (optionals (backupPaths != "") [ "${resticCmd} backup --cache-dir=%C/restic-backups-${name} ${concatStringsSep " " (backup.extraBackupArgs ++ excludeFlags)} ${backupPaths}" ]) ExecStart = (optionals (backupPaths != "") [ "${resticCmd} backup ${concatStringsSep " " (backup.extraBackupArgs ++ excludeFlags)} ${backupPaths}" ])
++ pruneCmd; ++ pruneCmd;
User = backup.user; User = backup.user;
RuntimeDirectory = "restic-backups-${name}"; RuntimeDirectory = "restic-backups-${name}";

View file

@ -270,7 +270,7 @@ in
''; '';
})]); })]);
environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (!isNull cfg.etcClusterAdminKubeconfig) environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (cfg.etcClusterAdminKubeconfig != null)
clusterAdminKubeconfig; clusterAdminKubeconfig;
environment.systemPackages = mkIf (top.kubelet.enable || top.proxy.enable) [ environment.systemPackages = mkIf (top.kubelet.enable || top.proxy.enable) [

View file

@ -12,7 +12,7 @@ let
'' ''
mkdir -p $out/bin mkdir -p $out/bin
makeWrapper ${cfg.package}/bin/dgraph $out/bin/dgraph \ makeWrapper ${cfg.package}/bin/dgraph $out/bin/dgraph \
--set PATH '${lib.makeBinPath [ pkgs.nodejs ]}:$PATH' \ --prefix PATH : "${lib.makeBinPath [ pkgs.nodejs ]}" \
''; '';
securityOptions = { securityOptions = {
NoNewPrivileges = true; NoNewPrivileges = true;

View file

@ -0,0 +1,112 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.keyd;
settingsFormat = pkgs.formats.ini { };
in
{
options = {
services.keyd = {
enable = mkEnableOption (lib.mdDoc "keyd, a key remapping daemon");
ids = mkOption {
type = types.listOf types.str;
default = [ "*" ];
example = [ "*" "-0123:0456" ];
description = lib.mdDoc ''
Device identifiers, as shown by {manpage}`keyd(1)`.
'';
};
settings = mkOption {
type = settingsFormat.type;
default = { };
example = {
main = {
capslock = "overload(control, esc)";
rightalt = "layer(rightalt)";
};
rightalt = {
j = "down";
k = "up";
h = "left";
l = "right";
};
};
description = lib.mdDoc ''
Configuration (excluding the `ids` section) that is written to {file}`/etc/keyd/default.conf`.
See <https://github.com/rvaiya/keyd> for how to configure it.
'';
};
};
};
config = mkIf cfg.enable {
environment.etc."keyd/default.conf".source = pkgs.runCommand "default.conf"
{
ids = ''
[ids]
${concatStringsSep "\n" cfg.ids}
'';
passAsFile = [ "ids" ];
} ''
cat $idsPath <(echo) ${settingsFormat.generate "keyd-main.conf" cfg.settings} >$out
'';
hardware.uinput.enable = lib.mkDefault true;
systemd.services.keyd = {
description = "Keyd remapping daemon";
documentation = [ "man:keyd(1)" ];
wantedBy = [ "multi-user.target" ];
restartTriggers = [
config.environment.etc."keyd/default.conf".source
];
# this is configurable in 2.4.2; later versions seem to remove this option.
# post-2.4.2 may need to set makeFlags in the derivation:
#
# makeFlags = [ "SOCKET_PATH/run/keyd/keyd.socket" ];
environment.KEYD_SOCKET = "/run/keyd/keyd.sock";
serviceConfig = {
ExecStart = "${pkgs.keyd}/bin/keyd";
Restart = "always";
DynamicUser = true;
SupplementaryGroups = [
config.users.groups.input.name
config.users.groups.uinput.name
];
RuntimeDirectory = "keyd";
# Hardening
CapabilityBoundingSet = "";
DeviceAllow = [
"char-input rw"
"/dev/uinput rw"
];
ProtectClock = true;
PrivateNetwork = true;
ProtectHome = true;
ProtectHostname = true;
PrivateUsers = true;
PrivateMounts = true;
RestrictNamespaces = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectControlGroups = true;
MemoryDenyWriteExecute = true;
RestrictRealtime = true;
LockPersonality = true;
ProtectProc = "noaccess";
UMask = "0077";
};
};
};
}

View file

@ -32,6 +32,7 @@ in
systemd.packages = [ pkgs.supergfxctl ]; systemd.packages = [ pkgs.supergfxctl ];
systemd.services.supergfxd.wantedBy = [ "multi-user.target" ]; systemd.services.supergfxd.wantedBy = [ "multi-user.target" ];
systemd.services.supergfxd.path = [ pkgs.kmod ];
services.dbus.packages = [ pkgs.supergfxctl ]; services.dbus.packages = [ pkgs.supergfxctl ];
services.udev.packages = [ pkgs.supergfxctl ]; services.udev.packages = [ pkgs.supergfxctl ];

View file

@ -5,8 +5,8 @@ let
cfg = config.services.undervolt; cfg = config.services.undervolt;
mkPLimit = limit: window: mkPLimit = limit: window:
if (isNull limit && isNull window) then null if (limit == null && window == null) then null
else assert asserts.assertMsg (!isNull limit && !isNull window) "Both power limit and window must be set"; else assert asserts.assertMsg (limit != null && window != null) "Both power limit and window must be set";
"${toString limit} ${toString window}"; "${toString limit} ${toString window}";
cliArgs = lib.cli.toGNUCommandLine {} { cliArgs = lib.cli.toGNUCommandLine {} {
inherit (cfg) inherit (cfg)

View file

@ -362,7 +362,7 @@ in {
config = mkIf cfg.enable { config = mkIf cfg.enable {
assertions = [ assertions = [
{ {
assertion = cfg.openFirewall -> !isNull cfg.config; assertion = cfg.openFirewall -> cfg.config != null;
message = "openFirewall can only be used with a declarative config"; message = "openFirewall can only be used with a declarative config";
} }
]; ];

View file

@ -187,7 +187,7 @@ in
A configuration file automatically generated by NixOS. A configuration file automatically generated by NixOS.
''; '';
description = lib.mdDoc '' description = lib.mdDoc ''
Override the configuration file used by MySQL. By default, Override the configuration file used by logrotate. By default,
NixOS generates one automatically from [](#opt-services.logrotate.settings). NixOS generates one automatically from [](#opt-services.logrotate.settings).
''; '';
example = literalExpression '' example = literalExpression ''

View file

@ -132,6 +132,8 @@ in
$config['plugins'] = [${concatMapStringsSep "," (p: "'${p}'") cfg.plugins}]; $config['plugins'] = [${concatMapStringsSep "," (p: "'${p}'") cfg.plugins}];
$config['des_key'] = file_get_contents('/var/lib/roundcube/des_key'); $config['des_key'] = file_get_contents('/var/lib/roundcube/des_key');
$config['mime_types'] = '${pkgs.nginx}/conf/mime.types'; $config['mime_types'] = '${pkgs.nginx}/conf/mime.types';
# Roundcube uses PHP-FPM which has `PrivateTmp = true;`
$config['temp_dir'] = '/tmp';
$config['enable_spellcheck'] = ${if cfg.dicts == [] then "false" else "true"}; $config['enable_spellcheck'] = ${if cfg.dicts == [] then "false" else "true"};
# by default, spellchecking uses a third-party cloud service # by default, spellchecking uses a third-party cloud service
$config['spellcheck_engine'] = 'pspell'; $config['spellcheck_engine'] = 'pspell';

View file

@ -27,10 +27,7 @@ please refer to the
{ pkgs, lib, config, ... }: { pkgs, lib, config, ... }:
let let
fqdn = "${config.networking.hostName}.${config.networking.domain}"; fqdn = "${config.networking.hostName}.${config.networking.domain}";
clientConfig = { clientConfig."m.homeserver".base_url = "https://${fqdn}";
"m.homeserver".base_url = "https://${fqdn}";
"m.identity_server" = {};
};
serverConfig."m.server" = "${fqdn}:443"; serverConfig."m.server" = "${fqdn}:443";
mkWellKnown = data: '' mkWellKnown = data: ''
add_header Content-Type application/json; add_header Content-Type application/json;

View file

@ -365,6 +365,8 @@ in
]; ];
services.gitea.settings = { services.gitea.settings = {
"cron.update_checker".ENABLED = lib.mkDefault false;
database = mkMerge [ database = mkMerge [
{ {
DB_TYPE = cfg.database.type; DB_TYPE = cfg.database.type;

View file

@ -156,7 +156,7 @@ let
}; };
extra = {}; extra = {};
uploads.storage_path = cfg.statePath; uploads.storage_path = cfg.statePath;
pages = { pages = optionalAttrs cfg.pages.enable {
enabled = cfg.pages.enable; enabled = cfg.pages.enable;
port = 8090; port = 8090;
host = cfg.pages.settings.pages-domain; host = cfg.pages.settings.pages-domain;

View file

@ -238,7 +238,7 @@ in
PORTUNUS_SERVER_BINARY = "${cfg.package}/bin/portunus-server"; PORTUNUS_SERVER_BINARY = "${cfg.package}/bin/portunus-server";
PORTUNUS_SERVER_GROUP = cfg.group; PORTUNUS_SERVER_GROUP = cfg.group;
PORTUNUS_SERVER_USER = cfg.user; PORTUNUS_SERVER_USER = cfg.user;
PORTUNUS_SERVER_HTTP_LISTEN = "[::]:${toString cfg.port}"; PORTUNUS_SERVER_HTTP_LISTEN = "127.0.0.1:${toString cfg.port}";
PORTUNUS_SERVER_STATE_DIR = cfg.stateDir; PORTUNUS_SERVER_STATE_DIR = cfg.stateDir;
PORTUNUS_SLAPD_BINARY = "${cfg.ldap.package}/libexec/slapd"; PORTUNUS_SLAPD_BINARY = "${cfg.ldap.package}/libexec/slapd";
PORTUNUS_SLAPD_GROUP = cfg.ldap.group; PORTUNUS_SLAPD_GROUP = cfg.ldap.group;

View file

@ -77,6 +77,10 @@ in {
}; };
config = mkMerge [ config = mkMerge [
(mkIf cfg.enable { (mkIf cfg.enable {
# For `sssctl` to work.
environment.etc."sssd/sssd.conf".source = settingsFile;
environment.etc."sssd/conf.d".source = "${dataDir}/conf.d";
systemd.services.sssd = { systemd.services.sssd = {
description = "System Security Services Daemon"; description = "System Security Services Daemon";
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
@ -101,6 +105,7 @@ in {
EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile; EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
}; };
preStart = '' preStart = ''
mkdir -p "${dataDir}/conf.d"
[ -f ${settingsFile} ] && rm -f ${settingsFile} [ -f ${settingsFile} ] && rm -f ${settingsFile}
old_umask=$(umask) old_umask=$(umask)
umask 0177 umask 0177

View file

@ -0,0 +1,107 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.prometheus.alertmanagerIrcRelay;
configFormat = pkgs.formats.yaml { };
configFile = configFormat.generate "alertmanager-irc-relay.yml" cfg.settings;
in
{
options.services.prometheus.alertmanagerIrcRelay = {
enable = mkEnableOption (mdDoc "Alertmanager IRC Relay");
package = mkOption {
type = types.package;
default = pkgs.alertmanager-irc-relay;
defaultText = literalExpression "pkgs.alertmanager-irc-relay";
description = mdDoc "Alertmanager IRC Relay package to use.";
};
extraFlags = mkOption {
type = types.listOf types.str;
default = [];
description = mdDoc "Extra command line options to pass to alertmanager-irc-relay.";
};
settings = mkOption {
type = configFormat.type;
example = literalExpression ''
{
http_host = "localhost";
http_port = 8000;
irc_host = "irc.example.com";
irc_port = 7000;
irc_nickname = "myalertbot";
irc_channels = [
{ name = "#mychannel"; }
];
}
'';
description = mdDoc ''
Configuration for Alertmanager IRC Relay as a Nix attribute set.
For a reference, check out the
[example configuration](https://github.com/google/alertmanager-irc-relay#configuring-and-running-the-bot)
and the
[source code](https://github.com/google/alertmanager-irc-relay/blob/master/config.go).
Note: The webhook's URL MUST point to the IRC channel where the message
should be posted. For `#mychannel` from the example, this would be
`http://localhost:8000/mychannel`.
'';
};
};
config = mkIf cfg.enable {
systemd.services.alertmanager-irc-relay = {
description = "Alertmanager IRC Relay";
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" ];
serviceConfig = {
ExecStart = ''
${cfg.package}/bin/alertmanager-irc-relay \
-config ${configFile} \
${escapeShellArgs cfg.extraFlags}
'';
DynamicUser = true;
NoNewPrivileges = true;
ProtectProc = "invisible";
ProtectSystem = "strict";
ProtectHome = "tmpfs";
PrivateTmp = true;
PrivateDevices = true;
PrivateIPC = true;
ProtectHostname = true;
ProtectClock = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
RestrictRealtime = true;
RestrictSUIDSGID = true;
SystemCallFilter = [
"@system-service"
"~@cpu-emulation"
"~@privileged"
"~@reboot"
"~@setuid"
"~@swap"
];
};
};
};
meta.maintainers = [ maintainers.oxzi ];
}

View file

@ -5,7 +5,7 @@ with lib;
let let
cfg = config.services.avahi; cfg = config.services.avahi;
yesNo = yes : if yes then "yes" else "no"; yesNo = yes: if yes then "yes" else "no";
avahiDaemonConf = with cfg; pkgs.writeText "avahi-daemon.conf" '' avahiDaemonConf = with cfg; pkgs.writeText "avahi-daemon.conf" ''
[server] [server]
@ -17,7 +17,8 @@ let
browse-domains=${concatStringsSep ", " browseDomains} browse-domains=${concatStringsSep ", " browseDomains}
use-ipv4=${yesNo ipv4} use-ipv4=${yesNo ipv4}
use-ipv6=${yesNo ipv6} use-ipv6=${yesNo ipv6}
${optionalString (interfaces!=null) "allow-interfaces=${concatStringsSep "," interfaces}"} ${optionalString (allowInterfaces!=null) "allow-interfaces=${concatStringsSep "," allowInterfaces}"}
${optionalString (denyInterfaces!=null) "deny-interfaces=${concatStringsSep "," denyInterfaces}"}
${optionalString (domainName!=null) "domain-name=${domainName}"} ${optionalString (domainName!=null) "domain-name=${domainName}"}
allow-point-to-point=${yesNo allowPointToPoint} allow-point-to-point=${yesNo allowPointToPoint}
${optionalString (cacheEntriesMax!=null) "cache-entries-max=${toString cacheEntriesMax}"} ${optionalString (cacheEntriesMax!=null) "cache-entries-max=${toString cacheEntriesMax}"}
@ -39,6 +40,10 @@ let
''; '';
in in
{ {
imports = [
(lib.mkRenamedOptionModule [ "services" "avahi" "interfaces" ] [ "services" "avahi" "allowInterfaces" ])
];
options.services.avahi = { options.services.avahi = {
enable = mkOption { enable = mkOption {
type = types.bool; type = types.bool;
@ -91,7 +96,7 @@ in
description = lib.mdDoc "Whether to use IPv6."; description = lib.mdDoc "Whether to use IPv6.";
}; };
interfaces = mkOption { allowInterfaces = mkOption {
type = types.nullOr (types.listOf types.str); type = types.nullOr (types.listOf types.str);
default = null; default = null;
description = lib.mdDoc '' description = lib.mdDoc ''
@ -101,6 +106,17 @@ in
''; '';
}; };
denyInterfaces = mkOption {
type = types.nullOr (types.listOf types.str);
default = null;
description = lib.mdDoc ''
List of network interfaces that should be ignored by the
{command}`avahi-daemon`. Other unspecified interfaces will be used,
unless {option}`allowInterfaces` is set. This option takes precedence
over {option}`allowInterfaces`.
'';
};
openFirewall = mkOption { openFirewall = mkOption {
type = types.bool; type = types.bool;
default = true; default = true;
@ -134,7 +150,7 @@ in
extraServiceFiles = mkOption { extraServiceFiles = mkOption {
type = with types; attrsOf (either str path); type = with types; attrsOf (either str path);
default = {}; default = { };
example = literalExpression '' example = literalExpression ''
{ {
ssh = "''${pkgs.avahi}/etc/avahi/services/ssh.service"; ssh = "''${pkgs.avahi}/etc/avahi/services/ssh.service";
@ -236,7 +252,7 @@ in
isSystemUser = true; isSystemUser = true;
}; };
users.groups.avahi = {}; users.groups.avahi = { };
system.nssModules = optional cfg.nssmdns pkgs.nssmdns; system.nssModules = optional cfg.nssmdns pkgs.nssmdns;
system.nssDatabases.hosts = optionals cfg.nssmdns (mkMerge [ system.nssDatabases.hosts = optionals cfg.nssmdns (mkMerge [
@ -246,10 +262,12 @@ in
environment.systemPackages = [ pkgs.avahi ]; environment.systemPackages = [ pkgs.avahi ];
environment.etc = (mapAttrs' (n: v: nameValuePair environment.etc = (mapAttrs'
"avahi/services/${n}.service" (n: v: nameValuePair
{ ${if types.path.check v then "source" else "text"} = v; } "avahi/services/${n}.service"
) cfg.extraServiceFiles); { ${if types.path.check v then "source" else "text"} = v; }
)
cfg.extraServiceFiles);
systemd.sockets.avahi-daemon = { systemd.sockets.avahi-daemon = {
description = "Avahi mDNS/DNS-SD Stack Activation Socket"; description = "Avahi mDNS/DNS-SD Stack Activation Socket";

View file

@ -94,7 +94,13 @@ in
${optionalString (ifaceSet != "") ''iifname { ${ifaceSet} } accept comment "trusted interfaces"''} ${optionalString (ifaceSet != "") ''iifname { ${ifaceSet} } accept comment "trusted interfaces"''}
# Some ICMPv6 types like NDP is untracked # Some ICMPv6 types like NDP is untracked
ct state vmap { invalid : drop, established : accept, related : accept, * : jump input-allow } comment "*: new and untracked" ct state vmap {
invalid : drop,
established : accept,
related : accept,
new : jump input-allow,
untracked: jump input-allow,
}
${optionalString cfg.logRefusedConnections '' ${optionalString cfg.logRefusedConnections ''
tcp flags syn / fin,syn,rst,ack log level info prefix "refused connection: " tcp flags syn / fin,syn,rst,ack log level info prefix "refused connection: "
@ -143,7 +149,13 @@ in
chain forward { chain forward {
type filter hook forward priority filter; policy drop; type filter hook forward priority filter; policy drop;
ct state vmap { invalid : drop, established : accept, related : accept, * : jump forward-allow } comment "*: new and untracked" ct state vmap {
invalid : drop,
established : accept,
related : accept,
new : jump forward-allow,
untracked : jump forward-allow,
}
} }

View file

@ -291,11 +291,11 @@ in {
''; '';
}; };
client_secret_file = mkOption { client_secret_path = mkOption {
type = types.nullOr types.path; type = types.nullOr types.path;
default = null; default = null;
description = lib.mdDoc '' description = lib.mdDoc ''
Path to OpenID Connect client secret file. Path to OpenID Connect client secret file. Expands environment variables in format ''${VAR}.
''; '';
}; };
@ -425,7 +425,7 @@ in {
(mkRenamedOptionModule ["services" "headscale" "dns" "baseDomain"] ["services" "headscale" "settings" "dns_config" "base_domain"]) (mkRenamedOptionModule ["services" "headscale" "dns" "baseDomain"] ["services" "headscale" "settings" "dns_config" "base_domain"])
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "issuer"] ["services" "headscale" "settings" "oidc" "issuer"]) (mkRenamedOptionModule ["services" "headscale" "openIdConnect" "issuer"] ["services" "headscale" "settings" "oidc" "issuer"])
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientId"] ["services" "headscale" "settings" "oidc" "client_id"]) (mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientId"] ["services" "headscale" "settings" "oidc" "client_id"])
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientSecretFile"] ["services" "headscale" "settings" "oidc" "client_secret_file"]) (mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientSecretFile"] ["services" "headscale" "settings" "oidc" "client_secret_path"])
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "hostname"] ["services" "headscale" "settings" "tls_letsencrypt_hostname"]) (mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "hostname"] ["services" "headscale" "settings" "tls_letsencrypt_hostname"])
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "challengeType"] ["services" "headscale" "settings" "tls_letsencrypt_challenge_type"]) (mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "challengeType"] ["services" "headscale" "settings" "tls_letsencrypt_challenge_type"])
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "httpListen"] ["services" "headscale" "settings" "tls_letsencrypt_listen"]) (mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "httpListen"] ["services" "headscale" "settings" "tls_letsencrypt_listen"])
@ -478,9 +478,6 @@ in {
export HEADSCALE_DB_PASS="$(head -n1 ${escapeShellArg cfg.settings.db_password_file})" export HEADSCALE_DB_PASS="$(head -n1 ${escapeShellArg cfg.settings.db_password_file})"
''} ''}
${optionalString (cfg.settings.oidc.client_secret_file != null) ''
export HEADSCALE_OIDC_CLIENT_SECRET="$(head -n1 ${escapeShellArg cfg.settings.oidc.client_secret_file})"
''}
exec ${cfg.package}/bin/headscale serve exec ${cfg.package}/bin/headscale serve
''; '';

View file

@ -4,6 +4,15 @@ with lib;
let let
cfg = config.services.jicofo; cfg = config.services.jicofo;
# HOCON is a JSON superset that some jitsi-meet components use for configuration
toHOCON = x: if isAttrs x && x ? __hocon_envvar then ("\${" + x.__hocon_envvar + "}")
else if isAttrs x && x ? __hocon_unquoted_string then x.__hocon_unquoted_string
else if isAttrs x then "{${ concatStringsSep "," (mapAttrsToList (k: v: ''"${k}":${toHOCON v}'') x) }}"
else if isList x then "[${ concatMapStringsSep "," toHOCON x }]"
else builtins.toJSON x;
configFile = pkgs.writeText "jicofo.conf" (toHOCON cfg.config);
in in
{ {
options.services.jicofo = with types; { options.services.jicofo = with types; {
@ -68,22 +77,34 @@ in
}; };
config = mkOption { config = mkOption {
type = attrsOf str; type = (pkgs.formats.json {}).type;
default = { }; default = { };
example = literalExpression '' example = literalExpression ''
{ {
"org.jitsi.jicofo.auth.URL" = "XMPP:jitsi-meet.example.com"; jicofo.bridge.max-bridge-participants = 42;
} }
''; '';
description = lib.mdDoc '' description = lib.mdDoc ''
Contents of the {file}`sip-communicator.properties` configuration file for jicofo. Contents of the {file}`jicofo.conf` configuration file.
''; '';
}; };
}; };
config = mkIf cfg.enable { config = mkIf cfg.enable {
services.jicofo.config = mapAttrs (_: v: mkDefault v) { services.jicofo.config = {
"org.jitsi.jicofo.BRIDGE_MUC" = cfg.bridgeMuc; jicofo = {
bridge.brewery-jid = cfg.bridgeMuc;
xmpp = rec {
client = {
hostname = cfg.xmppHost;
username = cfg.userName;
domain = cfg.userDomain;
password = { __hocon_envvar = "JICOFO_AUTH_PASS"; };
xmpp-domain = if cfg.xmppDomain == null then cfg.xmppHost else cfg.xmppDomain;
};
service = client;
};
};
}; };
users.groups.jitsi-meet = {}; users.groups.jitsi-meet = {};
@ -93,6 +114,7 @@ in
"-Dnet.java.sip.communicator.SC_HOME_DIR_LOCATION" = "/etc/jitsi"; "-Dnet.java.sip.communicator.SC_HOME_DIR_LOCATION" = "/etc/jitsi";
"-Dnet.java.sip.communicator.SC_HOME_DIR_NAME" = "jicofo"; "-Dnet.java.sip.communicator.SC_HOME_DIR_NAME" = "jicofo";
"-Djava.util.logging.config.file" = "/etc/jitsi/jicofo/logging.properties"; "-Djava.util.logging.config.file" = "/etc/jitsi/jicofo/logging.properties";
"-Dconfig.file" = configFile;
}; };
in in
{ {
@ -101,18 +123,13 @@ in
after = [ "network.target" ]; after = [ "network.target" ];
restartTriggers = [ restartTriggers = [
config.environment.etc."jitsi/jicofo/sip-communicator.properties".source configFile
]; ];
environment.JAVA_SYS_PROPS = concatStringsSep " " (mapAttrsToList (k: v: "${k}=${toString v}") jicofoProps); environment.JAVA_SYS_PROPS = concatStringsSep " " (mapAttrsToList (k: v: "${k}=${toString v}") jicofoProps);
script = '' script = ''
${pkgs.jicofo}/bin/jicofo \ export JICOFO_AUTH_PASS="$(<${cfg.userPasswordFile})"
--host=${cfg.xmppHost} \ exec "${pkgs.jicofo}/bin/jicofo"
--domain=${if cfg.xmppDomain == null then cfg.xmppHost else cfg.xmppDomain} \
--secret=$(cat ${cfg.componentPasswordFile}) \
--user_name=${cfg.userName} \
--user_domain=${cfg.userDomain} \
--user_password=$(cat ${cfg.userPasswordFile})
''; '';
serviceConfig = { serviceConfig = {
@ -140,10 +157,7 @@ in
}; };
}; };
environment.etc."jitsi/jicofo/sip-communicator.properties".source = environment.etc."jitsi/jicofo/sip-communicator.properties".text = "";
pkgs.writeText "sip-communicator.properties" (
generators.toKeyValue {} cfg.config
);
environment.etc."jitsi/jicofo/logging.properties".source = environment.etc."jitsi/jicofo/logging.properties".source =
mkDefault "${pkgs.jicofo}/etc/jitsi/jicofo/logging.properties-journal"; mkDefault "${pkgs.jicofo}/etc/jitsi/jicofo/logging.properties-journal";
}; };

View file

@ -513,22 +513,22 @@ in {
${indentLines 2 devices} ${indentLines 2 devices}
} }
${optionalString (!isNull defaults) '' ${optionalString (defaults != null) ''
defaults { defaults {
${indentLines 2 defaults} ${indentLines 2 defaults}
} }
''} ''}
${optionalString (!isNull blacklist) '' ${optionalString (blacklist != null) ''
blacklist { blacklist {
${indentLines 2 blacklist} ${indentLines 2 blacklist}
} }
''} ''}
${optionalString (!isNull blacklist_exceptions) '' ${optionalString (blacklist_exceptions != null) ''
blacklist_exceptions { blacklist_exceptions {
${indentLines 2 blacklist_exceptions} ${indentLines 2 blacklist_exceptions}
} }
''} ''}
${optionalString (!isNull overrides) '' ${optionalString (overrides != null) ''
overrides { overrides {
${indentLines 2 overrides} ${indentLines 2 overrides}
} }

View file

@ -0,0 +1,131 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.peroxide;
settingsFormat = pkgs.formats.yaml { };
stateDir = "peroxide";
in
{
options.services.peroxide = {
    enable = mkEnableOption (lib.mdDoc "peroxide");
package = mkPackageOptionMD pkgs "peroxide" {
default = [ "peroxide" ];
};
logLevel = mkOption {
# https://github.com/sirupsen/logrus#level-logging
type = types.enum [ "Panic" "Fatal" "Error" "Warning" "Info" "Debug" "Trace" ];
default = "Warning";
example = "Info";
description = lib.mdDoc "Only log messages of this priority or higher.";
};
settings = mkOption {
type = types.submodule {
freeformType = settingsFormat.type;
options = {
UserPortImap = mkOption {
type = types.port;
default = 1143;
description = lib.mdDoc "The port on which to listen for IMAP connections.";
};
UserPortSmtp = mkOption {
type = types.port;
default = 1025;
description = lib.mdDoc "The port on which to listen for SMTP connections.";
};
ServerAddress = mkOption {
type = types.str;
default = "[::0]";
example = "localhost";
description = lib.mdDoc "The address on which to listen for connections.";
};
};
};
default = { };
description = lib.mdDoc ''
Configuration for peroxide. See
[config.example.yaml](https://github.com/ljanyst/peroxide/blob/master/config.example.yaml)
for an example configuration.
'';
};
};
config = mkIf cfg.enable {
services.peroxide.settings = {
# peroxide deletes the cache directory on startup, which requires write
# permission on the parent directory, so we can't use
# /var/cache/peroxide
CacheDir = "/var/cache/peroxide/cache";
X509Key = mkDefault "/var/lib/${stateDir}/key.pem";
X509Cert = mkDefault "/var/lib/${stateDir}/cert.pem";
CookieJar = "/var/lib/${stateDir}/cookies.json";
CredentialsStore = "/var/lib/${stateDir}/credentials.json";
};
users.users.peroxide = {
isSystemUser = true;
group = "peroxide";
};
users.groups.peroxide = { };
systemd.services.peroxide = {
description = "Peroxide ProtonMail bridge";
requires = [ "network.target" ];
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
restartTriggers = [ config.environment.etc."peroxide.conf".source ];
serviceConfig = {
Type = "simple";
User = "peroxide";
LogsDirectory = "peroxide";
LogsDirectoryMode = "0750";
# Specify just "peroxide" so that the user has write permission, because
# peroxide deletes and recreates the cache directory on startup.
CacheDirectory = [ "peroxide" "peroxide/cache" ];
CacheDirectoryMode = "0700";
StateDirectory = stateDir;
StateDirectoryMode = "0700";
ExecStart = "${cfg.package}/bin/peroxide -log-file=/var/log/peroxide/peroxide.log -log-level ${cfg.logLevel}";
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
};
preStart = ''
# Create a self-signed certificate if no certificate exists.
if [[ ! -e "${cfg.settings.X509Key}" && ! -e "${cfg.settings.X509Cert}" ]]; then
${cfg.package}/bin/peroxide-cfg -action gen-x509 \
-x509-org 'N/A' \
-x509-cn 'nixos' \
-x509-cert "${cfg.settings.X509Cert}" \
-x509-key "${cfg.settings.X509Key}"
fi
'';
};
# https://github.com/ljanyst/peroxide/blob/master/peroxide.logrotate
services.logrotate.settings.peroxide = {
files = "/var/log/peroxide/peroxide.log";
rotate = 31;
frequency = "daily";
compress = true;
delaycompress = true;
missingok = true;
notifempty = true;
su = "peroxide peroxide";
postrotate = "systemctl reload peroxide";
};
environment.etc."peroxide.conf".source = settingsFormat.generate "peroxide.conf" cfg.settings;
environment.systemPackages = [ cfg.package ];
};
meta.maintainers = with maintainers; [ aanderse aidalgol ];
}
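A minimal usage sketch for the new peroxide module (illustrative only; the values shown are the module defaults or placeholders, not part of the commit):

```
# Hypothetical NixOS configuration using the peroxide module added above.
{
  services.peroxide = {
    enable = true;
    logLevel = "Info";
    settings = {
      UserPortImap = 1143;       # module default
      UserPortSmtp = 1025;       # module default
      ServerAddress = "localhost";
    };
  };
}
```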

View file

@ -9,7 +9,7 @@ let
listToValue = concatMapStringsSep ", " (generators.mkValueStringDefault { }); listToValue = concatMapStringsSep ", " (generators.mkValueStringDefault { });
}; };
pkg = if isNull cfg.package then pkg = if cfg.package == null then
pkgs.radicale pkgs.radicale
else else
cfg.package; cfg.package;
@ -117,13 +117,13 @@ in {
} }
]; ];
warnings = optional (isNull cfg.package && versionOlder config.system.stateVersion "17.09") '' warnings = optional (cfg.package == null && versionOlder config.system.stateVersion "17.09") ''
The configuration and storage formats of your existing Radicale The configuration and storage formats of your existing Radicale
installation might be incompatible with the newest version. installation might be incompatible with the newest version.
For upgrade instructions see For upgrade instructions see
https://radicale.org/2.1.html#documentation/migration-from-1xx-to-2xx. https://radicale.org/2.1.html#documentation/migration-from-1xx-to-2xx.
Set services.radicale.package to suppress this warning. Set services.radicale.package to suppress this warning.
'' ++ optional (isNull cfg.package && versionOlder config.system.stateVersion "20.09") '' '' ++ optional (cfg.package == null && versionOlder config.system.stateVersion "20.09") ''
The configuration format of your existing Radicale installation might be The configuration format of your existing Radicale installation might be
incompatible with the newest version. For upgrade instructions see incompatible with the newest version. For upgrade instructions see
https://github.com/Kozea/Radicale/blob/3.0.6/NEWS.md#upgrade-checklist. https://github.com/Kozea/Radicale/blob/3.0.6/NEWS.md#upgrade-checklist.

View file

@ -19,7 +19,7 @@ let
else if true == v then "yes" else if true == v then "yes"
else if false == v then "no" else if false == v then "no"
else if isList v then concatStringsSep "," v else if isList v then concatStringsSep "," v
else throw "unsupported type ${typeOf v}: ${(lib.generators.toPretty {}) v}"; else throw "unsupported type ${builtins.typeOf v}: ${(lib.generators.toPretty {}) v}";
  # don't use the "=" operator   # don't use the "=" operator
settingsFormat = (pkgs.formats.keyValue { settingsFormat = (pkgs.formats.keyValue {

View file

@ -8,7 +8,8 @@ let
configFileProvided = cfg.configFile != null; configFileProvided = cfg.configFile != null;
format = pkgs.formats.json { }; format = pkgs.formats.json { };
in { in
{
imports = [ imports = [
(mkRenamedOptionModule (mkRenamedOptionModule
[ "services" "yggdrasil" "config" ] [ "services" "yggdrasil" "config" ]
@ -21,7 +22,7 @@ in {
settings = mkOption { settings = mkOption {
type = format.type; type = format.type;
default = {}; default = { };
example = { example = {
Peers = [ Peers = [
"tcp://aa.bb.cc.dd:eeeee" "tcp://aa.bb.cc.dd:eeeee"
@ -45,7 +46,7 @@ in {
If no keys are specified then ephemeral keys are generated If no keys are specified then ephemeral keys are generated
and the Yggdrasil interface will have a random IPv6 address and the Yggdrasil interface will have a random IPv6 address
each time the service is started, this is the default. each time the service is started. This is the default.
If both {option}`configFile` and {option}`settings` If both {option}`configFile` and {option}`settings`
are supplied, they will be combined, with values from are supplied, they will be combined, with values from
@ -61,8 +62,13 @@ in {
default = null; default = null;
example = "/run/keys/yggdrasil.conf"; example = "/run/keys/yggdrasil.conf";
description = lib.mdDoc '' description = lib.mdDoc ''
A file which contains JSON configuration for yggdrasil. A file which contains JSON or HJSON configuration for yggdrasil. See
See the {option}`settings` option for more information. the {option}`settings` option for more information.
Note: This file must not be larger than 1 MB because it is passed to
        the yggdrasil process via systemd's LoadCredential mechanism. For
details, see <https://systemd.io/CREDENTIALS/> and `man 5
systemd.exec`.
''; '';
}; };
@ -77,20 +83,20 @@ in {
type = bool; type = bool;
default = false; default = false;
description = lib.mdDoc '' description = lib.mdDoc ''
Whether to open the UDP port used for multicast peer Whether to open the UDP port used for multicast peer discovery. The
discovery. The NixOS firewall blocks link-local NixOS firewall blocks link-local communication, so in order to make
communication, so in order to make local peering work you incoming local peering work you will also need to configure
will also need to set `LinkLocalTCPPort` in your `MulticastInterfaces` in your Yggdrasil configuration
yggdrasil configuration ({option}`settings` or ({option}`settings` or {option}`configFile`). You will then have to
{option}`configFile`) to a port number other than 0, add the ports that you configure there to your firewall configuration
and then add that port to ({option}`networking.firewall.allowedTCPPorts` or
{option}`networking.firewall.allowedTCPPorts`. {option}`networking.firewall.interfaces.<name>.allowedTCPPorts`).
''; '';
}; };
denyDhcpcdInterfaces = mkOption { denyDhcpcdInterfaces = mkOption {
type = listOf str; type = listOf str;
default = []; default = [ ];
example = [ "tap*" ]; example = [ "tap*" ];
description = lib.mdDoc '' description = lib.mdDoc ''
Disable the DHCP client for any interface whose name matches Disable the DHCP client for any interface whose name matches
@ -118,80 +124,102 @@ in {
}; };
}; };
config = mkIf cfg.enable (let binYggdrasil = cfg.package + "/bin/yggdrasil"; config = mkIf cfg.enable (
in { let
assertions = [{ binYggdrasil = "${cfg.package}/bin/yggdrasil";
assertion = config.networking.enableIPv6; binHjson = "${pkgs.hjson-go}/bin/hjson-cli";
message = "networking.enableIPv6 must be true for yggdrasil to work"; in
}]; {
assertions = [{
assertion = config.networking.enableIPv6;
message = "networking.enableIPv6 must be true for yggdrasil to work";
}];
system.activationScripts.yggdrasil = mkIf cfg.persistentKeys '' system.activationScripts.yggdrasil = mkIf cfg.persistentKeys ''
if [ ! -e ${keysPath} ] if [ ! -e ${keysPath} ]
then then
mkdir --mode=700 -p ${builtins.dirOf keysPath} mkdir --mode=700 -p ${builtins.dirOf keysPath}
${binYggdrasil} -genconf -json \ ${binYggdrasil} -genconf -json \
| ${pkgs.jq}/bin/jq \ | ${pkgs.jq}/bin/jq \
'to_entries|map(select(.key|endswith("Key")))|from_entries' \ 'to_entries|map(select(.key|endswith("Key")))|from_entries' \
> ${keysPath} > ${keysPath}
fi fi
''; '';
systemd.services.yggdrasil = { systemd.services.yggdrasil = {
description = "Yggdrasil Network Service"; description = "Yggdrasil Network Service";
after = [ "network-pre.target" ]; after = [ "network-pre.target" ];
wants = [ "network.target" ]; wants = [ "network.target" ];
before = [ "network.target" ]; before = [ "network.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
preStart = # This script first prepares the config file, then it starts Yggdrasil.
(if settingsProvided || configFileProvided || cfg.persistentKeys then # The preparation could also be done in ExecStartPre/preStart but only
"echo " # systemd versions >= v252 support reading credentials in ExecStartPre. As
# of February 2023, systemd v252 is not yet in the stable branch of NixOS.
#
# This could be changed in the future once systemd version v252 has
# reached NixOS but it does not have to be. Config file preparation is
# fast enough, it does not need elevated privileges, and `set -euo
# pipefail` should make sure that the service is not started if the
# preparation fails. Therefore, it is not necessary to move the
# preparation to ExecStartPre.
script = ''
set -euo pipefail
+ (lib.optionalString settingsProvided # prepare config file
"'${builtins.toJSON cfg.settings}'") ${(if settingsProvided || configFileProvided || cfg.persistentKeys then
+ (lib.optionalString configFileProvided "$(cat ${cfg.configFile})") "echo "
+ (lib.optionalString cfg.persistentKeys "$(cat ${keysPath})")
+ " | ${pkgs.jq}/bin/jq -s add | ${binYggdrasil} -normaliseconf -useconf"
else
"${binYggdrasil} -genconf") + " > /run/yggdrasil/yggdrasil.conf";
serviceConfig = { + (lib.optionalString settingsProvided
ExecStart = "'${builtins.toJSON cfg.settings}'")
"${binYggdrasil} -useconffile /run/yggdrasil/yggdrasil.conf"; + (lib.optionalString configFileProvided
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID"; "$(${binHjson} -c \"$CREDENTIALS_DIRECTORY/yggdrasil.conf\")")
Restart = "always"; + (lib.optionalString cfg.persistentKeys "$(cat ${keysPath})")
+ " | ${pkgs.jq}/bin/jq -s add | ${binYggdrasil} -normaliseconf -useconf"
else
"${binYggdrasil} -genconf") + " > /run/yggdrasil/yggdrasil.conf"}
DynamicUser = true; # start yggdrasil
StateDirectory = "yggdrasil"; ${binYggdrasil} -useconffile /run/yggdrasil/yggdrasil.conf
RuntimeDirectory = "yggdrasil"; '';
RuntimeDirectoryMode = "0750";
BindReadOnlyPaths = lib.optional configFileProvided cfg.configFile
++ lib.optional cfg.persistentKeys keysPath;
ReadWritePaths = "/run/yggdrasil";
AmbientCapabilities = "CAP_NET_ADMIN CAP_NET_BIND_SERVICE"; serviceConfig = {
CapabilityBoundingSet = "CAP_NET_ADMIN CAP_NET_BIND_SERVICE"; ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
MemoryDenyWriteExecute = true; Restart = "always";
ProtectControlGroups = true;
ProtectHome = "tmpfs";
ProtectKernelModules = true;
ProtectKernelTunables = true;
RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
RestrictNamespaces = true;
RestrictRealtime = true;
SystemCallArchitectures = "native";
SystemCallFilter = [ "@system-service" "~@privileged @keyring" ];
} // (if (cfg.group != null) then {
Group = cfg.group;
} else {});
};
networking.dhcpcd.denyInterfaces = cfg.denyDhcpcdInterfaces; DynamicUser = true;
networking.firewall.allowedUDPPorts = mkIf cfg.openMulticastPort [ 9001 ]; StateDirectory = "yggdrasil";
RuntimeDirectory = "yggdrasil";
RuntimeDirectoryMode = "0750";
BindReadOnlyPaths = lib.optional cfg.persistentKeys keysPath;
LoadCredential =
mkIf configFileProvided "yggdrasil.conf:${cfg.configFile}";
# Make yggdrasilctl available on the command line. AmbientCapabilities = "CAP_NET_ADMIN CAP_NET_BIND_SERVICE";
environment.systemPackages = [ cfg.package ]; CapabilityBoundingSet = "CAP_NET_ADMIN CAP_NET_BIND_SERVICE";
}); MemoryDenyWriteExecute = true;
ProtectControlGroups = true;
ProtectHome = "tmpfs";
ProtectKernelModules = true;
ProtectKernelTunables = true;
RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
RestrictNamespaces = true;
RestrictRealtime = true;
SystemCallArchitectures = "native";
SystemCallFilter = [ "@system-service" "~@privileged @keyring" ];
} // (if (cfg.group != null) then {
Group = cfg.group;
} else { });
};
networking.dhcpcd.denyInterfaces = cfg.denyDhcpcdInterfaces;
networking.firewall.allowedUDPPorts = mkIf cfg.openMulticastPort [ 9001 ];
# Make yggdrasilctl available on the command line.
environment.systemPackages = [ cfg.package ];
}
);
meta = { meta = {
doc = ./yggdrasil.md; doc = ./yggdrasil.md;
maintainers = with lib.maintainers; [ gazally ehmry ]; maintainers = with lib.maintainers; [ gazally ehmry ];
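To illustrate the updated `openMulticastPort` description, a sketch of what link-local peering might look like under the reworked module; the interface regex and port number below are assumptions, not part of the commit:

```
# Hypothetical configuration for local peer discovery with the new module.
{
  services.yggdrasil = {
    enable = true;
    openMulticastPort = true;            # opens UDP 9001 for peer discovery
    settings.MulticastInterfaces = [
      { Regex = "en.*"; Beacon = true; Listen = true; Port = 12345; }
    ];
  };
  # Ports configured in MulticastInterfaces must be opened separately.
  networking.firewall.allowedTCPPorts = [ 12345 ];
}
```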

View file

@ -1,110 +0,0 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.solr;
in
{
options = {
services.solr = {
enable = mkEnableOption (lib.mdDoc "Solr");
package = mkOption {
type = types.package;
default = pkgs.solr;
defaultText = literalExpression "pkgs.solr";
description = lib.mdDoc "Which Solr package to use.";
};
port = mkOption {
type = types.port;
default = 8983;
description = lib.mdDoc "Port on which Solr is ran.";
};
stateDir = mkOption {
type = types.path;
default = "/var/lib/solr";
description = lib.mdDoc "The solr home directory containing config, data, and logging files.";
};
extraJavaOptions = mkOption {
type = types.listOf types.str;
default = [];
description = lib.mdDoc "Extra command line options given to the java process running Solr.";
};
user = mkOption {
type = types.str;
default = "solr";
description = lib.mdDoc "User under which Solr is ran.";
};
group = mkOption {
type = types.str;
default = "solr";
description = lib.mdDoc "Group under which Solr is ran.";
};
};
};
config = mkIf cfg.enable {
environment.systemPackages = [ cfg.package ];
systemd.services.solr = {
after = [ "network.target" "remote-fs.target" "nss-lookup.target" "systemd-journald-dev-log.socket" ];
wantedBy = [ "multi-user.target" ];
environment = {
SOLR_HOME = "${cfg.stateDir}/data";
LOG4J_PROPS = "${cfg.stateDir}/log4j2.xml";
SOLR_LOGS_DIR = "${cfg.stateDir}/logs";
SOLR_PORT = "${toString cfg.port}";
};
path = with pkgs; [
gawk
procps
];
preStart = ''
mkdir -p "${cfg.stateDir}/data";
mkdir -p "${cfg.stateDir}/logs";
if ! test -e "${cfg.stateDir}/data/solr.xml"; then
install -D -m0640 ${cfg.package}/server/solr/solr.xml "${cfg.stateDir}/data/solr.xml"
install -D -m0640 ${cfg.package}/server/solr/zoo.cfg "${cfg.stateDir}/data/zoo.cfg"
fi
if ! test -e "${cfg.stateDir}/log4j2.xml"; then
install -D -m0640 ${cfg.package}/server/resources/log4j2.xml "${cfg.stateDir}/log4j2.xml"
fi
'';
serviceConfig = {
User = cfg.user;
Group = cfg.group;
ExecStart="${cfg.package}/bin/solr start -f -a \"${concatStringsSep " " cfg.extraJavaOptions}\"";
ExecStop="${cfg.package}/bin/solr stop";
};
};
users.users = optionalAttrs (cfg.user == "solr") {
solr = {
group = cfg.group;
home = cfg.stateDir;
createHome = true;
uid = config.ids.uids.solr;
};
};
users.groups = optionalAttrs (cfg.group == "solr") {
solr.gid = config.ids.gids.solr;
};
};
}

View file

@ -273,26 +273,16 @@ in
"fail2ban/filter.d".source = "${cfg.package}/etc/fail2ban/filter.d/*.conf"; "fail2ban/filter.d".source = "${cfg.package}/etc/fail2ban/filter.d/*.conf";
}; };
systemd.packages = [ cfg.package ];
systemd.services.fail2ban = { systemd.services.fail2ban = {
description = "Fail2ban Intrusion Prevention System";
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
partOf = optional config.networking.firewall.enable "firewall.service"; partOf = optional config.networking.firewall.enable "firewall.service";
restartTriggers = [ fail2banConf jailConf pathsConf ]; restartTriggers = [ fail2banConf jailConf pathsConf ];
path = [ cfg.package cfg.packageFirewall pkgs.iproute2 ] ++ cfg.extraPackages; path = [ cfg.package cfg.packageFirewall pkgs.iproute2 ] ++ cfg.extraPackages;
unitConfig.Documentation = "man:fail2ban(1)";
serviceConfig = { serviceConfig = {
ExecStart = "${cfg.package}/bin/fail2ban-server -xf start";
ExecStop = "${cfg.package}/bin/fail2ban-server stop";
ExecReload = "${cfg.package}/bin/fail2ban-server reload";
Type = "simple";
Restart = "on-failure";
PIDFile = "/run/fail2ban/fail2ban.pid";
# Capabilities # Capabilities
CapabilityBoundingSet = [ "CAP_AUDIT_READ" "CAP_DAC_READ_SEARCH" "CAP_NET_ADMIN" "CAP_NET_RAW" ]; CapabilityBoundingSet = [ "CAP_AUDIT_READ" "CAP_DAC_READ_SEARCH" "CAP_NET_ADMIN" "CAP_NET_RAW" ];
# Security # Security

View file

@ -132,7 +132,7 @@ in
requires = lib.mkIf (!(isPathType cfg.repository)) [ "network-online.target" ]; requires = lib.mkIf (!(isPathType cfg.repository)) [ "network-online.target" ];
environment.GIT_SSH_COMMAND = lib.mkIf (!(isNull cfg.sshKeyFile)) environment.GIT_SSH_COMMAND = lib.mkIf (cfg.sshKeyFile != null)
"${pkgs.openssh}/bin/ssh -i ${lib.escapeShellArg cfg.sshKeyFile}"; "${pkgs.openssh}/bin/ssh -i ${lib.escapeShellArg cfg.sshKeyFile}";
restartIfChanged = false; restartIfChanged = false;

View file

@ -1,170 +0,0 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.baget;
defaultConfig = {
"PackageDeletionBehavior" = "Unlist";
"AllowPackageOverwrites" = false;
"Database" = {
"Type" = "Sqlite";
"ConnectionString" = "Data Source=baget.db";
};
"Storage" = {
"Type" = "FileSystem";
"Path" = "";
};
"Search" = {
"Type" = "Database";
};
"Mirror" = {
"Enabled" = false;
"PackageSource" = "https://api.nuget.org/v3/index.json";
};
"Logging" = {
"IncludeScopes" = false;
"Debug" = {
"LogLevel" = {
"Default" = "Warning";
};
};
"Console" = {
"LogLevel" = {
"Microsoft.Hosting.Lifetime" = "Information";
"Default" = "Warning";
};
};
};
};
configAttrs = recursiveUpdate defaultConfig cfg.extraConfig;
configFormat = pkgs.formats.json {};
configFile = configFormat.generate "appsettings.json" configAttrs;
in
{
options.services.baget = {
enable = mkEnableOption (lib.mdDoc "BaGet NuGet-compatible server");
apiKeyFile = mkOption {
type = types.path;
example = "/root/baget.key";
description = lib.mdDoc ''
Private API key for BaGet.
'';
};
extraConfig = mkOption {
type = configFormat.type;
default = {};
example = {
"Database" = {
"Type" = "PostgreSql";
"ConnectionString" = "Server=/run/postgresql;Port=5432;";
};
};
defaultText = literalExpression ''
{
"PackageDeletionBehavior" = "Unlist";
"AllowPackageOverwrites" = false;
"Database" = {
"Type" = "Sqlite";
"ConnectionString" = "Data Source=baget.db";
};
"Storage" = {
"Type" = "FileSystem";
"Path" = "";
};
"Search" = {
"Type" = "Database";
};
"Mirror" = {
"Enabled" = false;
"PackageSource" = "https://api.nuget.org/v3/index.json";
};
"Logging" = {
"IncludeScopes" = false;
"Debug" = {
"LogLevel" = {
"Default" = "Warning";
};
};
"Console" = {
"LogLevel" = {
"Microsoft.Hosting.Lifetime" = "Information";
"Default" = "Warning";
};
};
};
}
'';
description = lib.mdDoc ''
Extra configuration options for BaGet. Refer to <https://loic-sharma.github.io/BaGet/configuration/> for details.
Default value is merged with values from here.
'';
};
};
# implementation
config = mkIf cfg.enable {
systemd.services.baget = {
description = "BaGet server";
wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" ];
after = [ "network.target" "network-online.target" ];
path = [ pkgs.jq ];
serviceConfig = {
WorkingDirectory = "/var/lib/baget";
DynamicUser = true;
StateDirectory = "baget";
StateDirectoryMode = "0700";
LoadCredential = "api_key:${cfg.apiKeyFile}";
CapabilityBoundingSet = "";
NoNewPrivileges = true;
PrivateDevices = true;
PrivateTmp = true;
PrivateUsers = true;
PrivateMounts = true;
ProtectHome = true;
ProtectClock = true;
ProtectProc = "noaccess";
ProcSubset = "pid";
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectControlGroups = true;
ProtectHostname = true;
RestrictSUIDSGID = true;
RestrictRealtime = true;
RestrictNamespaces = true;
LockPersonality = true;
RemoveIPC = true;
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
SystemCallFilter = [ "@system-service" "~@privileged" ];
};
script = ''
jq --slurpfile apiKeys <(jq -R . "$CREDENTIALS_DIRECTORY/api_key") '.ApiKey = $apiKeys[0]' ${configFile} > appsettings.json
ln -snf ${pkgs.baget}/lib/BaGet/wwwroot wwwroot
exec ${pkgs.baget}/bin/BaGet
'';
};
};
}

View file

@ -16,7 +16,7 @@ let
if (any (str: k == str) secretKeys) then v if (any (str: k == str) secretKeys) then v
else if isString v then "'${v}'" else if isString v then "'${v}'"
else if isBool v then boolToString v else if isBool v then boolToString v
else if isNull v then "null" else if v == null then "null"
else toString v else toString v
; ;
in in

View file

@ -411,11 +411,14 @@ in
componentPasswordFile = "/var/lib/jitsi-meet/jicofo-component-secret"; componentPasswordFile = "/var/lib/jitsi-meet/jicofo-component-secret";
bridgeMuc = "jvbbrewery@internal.${cfg.hostName}"; bridgeMuc = "jvbbrewery@internal.${cfg.hostName}";
config = mkMerge [{ config = mkMerge [{
"org.jitsi.jicofo.ALWAYS_TRUST_MODE_ENABLED" = "true"; jicofo.xmpp.service.disable-certificate-verification = true;
jicofo.xmpp.client.disable-certificate-verification = true;
#} (lib.mkIf cfg.jibri.enable { #} (lib.mkIf cfg.jibri.enable {
} (lib.mkIf (config.services.jibri.enable || cfg.jibri.enable) { } (lib.mkIf (config.services.jibri.enable || cfg.jibri.enable) {
"org.jitsi.jicofo.jibri.BREWERY" = "JibriBrewery@internal.${cfg.hostName}"; jicofo.jibri = {
"org.jitsi.jicofo.jibri.PENDING_TIMEOUT" = "90"; brewery-jid = "JibriBrewery@internal.${cfg.hostName}";
pending-timeout = "90";
};
})]; })];
}; };

View file

@ -34,6 +34,24 @@ in
options.services.limesurvey = { options.services.limesurvey = {
enable = mkEnableOption (lib.mdDoc "Limesurvey web application"); enable = mkEnableOption (lib.mdDoc "Limesurvey web application");
encryptionKey = mkOption {
type = types.str;
default = "E17687FC77CEE247F0E22BB3ECF27FDE8BEC310A892347EC13013ABA11AA7EB5";
description = lib.mdDoc ''
This is a 32-byte key used to encrypt variables in the database.
You _must_ change this from the default value.
'';
};
encryptionNonce = mkOption {
type = types.str;
default = "1ACC8555619929DB91310BE848025A427B0F364A884FFA77";
description = lib.mdDoc ''
This is a 24-byte nonce used to encrypt variables in the database.
You _must_ change this from the default value.
'';
};
database = { database = {
type = mkOption { type = mkOption {
type = types.enum [ "mysql" "pgsql" "odbc" "mssql" ]; type = types.enum [ "mysql" "pgsql" "odbc" "mssql" ];
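The new `encryptionKey` and `encryptionNonce` options must not be left at their defaults. As a hedged sketch (the values shown are placeholders), suitable hex-encoded replacements could be generated and set like this:

```
# Generate replacement values (32 bytes / 24 bytes, hex-encoded):
#   openssl rand -hex 32   # -> encryptionKey
#   openssl rand -hex 24   # -> encryptionNonce
{
  services.limesurvey = {
    enable = true;
    encryptionKey = "<64 hex characters from openssl rand -hex 32>";
    encryptionNonce = "<48 hex characters from openssl rand -hex 24>";
  };
}
```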
@ -42,6 +60,12 @@ in
description = lib.mdDoc "Database engine to use."; description = lib.mdDoc "Database engine to use.";
}; };
dbEngine = mkOption {
type = types.enum [ "MyISAM" "InnoDB" ];
default = "InnoDB";
description = lib.mdDoc "Database storage engine to use.";
};
host = mkOption { host = mkOption {
type = types.str; type = types.str;
default = "localhost"; default = "localhost";
@ -180,6 +204,8 @@ in
config = { config = {
tempdir = "${stateDir}/tmp"; tempdir = "${stateDir}/tmp";
uploaddir = "${stateDir}/upload"; uploaddir = "${stateDir}/upload";
encryptionnonce = cfg.encryptionNonce;
encryptionsecretboxkey = cfg.encryptionKey;
force_ssl = mkIf (cfg.virtualHost.addSSL || cfg.virtualHost.forceSSL || cfg.virtualHost.onlySSL) "on"; force_ssl = mkIf (cfg.virtualHost.addSSL || cfg.virtualHost.forceSSL || cfg.virtualHost.onlySSL) "on";
config.defaultlang = "en"; config.defaultlang = "en";
}; };
@ -200,6 +226,8 @@ in
services.phpfpm.pools.limesurvey = { services.phpfpm.pools.limesurvey = {
inherit user group; inherit user group;
phpPackage = pkgs.php80;
phpEnv.DBENGINE = "${cfg.database.dbEngine}";
phpEnv.LIMESURVEY_CONFIG = "${limesurveyConfig}"; phpEnv.LIMESURVEY_CONFIG = "${limesurveyConfig}";
settings = { settings = {
"listen.owner" = config.services.httpd.user; "listen.owner" = config.services.httpd.user;
@ -256,11 +284,12 @@ in
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
before = [ "phpfpm-limesurvey.service" ]; before = [ "phpfpm-limesurvey.service" ];
after = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service"; after = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
environment.DBENGINE = "${cfg.database.dbEngine}";
environment.LIMESURVEY_CONFIG = limesurveyConfig; environment.LIMESURVEY_CONFIG = limesurveyConfig;
script = '' script = ''
# update or install the database as required # update or install the database as required
${pkgs.php}/bin/php ${pkg}/share/limesurvey/application/commands/console.php updatedb || \ ${pkgs.php80}/bin/php ${pkg}/share/limesurvey/application/commands/console.php updatedb || \
${pkgs.php}/bin/php ${pkg}/share/limesurvey/application/commands/console.php install admin password admin admin@example.com verbose ${pkgs.php80}/bin/php ${pkg}/share/limesurvey/application/commands/console.php install admin password admin admin@example.com verbose
''; '';
serviceConfig = { serviceConfig = {
User = user; User = user;

View file

@ -5,7 +5,7 @@ self-hostable cloud platform. The server setup can be automated using
[services.nextcloud](#opt-services.nextcloud.enable). A [services.nextcloud](#opt-services.nextcloud.enable). A
desktop client is packaged at `pkgs.nextcloud-client`. desktop client is packaged at `pkgs.nextcloud-client`.
The current default by NixOS is `nextcloud25` which is also the latest The current default by NixOS is `nextcloud26` which is also the latest
major version available. major version available.
## Basic usage {#module-services-nextcloud-basic-usage} ## Basic usage {#module-services-nextcloud-basic-usage}

View file

@ -204,7 +204,7 @@ in {
package = mkOption { package = mkOption {
type = types.package; type = types.package;
description = lib.mdDoc "Which package to use for the Nextcloud instance."; description = lib.mdDoc "Which package to use for the Nextcloud instance.";
relatedPackages = [ "nextcloud24" "nextcloud25" ]; relatedPackages = [ "nextcloud24" "nextcloud25" "nextcloud26" ];
}; };
phpPackage = mkOption { phpPackage = mkOption {
type = types.package; type = types.package;
@ -514,6 +514,27 @@ in {
`http://hostname.domain/bucket` instead. `http://hostname.domain/bucket` instead.
''; '';
}; };
sseCKeyFile = mkOption {
type = types.nullOr types.path;
default = null;
example = "/var/nextcloud-objectstore-s3-sse-c-key";
description = lib.mdDoc ''
If provided this is the full path to a file that contains the key
to enable [server-side encryption with customer-provided keys][1]
(SSE-C).
The file must contain a random 32-byte key encoded as a base64
string, e.g. generated with the command
```
openssl rand 32 | base64
```
Must be readable by user `nextcloud`.
[1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html
'';
};
}; };
}; };
}; };
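A sketch of how the new `sseCKeyFile` option might be combined with the existing S3 object storage settings; the bucket name and key path are placeholders, and the remaining objectstore options (hostname, key, secretFile, and so on) are omitted for brevity:

```
# Hypothetical use of the new sseCKeyFile option.
#   openssl rand 32 | base64 > /var/nextcloud-objectstore-s3-sse-c-key
#   chown nextcloud /var/nextcloud-objectstore-s3-sse-c-key
{
  services.nextcloud.config.objectstore.s3 = {
    enable = true;
    bucket = "nextcloud";
    sseCKeyFile = "/var/nextcloud-objectstore-s3-sse-c-key";
  };
}
```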
@ -652,7 +673,7 @@ in {
config = mkIf cfg.enable (mkMerge [ config = mkIf cfg.enable (mkMerge [
{ warnings = let { warnings = let
latest = 25; latest = 26;
upgradeWarning = major: nixos: upgradeWarning = major: nixos:
'' ''
A legacy Nextcloud install (from before NixOS ${nixos}) may be installed. A legacy Nextcloud install (from before NixOS ${nixos}) may be installed.
@ -667,20 +688,6 @@ in {
`services.nextcloud.package`. `services.nextcloud.package`.
''; '';
# FIXME(@Ma27) remove as soon as nextcloud properly supports
# mariadb >=10.6.
isUnsupportedMariadb =
# All currently supported Nextcloud versions are affected (https://github.com/nextcloud/server/issues/25436).
(versionOlder cfg.package.version "24")
# This module uses mysql
&& (cfg.config.dbtype == "mysql")
# MySQL is managed via NixOS
&& config.services.mysql.enable
# We're using MariaDB
&& (getName config.services.mysql.package) == "mariadb-server"
# MariaDB is at least 10.6 and thus not supported
&& (versionAtLeast (getVersion config.services.mysql.package) "10.6");
in (optional (cfg.poolConfig != null) '' in (optional (cfg.poolConfig != null) ''
Using config.services.nextcloud.poolConfig is deprecated and will become unsupported in a future release. Using config.services.nextcloud.poolConfig is deprecated and will become unsupported in a future release.
Please migrate your configuration to config.services.nextcloud.poolSettings. Please migrate your configuration to config.services.nextcloud.poolSettings.
@ -688,6 +695,7 @@ in {
++ (optional (versionOlder cfg.package.version "23") (upgradeWarning 22 "22.05")) ++ (optional (versionOlder cfg.package.version "23") (upgradeWarning 22 "22.05"))
++ (optional (versionOlder cfg.package.version "24") (upgradeWarning 23 "22.05")) ++ (optional (versionOlder cfg.package.version "24") (upgradeWarning 23 "22.05"))
++ (optional (versionOlder cfg.package.version "25") (upgradeWarning 24 "22.11")) ++ (optional (versionOlder cfg.package.version "25") (upgradeWarning 24 "22.11"))
++ (optional (versionOlder cfg.package.version "26") (upgradeWarning 25 "23.05"))
++ (optional cfg.enableBrokenCiphersForSSE '' ++ (optional cfg.enableBrokenCiphersForSSE ''
You're using PHP's openssl extension built against OpenSSL 1.1 for Nextcloud. You're using PHP's openssl extension built against OpenSSL 1.1 for Nextcloud.
This is only necessary if you're using Nextcloud's server-side encryption. This is only necessary if you're using Nextcloud's server-side encryption.
@ -704,18 +712,7 @@ in {
See <https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/encryption_configuration.html#disabling-encryption> on how to achieve this. See <https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/encryption_configuration.html#disabling-encryption> on how to achieve this.
For more context, here is the implementing pull request: https://github.com/NixOS/nixpkgs/pull/198470 For more context, here is the implementing pull request: https://github.com/NixOS/nixpkgs/pull/198470
'') '');
++ (optional isUnsupportedMariadb ''
You seem to be using MariaDB at an unsupported version (i.e. at least 10.6)!
Please note that this isn't supported officially by Nextcloud. You can either
* Switch to `pkgs.mysql`
* Downgrade MariaDB to at least 10.5
* Work around Nextcloud's problems by specifying `innodb_read_only_compressed=0`
For further context, please read
https://help.nextcloud.com/t/update-to-next-cloud-21-0-2-has-get-an-error/117028/15
'');
services.nextcloud.package = with pkgs; services.nextcloud.package = with pkgs;
mkDefault ( mkDefault (
@ -726,12 +723,13 @@ in {
`pkgs.nextcloud`. `pkgs.nextcloud`.
'' ''
else if versionOlder stateVersion "22.11" then nextcloud24 else if versionOlder stateVersion "22.11" then nextcloud24
else nextcloud25 else if versionOlder stateVersion "23.05" then nextcloud25
else nextcloud26
); );
services.nextcloud.phpPackage = services.nextcloud.phpPackage =
if versionOlder cfg.package.version "24" then pkgs.php80 if versionOlder cfg.package.version "26" then pkgs.php81
else pkgs.php81; else pkgs.php82;
} }
{ assertions = [ { assertions = [
@ -773,6 +771,7 @@ in {
'use_ssl' => ${boolToString s3.useSsl}, 'use_ssl' => ${boolToString s3.useSsl},
${optionalString (s3.region != null) "'region' => '${s3.region}',"} ${optionalString (s3.region != null) "'region' => '${s3.region}',"}
'use_path_style' => ${boolToString s3.usePathStyle}, 'use_path_style' => ${boolToString s3.usePathStyle},
${optionalString (s3.sseCKeyFile != null) "'sse_c_key' => nix_read_secret('${s3.sseCKeyFile}'),"}
], ],
] ]
''; '';
@ -958,6 +957,9 @@ in {
''; '';
serviceConfig.Type = "oneshot"; serviceConfig.Type = "oneshot";
serviceConfig.User = "nextcloud"; serviceConfig.User = "nextcloud";
# On Nextcloud ≥ 26, it is not necessary to patch the database files to prevent
# an automatic creation of the database user.
environment.NC_setup_create_db_user = lib.mkIf (nextcloudGreaterOrEqualThan "26") "false";
}; };
nextcloud-cron = { nextcloud-cron = {
after = [ "nextcloud-setup.service" ]; after = [ "nextcloud-setup.service" ];
@ -1009,14 +1011,6 @@ in {
name = cfg.config.dbuser; name = cfg.config.dbuser;
ensurePermissions = { "${cfg.config.dbname}.*" = "ALL PRIVILEGES"; }; ensurePermissions = { "${cfg.config.dbname}.*" = "ALL PRIVILEGES"; };
}]; }];
# FIXME(@Ma27) Nextcloud isn't compatible with mariadb 10.6,
# this is a workaround.
# See https://help.nextcloud.com/t/update-to-next-cloud-21-0-2-has-get-an-error/117028/22
settings = mkIf (versionOlder cfg.package.version "24") {
mysqld = {
innodb_read_only_compressed = 0;
};
};
initialScript = pkgs.writeText "mysql-init" '' initialScript = pkgs.writeText "mysql-init" ''
CREATE USER '${cfg.config.dbname}'@'localhost' IDENTIFIED BY '${builtins.readFile( cfg.config.dbpassFile )}'; CREATE USER '${cfg.config.dbname}'@'localhost' IDENTIFIED BY '${builtins.readFile( cfg.config.dbpassFile )}';
CREATE DATABASE IF NOT EXISTS ${cfg.config.dbname}; CREATE DATABASE IF NOT EXISTS ${cfg.config.dbname};

View file

@ -10,12 +10,11 @@ let
format = pkgs.formats.ini { format = pkgs.formats.ini {
mkKeyValue = key: value: mkKeyValue = key: value:
let let
value' = if builtins.isNull value then value' = lib.optionalString (value != null)
"" (if builtins.isBool value then
else if builtins.isBool value then if value == true then "true" else "false"
if value == true then "true" else "false" else
else toString value);
toString value;
in "${key} = ${value'}"; in "${key} = ${value'}";
}; };

View file

@ -60,7 +60,7 @@ in
''; '';
}; };
rootCredentialsFile = mkOption { rootCredentialsFile = mkOption {
type = types.nullOr types.path; type = types.nullOr types.path;
default = null; default = null;
description = lib.mdDoc '' description = lib.mdDoc ''
@ -96,29 +96,62 @@ in
config = mkIf cfg.enable { config = mkIf cfg.enable {
warnings = optional ((cfg.accessKey != "") || (cfg.secretKey != "")) "services.minio.`accessKey` and services.minio.`secretKey` are deprecated, please use services.minio.`rootCredentialsFile` instead."; warnings = optional ((cfg.accessKey != "") || (cfg.secretKey != "")) "services.minio.`accessKey` and services.minio.`secretKey` are deprecated, please use services.minio.`rootCredentialsFile` instead.";
systemd.tmpfiles.rules = [ systemd = lib.mkMerge [{
"d '${cfg.configDir}' - minio minio - -" tmpfiles.rules = [
] ++ (map (x: "d '" + x + "' - minio minio - - ") cfg.dataDir); "d '${cfg.configDir}' - minio minio - -"
] ++ (map (x: "d '" + x + "' - minio minio - - ") cfg.dataDir);
systemd.services.minio = { services.minio = {
description = "Minio Object Storage"; description = "Minio Object Storage";
after = [ "network-online.target" ]; after = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
serviceConfig = { serviceConfig = {
ExecStart = "${cfg.package}/bin/minio server --json --address ${cfg.listenAddress} --console-address ${cfg.consoleAddress} --config-dir=${cfg.configDir} ${toString cfg.dataDir}"; ExecStart = "${cfg.package}/bin/minio server --json --address ${cfg.listenAddress} --console-address ${cfg.consoleAddress} --config-dir=${cfg.configDir} ${toString cfg.dataDir}";
Type = "simple"; Type = "simple";
User = "minio"; User = "minio";
Group = "minio"; Group = "minio";
LimitNOFILE = 65536; LimitNOFILE = 65536;
EnvironmentFile = if (cfg.rootCredentialsFile != null) then cfg.rootCredentialsFile EnvironmentFile =
else if ((cfg.accessKey != "") || (cfg.secretKey != "")) then (legacyCredentials cfg) if (cfg.rootCredentialsFile != null) then cfg.rootCredentialsFile
else null; else if ((cfg.accessKey != "") || (cfg.secretKey != "")) then (legacyCredentials cfg)
else null;
};
environment = {
MINIO_REGION = "${cfg.region}";
MINIO_BROWSER = "${if cfg.browser then "on" else "off"}";
};
}; };
environment = { }
MINIO_REGION = "${cfg.region}";
MINIO_BROWSER = "${if cfg.browser then "on" else "off"}"; (lib.mkIf (cfg.rootCredentialsFile != null) {
}; # The service will fail if the credentials file is missing
}; services.minio.unitConfig.ConditionPathExists = cfg.rootCredentialsFile;
# The service will not restart if the credentials file has
# been changed. This can cause stale root credentials.
paths.minio-root-credentials = {
wantedBy = [ "multi-user.target" ];
pathConfig = {
PathChanged = [ cfg.rootCredentialsFile ];
Unit = "minio-restart.service";
};
};
services.minio-restart = {
description = "Restart MinIO";
script = ''
systemctl restart minio.service
'';
serviceConfig = {
Type = "oneshot";
Restart = "on-failure";
RestartSec = 5;
};
};
})];
users.users.minio = { users.users.minio = {
group = "minio"; group = "minio";

View file

@ -184,8 +184,8 @@ let
brotli_types ${lib.concatStringsSep " " compressMimeTypes}; brotli_types ${lib.concatStringsSep " " compressMimeTypes};
''} ''}
# https://docs.nginx.com/nginx/admin-guide/web-server/compression/
${optionalString cfg.recommendedGzipSettings '' ${optionalString cfg.recommendedGzipSettings ''
# https://docs.nginx.com/nginx/admin-guide/web-server/compression/
gzip on; gzip on;
gzip_static on; gzip_static on;
gzip_vary on; gzip_vary on;
@ -195,6 +195,14 @@ let
gzip_types ${lib.concatStringsSep " " compressMimeTypes}; gzip_types ${lib.concatStringsSep " " compressMimeTypes};
''} ''}
${optionalString cfg.recommendedZstdSettings ''
zstd on;
zstd_comp_level 9;
zstd_min_length 256;
zstd_static on;
zstd_types ${lib.concatStringsSep " " compressMimeTypes};
''}
${optionalString cfg.recommendedProxySettings '' ${optionalString cfg.recommendedProxySettings ''
proxy_redirect off; proxy_redirect off;
proxy_connect_timeout ${cfg.proxyTimeout}; proxy_connect_timeout ${cfg.proxyTimeout};
@ -490,6 +498,16 @@ in
''; '';
}; };
recommendedZstdSettings = mkOption {
default = false;
type = types.bool;
description = lib.mdDoc ''
Enable recommended zstd settings. Learn more about compression in Zstd format [here](https://github.com/tokers/zstd-nginx-module).
This adds `pkgs.nginxModules.zstd` to `services.nginx.additionalModules`.
'';
};
proxyTimeout = mkOption { proxyTimeout = mkOption {
type = types.str; type = types.str;
default = "60s"; default = "60s";
@ -1015,7 +1033,8 @@ in
groups = config.users.groups; groups = config.users.groups;
}) dependentCertNames; }) dependentCertNames;
services.nginx.additionalModules = optional cfg.recommendedBrotliSettings pkgs.nginxModules.brotli; services.nginx.additionalModules = optional cfg.recommendedBrotliSettings pkgs.nginxModules.brotli
++ lib.optional cfg.recommendedZstdSettings pkgs.nginxModules.zstd;
systemd.services.nginx = { systemd.services.nginx = {
description = "Nginx Web Server"; description = "Nginx Web Server";

View file

@ -81,99 +81,90 @@ let
in in
{ {
options.services.xserver.desktopManager.plasma5 = { options = {
enable = mkOption { services.xserver.desktopManager.plasma5 = {
type = types.bool; enable = mkOption {
default = false; type = types.bool;
description = lib.mdDoc "Enable the Plasma 5 (KDE 5) desktop environment."; default = false;
}; description = lib.mdDoc "Enable the Plasma 5 (KDE 5) desktop environment.";
};
phononBackend = mkOption { phononBackend = mkOption {
type = types.enum [ "gstreamer" "vlc" ]; type = types.enum [ "gstreamer" "vlc" ];
default = "vlc"; default = "vlc";
example = "gstreamer"; example = "gstreamer";
description = lib.mdDoc "Phonon audio backend to install."; description = lib.mdDoc "Phonon audio backend to install.";
}; };
supportDDC = mkOption { useQtScaling = mkOption {
type = types.bool; type = types.bool;
default = false; default = false;
description = lib.mdDoc '' description = lib.mdDoc "Enable HiDPI scaling in Qt.";
Support setting monitor brightness via DDC. };
This is not needed for controlling brightness of the internal monitor runUsingSystemd = mkOption {
of a laptop and as it is considered experimental by upstream, it is description = lib.mdDoc "Use systemd to manage the Plasma session";
disabled by default. type = types.bool;
''; default = true;
}; };
useQtScaling = mkOption { notoPackage = mkPackageOptionMD pkgs "Noto fonts" {
type = types.bool; default = [ "noto-fonts" ];
default = false; example = "noto-fonts-lgc-plus";
description = lib.mdDoc "Enable HiDPI scaling in Qt."; };
};
runUsingSystemd = mkOption { # Internally allows configuring kdeglobals globally
description = lib.mdDoc "Use systemd to manage the Plasma session"; kdeglobals = mkOption {
type = types.bool; internal = true;
default = true; default = {};
}; type = kdeConfigurationType;
};
excludePackages = mkOption { # Internally allows configuring kwin globally
description = lib.mdDoc "List of default packages to exclude from the configuration"; kwinrc = mkOption {
type = types.listOf types.package; internal = true;
default = []; default = {};
example = literalExpression "[ pkgs.plasma5Packages.oxygen ]"; type = kdeConfigurationType;
}; };
notoPackage = mkPackageOptionMD pkgs "Noto fonts" { mobile.enable = mkOption {
default = [ "noto-fonts" ]; type = types.bool;
example = "noto-fonts-lgc-plus"; default = false;
}; description = lib.mdDoc ''
Enable support for running the Plasma Mobile shell.
'';
};
# Internally allows configuring kdeglobals globally mobile.installRecommendedSoftware = mkOption {
kdeglobals = mkOption { type = types.bool;
internal = true; default = true;
default = {}; description = lib.mdDoc ''
type = kdeConfigurationType; Installs software recommended for use with Plasma Mobile, but which
}; is not strictly required for Plasma Mobile to run.
'';
};
# Internally allows configuring kwin globally bigscreen.enable = mkOption {
kwinrc = mkOption { type = types.bool;
internal = true; default = false;
default = {}; description = lib.mdDoc ''
type = kdeConfigurationType; Enable support for running the Plasma Bigscreen session.
}; '';
};
mobile.enable = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Enable support for running the Plasma Mobile shell.
'';
};
mobile.installRecommendedSoftware = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Installs software recommended for use with Plasma Mobile, but which
is not strictly required for Plasma Mobile to run.
'';
};
bigscreen.enable = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Enable support for running the Plasma Bigscreen session.
'';
}; };
environment.plasma5.excludePackages = mkOption {
description = lib.mdDoc "List of default packages to exclude from the configuration";
type = types.listOf types.package;
default = [];
example = literalExpression "[ pkgs.plasma5Packages.oxygen ]";
};
}; };
imports = [ imports = [
(mkRemovedOptionModule [ "services" "xserver" "desktopManager" "plasma5" "enableQt4Support" ] "Phonon no longer supports Qt 4.") (mkRemovedOptionModule [ "services" "xserver" "desktopManager" "plasma5" "enableQt4Support" ] "Phonon no longer supports Qt 4.")
(mkRemovedOptionModule [ "services" "xserver" "desktopManager" "plasma5" "supportDDC" ] "DDC/CI is no longer supported upstream.")
(mkRenamedOptionModule [ "services" "xserver" "desktopManager" "kde5" ] [ "services" "xserver" "desktopManager" "plasma5" ]) (mkRenamedOptionModule [ "services" "xserver" "desktopManager" "kde5" ] [ "services" "xserver" "desktopManager" "plasma5" ])
(mkRenamedOptionModule [ "services" "xserver" "desktopManager" "plasma5" "excludePackages" ] [ "environment" "plasma5" "excludePackages" ])
]; ];
config = mkMerge [ config = mkMerge [
@ -201,12 +192,6 @@ in
}; };
}; };
# DDC support
boot.kernelModules = lib.optional cfg.supportDDC "i2c_dev";
services.udev.extraRules = lib.optionalString cfg.supportDDC ''
KERNEL=="i2c-[0-9]*", TAG+="uaccess"
'';
environment.systemPackages = environment.systemPackages =
with libsForQt5; with libsForQt5;
with plasma5; with kdeGear; with kdeFrameworks; with plasma5; with kdeGear; with kdeFrameworks;
@ -301,7 +286,7 @@ in
]; ];
in in
requiredPackages requiredPackages
++ utils.removePackagesByName optionalPackages cfg.excludePackages ++ utils.removePackagesByName optionalPackages config.environment.plasma5.excludePackages
# Phonon audio backend # Phonon audio backend
++ lib.optional (cfg.phononBackend == "gstreamer") libsForQt5.phonon-backend-gstreamer ++ lib.optional (cfg.phononBackend == "gstreamer") libsForQt5.phonon-backend-gstreamer
@ -455,7 +440,7 @@ in
khelpcenter khelpcenter
print-manager print-manager
]; ];
in requiredPackages ++ utils.removePackagesByName optionalPackages cfg.excludePackages; in requiredPackages ++ utils.removePackagesByName optionalPackages config.environment.plasma5.excludePackages;
systemd.user.services = { systemd.user.services = {
plasma-run-with-systemd = { plasma-run-with-systemd = {

View file

@ -138,6 +138,26 @@ let
concatMapStringsSep "\n" (line: prefix + line) (splitString "\n" str); concatMapStringsSep "\n" (line: prefix + line) (splitString "\n" str);
indent = prefixStringLines " "; indent = prefixStringLines " ";
# A scalable variant of the X11 "core" cursor
#
# If not running a fancy desktop environment, the cursor is likely set to
# the default `cursor.pcf` bitmap font. This is 17px wide, so it's very
# small and almost invisible on 4K displays.
fontcursormisc_hidpi = pkgs.xorg.fontxfree86type1.overrideAttrs (old:
let
# The scaling constant is 230/96: the scalable `left_ptr` glyph at
# about 23 points is rendered as 17px, on a 96dpi display.
# Note: the XLFD font size is in decipoints.
size = 2.39583 * cfg.dpi;
sizeString = builtins.head (builtins.split "\\." (toString size));
in
{
postInstall = ''
alias='cursor -xfree86-cursor-medium-r-normal--0-${sizeString}-0-0-p-0-adobe-fontspecific'
echo "$alias" > $out/lib/X11/fonts/Type1/fonts.alias
'';
});
in in
{ {
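A worked example of the scaling constant used in the cursor hunk above, assuming a display configured at 192 DPI (the DPI value is an assumption):

```
# XLFD sizes are in decipoints: size = 2.39583 * dpi
#   dpi =  96  ->  2.39583 *  96 ≈ 230 decipoints (23 pt), the original ~17px cursor
#   dpi = 192  ->  2.39583 * 192 ≈ 460 decipoints (46 pt), roughly twice the size
```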
@ -576,6 +596,15 @@ in
Whether to terminate X upon server reset. Whether to terminate X upon server reset.
''; '';
}; };
upscaleDefaultCursor = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Upscale the default X cursor to be more visible on high-density displays.
Requires `config.services.xserver.dpi` to be set.
'';
};
}; };
}; };
@ -627,6 +656,10 @@ in
+ "${toString (length primaryHeads)} heads set to primary: " + "${toString (length primaryHeads)} heads set to primary: "
+ concatMapStringsSep ", " (x: x.output) primaryHeads; + concatMapStringsSep ", " (x: x.output) primaryHeads;
}) })
{
assertion = cfg.upscaleDefaultCursor -> cfg.dpi != null;
message = "Specify `config.services.xserver.dpi` to upscale the default cursor.";
}
]; ];
environment.etc = environment.etc =
@ -851,6 +884,10 @@ in
''; '';
fonts.enableDefaultFonts = mkDefault true; fonts.enableDefaultFonts = mkDefault true;
fonts.fonts = [
(if cfg.upscaleDefaultCursor then fontcursormisc_hidpi else pkgs.xorg.fontcursormisc)
pkgs.xorg.fontmiscmisc
];
}; };

File diff suppressed because it is too large

View file

@ -1,7 +1,11 @@
# This jobset is used to generate a NixOS channel that contains a # This jobset is used to generate a NixOS channel that contains a
# small subset of Nixpkgs, mostly useful for servers that need fast # small subset of Nixpkgs, mostly useful for servers that need fast
# security updates. # security updates.
#
# Individual jobs can be tested by running:
#
# nix-build nixos/release-small.nix -A <jobname>
#
{ nixpkgs ? { outPath = (import ../lib).cleanSource ./..; revCount = 56789; shortRev = "gfedcba"; } { nixpkgs ? { outPath = (import ../lib).cleanSource ./..; revCount = 56789; shortRev = "gfedcba"; }
, stableBranch ? false , stableBranch ? false
, supportedSystems ? [ "aarch64-linux" "x86_64-linux" ] # no i686-linux , supportedSystems ? [ "aarch64-linux" "x86_64-linux" ] # no i686-linux

View file

@ -346,6 +346,7 @@ in {
keter = handleTest ./keter.nix {}; keter = handleTest ./keter.nix {};
kexec = handleTest ./kexec.nix {}; kexec = handleTest ./kexec.nix {};
keycloak = discoverTests (import ./keycloak.nix); keycloak = discoverTests (import ./keycloak.nix);
keyd = handleTest ./keyd.nix {};
keymap = handleTest ./keymap.nix {}; keymap = handleTest ./keymap.nix {};
knot = handleTest ./knot.nix {}; knot = handleTest ./knot.nix {};
komga = handleTest ./komga.nix {}; komga = handleTest ./komga.nix {};
@ -487,6 +488,7 @@ in {
nomad = handleTest ./nomad.nix {}; nomad = handleTest ./nomad.nix {};
non-default-filesystems = handleTest ./non-default-filesystems.nix {}; non-default-filesystems = handleTest ./non-default-filesystems.nix {};
noto-fonts = handleTest ./noto-fonts.nix {}; noto-fonts = handleTest ./noto-fonts.nix {};
noto-fonts-cjk-qt-default-weight = handleTest ./noto-fonts-cjk-qt-default-weight.nix {};
novacomd = handleTestOn ["x86_64-linux"] ./novacomd.nix {}; novacomd = handleTestOn ["x86_64-linux"] ./novacomd.nix {};
nscd = handleTest ./nscd.nix {}; nscd = handleTest ./nscd.nix {};
nsd = handleTest ./nsd.nix {}; nsd = handleTest ./nsd.nix {};
@ -528,6 +530,7 @@ in {
peerflix = handleTest ./peerflix.nix {}; peerflix = handleTest ./peerflix.nix {};
peering-manager = handleTest ./web-apps/peering-manager.nix {}; peering-manager = handleTest ./web-apps/peering-manager.nix {};
peertube = handleTestOn ["x86_64-linux"] ./web-apps/peertube.nix {}; peertube = handleTestOn ["x86_64-linux"] ./web-apps/peertube.nix {};
peroxide = handleTest ./peroxide.nix {};
pgadmin4 = handleTest ./pgadmin4.nix {}; pgadmin4 = handleTest ./pgadmin4.nix {};
pgjwt = handleTest ./pgjwt.nix {}; pgjwt = handleTest ./pgjwt.nix {};
pgmanage = handleTest ./pgmanage.nix {}; pgmanage = handleTest ./pgmanage.nix {};
@ -610,6 +613,7 @@ in {
searx = handleTest ./searx.nix {}; searx = handleTest ./searx.nix {};
service-runner = handleTest ./service-runner.nix {}; service-runner = handleTest ./service-runner.nix {};
sfxr-qt = handleTest ./sfxr-qt.nix {}; sfxr-qt = handleTest ./sfxr-qt.nix {};
sgtpuzzles = handleTest ./sgtpuzzles.nix {};
shadow = handleTest ./shadow.nix {}; shadow = handleTest ./shadow.nix {};
shadowsocks = handleTest ./shadowsocks {}; shadowsocks = handleTest ./shadowsocks {};
shattered-pixel-dungeon = handleTest ./shattered-pixel-dungeon.nix {}; shattered-pixel-dungeon = handleTest ./shattered-pixel-dungeon.nix {};
@ -623,7 +627,6 @@ in {
soapui = handleTest ./soapui.nix {}; soapui = handleTest ./soapui.nix {};
sogo = handleTest ./sogo.nix {}; sogo = handleTest ./sogo.nix {};
solanum = handleTest ./solanum.nix {}; solanum = handleTest ./solanum.nix {};
solr = handleTest ./solr.nix {};
sonarr = handleTest ./sonarr.nix {}; sonarr = handleTest ./sonarr.nix {};
sourcehut = handleTest ./sourcehut.nix {}; sourcehut = handleTest ./sourcehut.nix {};
spacecookie = handleTest ./spacecookie.nix {}; spacecookie = handleTest ./spacecookie.nix {};

View file

@ -93,7 +93,7 @@ let
api_token = server.succeed( api_token = server.succeed(
"curl --fail -X POST http://test:totallysafe@localhost:3000/api/v1/users/test/tokens " "curl --fail -X POST http://test:totallysafe@localhost:3000/api/v1/users/test/tokens "
+ "-H 'Accept: application/json' -H 'Content-Type: application/json' -d " + "-H 'Accept: application/json' -H 'Content-Type: application/json' -d "
+ "'{\"name\":\"token\"}' | jq '.sha1' | xargs echo -n" + "'{\"name\":\"token\",\"scopes\":[\"all\"]}' | jq '.sha1' | xargs echo -n"
) )
server.succeed( server.succeed(

View file

@@ -9,6 +9,6 @@ import ./make-test-python.nix ({ pkgs, ... }: { import ./make-test-python.nix ({ pkgs, ... }: {
testScript = { nodes, ... }: '' testScript = { nodes, ... }: ''
webserver.wait_for_unit("gollum") webserver.wait_for_unit("gollum")
webserver.wait_for_open_port(${toString nodes.webserver.config.services.gollum.port}) webserver.wait_for_open_port(${toString nodes.webserver.services.gollum.port})
''; '';
}) })
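
The change above is part of a wider test-driver cleanup: option values of a node are read directly from nodes.&lt;name&gt; instead of going through .config (the same switch shows up again in the X11 test further down). A minimal sketch of reading another node's option inside a test script, assuming the same driver as the tests in this tree:

  testScript = { nodes, ... }: ''
    # old spelling: nodes.webserver.config.services.gollum.port
    # new spelling, as used above:
    webserver.wait_for_open_port(${toString nodes.webserver.services.gollum.port})
  '';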

View file

@@ -0,0 +1,82 @@
# The test template is taken from `./keymap.nix`
{ system ? builtins.currentSystem
, config ? { }
, pkgs ? import ../.. { inherit system config; }
}:
with import ../lib/testing-python.nix { inherit system pkgs; };
let
readyFile = "/tmp/readerReady";
resultFile = "/tmp/readerResult";
testReader = pkgs.writeScript "test-input-reader" ''
rm -f ${resultFile} ${resultFile}.tmp
logger "testReader: START: Waiting for $1 characters, expecting '$2'."
touch ${readyFile}
read -r -N $1 chars
rm -f ${readyFile}
if [ "$chars" == "$2" ]; then
logger -s "testReader: PASS: Got '$2' as expected." 2>${resultFile}.tmp
else
logger -s "testReader: FAIL: Expected '$2' but got '$chars'." 2>${resultFile}.tmp
fi
# rename after the file is written to prevent a race condition
mv ${resultFile}.tmp ${resultFile}
'';
mkKeyboardTest = name: { settings, test }: with pkgs.lib; makeTest {
inherit name;
nodes.machine = {
services.keyd = {
enable = true;
inherit settings;
};
};
testScript = ''
import shlex
machine.wait_for_unit("keyd.service")
def run_test_case(cmd, test_case_name, inputs, expected):
with subtest(test_case_name):
assert len(inputs) == len(expected)
machine.execute("rm -f ${readyFile} ${resultFile}")
# set up process that expects all the keys to be entered
machine.succeed(
"{} {} {} {} >&2 &".format(
cmd,
"${testReader}",
len(inputs),
shlex.quote("".join(expected)),
)
)
# wait for reader to be ready
machine.wait_for_file("${readyFile}")
# send all keys
for key in inputs:
machine.send_key(key)
# wait for result and check
machine.wait_for_file("${resultFile}")
machine.succeed("grep -q 'PASS:' ${resultFile}")
test = ${builtins.toJSON test}
run_test_case("openvt -sw --", "${name}", test["press"], test["expect"])
'';
};
in
pkgs.lib.mapAttrs mkKeyboardTest {
swap-ab_and_ctrl-as-shift = {
test.press = [ "a" "ctrl-b" "c" ];
test.expect = [ "b" "A" "c" ];
settings.main = {
"a" = "b";
"b" = "a";
"control" = "oneshot(shift)";
};
};
}
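
The test drives the keyd service through services.keyd.settings and then replays key presses on a virtual terminal, expecting the remapped characters back. A minimal sketch of the same remapping in a regular system configuration (the option path is taken from the test above; the rendered file is keyd's own INI-style format, roughly a [main] section with `a = b` lines):

  services.keyd = {
    enable = true;
    settings.main = {
      a = "b";
      b = "a";
      control = "oneshot(shift)";  # tap Control to get a one-shot Shift
    };
  };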

View file

@@ -13,6 +13,8 @@ import ./make-test-python.nix ({ pkgs, latestKernel ? false, ... }:
}; };
testScript = '' testScript = ''
machine.start(allow_reboot = True)
machine.wait_for_unit("multi-user.target") machine.wait_for_unit("multi-user.target")
machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'") machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'")
machine.screenshot("postboot") machine.screenshot("postboot")
@@ -53,7 +55,14 @@ import ./make-test-python.nix ({ pkgs, latestKernel ? false, ... }:
machine.screenshot("getty") machine.screenshot("getty")
with subtest("Check whether ctrl-alt-delete works"): with subtest("Check whether ctrl-alt-delete works"):
machine.send_key("ctrl-alt-delete") boot_id1 = machine.succeed("cat /proc/sys/kernel/random/boot_id").strip()
machine.wait_for_shutdown() assert boot_id1 != ""
machine.reboot()
boot_id2 = machine.succeed("cat /proc/sys/kernel/random/boot_id").strip()
assert boot_id2 != ""
assert boot_id1 != boot_id2
''; '';
}) })

View file

@@ -1,5 +1,5 @@
import ./make-test-python.nix ({ pkgs, ...} : import ./make-test-python.nix ({ pkgs, ... }:
let let
accessKey = "BKIKJAA5BMMU2RHO6IBB"; accessKey = "BKIKJAA5BMMU2RHO6IBB";
secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12"; secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12";
minioPythonScript = pkgs.writeScript "minio-test.py" '' minioPythonScript = pkgs.writeScript "minio-test.py" ''
@@ -18,41 +18,55 @@ let
sio.seek(0) sio.seek(0)
minioClient.put_object('test-bucket', 'test.txt', sio, sio_len, content_type='text/plain') minioClient.put_object('test-bucket', 'test.txt', sio, sio_len, content_type='text/plain')
''; '';
in { rootCredentialsFile = "/etc/nixos/minio-root-credentials";
name = "minio"; credsPartial = pkgs.writeText "minio-credentials-partial" ''
meta = with pkgs.lib.maintainers; { MINIO_ROOT_USER=${accessKey}
maintainers = [ bachp ]; '';
}; credsFull = pkgs.writeText "minio-credentials-full" ''
MINIO_ROOT_USER=${accessKey}
nodes = { MINIO_ROOT_PASSWORD=${secretKey}
machine = { pkgs, ... }: { '';
services.minio = { in
enable = true; {
rootCredentialsFile = pkgs.writeText "minio-credentials" '' name = "minio";
MINIO_ROOT_USER=${accessKey} meta = with pkgs.lib.maintainers; {
MINIO_ROOT_PASSWORD=${secretKey} maintainers = [ bachp ];
'';
};
environment.systemPackages = [ pkgs.minio-client ];
# Minio requires at least 1GiB of free disk space to run.
virtualisation.diskSize = 4 * 1024;
}; };
};
testScript = '' nodes = {
start_all() machine = { pkgs, ... }: {
machine.wait_for_unit("minio.service") services.minio = {
machine.wait_for_open_port(9000) enable = true;
inherit rootCredentialsFile;
};
environment.systemPackages = [ pkgs.minio-client ];
# Create a test bucket on the server # Minio requires at least 1GiB of free disk space to run.
machine.succeed( virtualisation.diskSize = 4 * 1024;
"mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4" };
) };
machine.succeed("mc mb minio/test-bucket")
machine.succeed("${minioPythonScript}") testScript = ''
assert "test-bucket" in machine.succeed("mc ls minio") import time
assert "Test from Python" in machine.succeed("mc cat minio/test-bucket/test.txt")
machine.shutdown() start_all()
''; # simulate manually editing root credentials file
}) machine.wait_for_unit("multi-user.target")
machine.copy_from_host("${credsPartial}", "${rootCredentialsFile}")
time.sleep(3)
machine.copy_from_host("${credsFull}", "${rootCredentialsFile}")
machine.wait_for_unit("minio.service")
machine.wait_for_open_port(9000)
# Create a test bucket on the server
machine.succeed(
"mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4"
)
machine.succeed("mc mb minio/test-bucket")
machine.succeed("${minioPythonScript}")
assert "test-bucket" in machine.succeed("mc ls minio")
assert "Test from Python" in machine.succeed("mc cat minio/test-bucket/test.txt")
machine.shutdown()
'';
})
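
The reworked MinIO test now provides the root credentials as a file that is edited at runtime (first a partial file, then the full one) instead of baking a pkgs.writeText path into the service definition, apparently to exercise how the service reacts to credential changes. For a real deployment the same option can point at a file kept out of the world-readable Nix store; a hedged sketch (the secrets path is hypothetical):

  services.minio = {
    enable = true;
    # an environment-style file containing
    #   MINIO_ROOT_USER=...
    #   MINIO_ROOT_PASSWORD=...
    rootCredentialsFile = "/run/secrets/minio-root-credentials";
  };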

View file

@@ -3,7 +3,7 @@
import ./make-test-python.nix ({ pkgs, ...} : { import ./make-test-python.nix ({ pkgs, ...} : {
name = "netdata"; name = "netdata";
meta = with pkgs.lib.maintainers; { meta = with pkgs.lib.maintainers; {
maintainers = [ cransom ]; maintainers = [ cransom raitobezarius ];
}; };
nodes = { nodes = {

View file

@@ -26,4 +26,4 @@ foldl
}; };
}) })
{ } { }
[ 24 25 ] [ 24 25 26 ]

View file

@@ -55,6 +55,7 @@ in {
nextcloudwithopenssl1.wait_for_unit("multi-user.target") nextcloudwithopenssl1.wait_for_unit("multi-user.target")
nextcloudwithopenssl1.succeed("nextcloud-occ status") nextcloudwithopenssl1.succeed("nextcloud-occ status")
nextcloudwithopenssl1.succeed("curl -sSf http://nextcloudwithopenssl1/login") nextcloudwithopenssl1.succeed("curl -sSf http://nextcloudwithopenssl1/login")
nextcloud_version = ${toString nextcloudVersion}
with subtest("With OpenSSL 1 SSE can be enabled and used"): with subtest("With OpenSSL 1 SSE can be enabled and used"):
nextcloudwithopenssl1.succeed("nextcloud-occ app:enable encryption") nextcloudwithopenssl1.succeed("nextcloud-occ app:enable encryption")
@@ -71,7 +72,9 @@ in {
nextcloudwithopenssl1.succeed("nextcloud-occ status") nextcloudwithopenssl1.succeed("nextcloud-occ status")
with subtest("Existing encrypted files cannot be read, but new files can be added"): with subtest("Existing encrypted files cannot be read, but new files can be added"):
nextcloudwithopenssl1.fail("${withRcloneEnv3} ${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file >&2") # This will succeed starting with NC 26 because of their custom implementation of openssl_seal
read_existing_file_test = nextcloudwithopenssl1.fail if nextcloud_version < 26 else nextcloudwithopenssl1.succeed
read_existing_file_test("${withRcloneEnv3} ${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file >&2")
nextcloudwithopenssl1.succeed("nextcloud-occ encryption:disable") nextcloudwithopenssl1.succeed("nextcloud-occ encryption:disable")
nextcloudwithopenssl1.succeed("${copySharedFile3}") nextcloudwithopenssl1.succeed("${copySharedFile3}")
nextcloudwithopenssl1.succeed("grep bye /var/lib/nextcloud/data/root/files/test-shared-file2") nextcloudwithopenssl1.succeed("grep bye /var/lib/nextcloud/data/root/files/test-shared-file2")

View file

@@ -0,0 +1,30 @@
import ./make-test-python.nix ({ pkgs, lib, ... }: {
name = "noto-fonts-cjk-qt";
meta.maintainers = with lib.maintainers; [ oxalica ];
nodes.machine = {
imports = [ ./common/x11.nix ];
fonts = {
enableDefaultFonts = false;
fonts = [ pkgs.noto-fonts-cjk-sans ];
};
};
testScript =
let
script = pkgs.writers.writePython3 "qt-default-weight" {
libraries = [ pkgs.python3Packages.pyqt6 ];
} ''
from PyQt6.QtWidgets import QApplication
from PyQt6.QtGui import QFont, QRawFont
app = QApplication([])
f = QRawFont.fromFont(QFont("Noto Sans CJK SC", 20))
assert f.styleName() == "Regular", f.styleName()
'';
in ''
machine.wait_for_x()
machine.succeed("${script}")
'';
})

View file

@@ -0,0 +1,16 @@
import ./make-test-python.nix ({ pkgs, lib, ... }: {
name = "peroxide";
meta.maintainers = with lib.maintainers; [ aidalgol ];
nodes.machine =
{ config, pkgs, ... }: {
networking.hostName = "nixos";
services.peroxide.enable = true;
};
testScript = ''
machine.wait_for_unit("peroxide.service")
machine.wait_for_open_port(1143) # IMAP
machine.wait_for_open_port(1025) # SMTP
'';
})
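
The new peroxide test only checks that the unit starts and that its IMAP and SMTP listeners come up. A minimal sketch of enabling it on a host and pointing a local mail client at those listeners (port numbers are taken from the test above, not from the module's documentation):

  { services.peroxide.enable = true; }
  # mail client settings per the test: IMAP localhost:1143, SMTP localhost:1025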

View file

@@ -20,6 +20,7 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
}; in { }; in {
pomerium = { pkgs, lib, ... }: { pomerium = { pkgs, lib, ... }: {
imports = [ (base "192.168.1.1") ]; imports = [ (base "192.168.1.1") ];
environment.systemPackages = with pkgs; [ chromium ];
services.pomerium = { services.pomerium = {
enable = true; enable = true;
settings = { settings = {
@@ -98,5 +99,11 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
pomerium.succeed( pomerium.succeed(
"curl -L --resolve login.required:80:127.0.0.1 http://login.required | grep 'hello I am login page'" "curl -L --resolve login.required:80:127.0.0.1 http://login.required | grep 'hello I am login page'"
) )
with subtest("ui"):
pomerium.succeed(
# check for a string that only appears if the UI is displayed correctly
"chromium --no-sandbox --headless --disable-gpu --dump-dom --host-resolver-rules='MAP login.required 127.0.0.1:80' http://login.required/.pomerium | grep 'contact your administrator'"
)
''; '';
}) })

View file

@@ -100,7 +100,7 @@ import ./make-test-python.nix (
"${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} snapshots", "${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} snapshots",
'${pkgs.restic}/bin/restic -r ${remoteFromFileRepository} -p ${passwordFile} snapshots"', '${pkgs.restic}/bin/restic -r ${remoteFromFileRepository} -p ${passwordFile} snapshots"',
"${pkgs.restic}/bin/restic -r ${rcloneRepository} -p ${passwordFile} snapshots", "${pkgs.restic}/bin/restic -r ${rcloneRepository} -p ${passwordFile} snapshots",
"grep 'backup .* /opt' /tmp/fake-restic.log", "grep 'backup.* /opt' /tmp/fake-restic.log",
) )
server.succeed( server.succeed(
# set up # set up
@@ -129,8 +129,8 @@ import ./make-test-python.nix (
# test that custompackage runs both `restic backup` and `restic check` with reasonable commandlines # test that custompackage runs both `restic backup` and `restic check` with reasonable commandlines
"systemctl start restic-backups-custompackage.service", "systemctl start restic-backups-custompackage.service",
"grep 'backup .* /opt' /tmp/fake-restic.log", "grep 'backup.* /opt' /tmp/fake-restic.log",
"grep 'check .* --some-check-option' /tmp/fake-restic.log", "grep 'check.* --some-check-option' /tmp/fake-restic.log",
# test that we can create four snapshots in remotebackup and rclonebackup # test that we can create four snapshots in remotebackup and rclonebackup
"timedatectl set-time '2017-12-13 13:45'", "timedatectl set-time '2017-12-13 13:45'",

View file

@@ -0,0 +1,34 @@
import ./make-test-python.nix ({ pkgs, ...} :
{
name = "sgtpuzzles";
meta = with pkgs.lib.maintainers; {
maintainers = [ tomfitzhenry ];
};
nodes.machine = { ... }:
{
imports = [
./common/x11.nix
];
services.xserver.enable = true;
environment.systemPackages = with pkgs; [
sgtpuzzles
];
};
enableOCR = true;
testScript = { nodes, ... }:
''
start_all()
machine.wait_for_x()
machine.execute("mines >&2 &")
machine.wait_for_window("Mines")
machine.wait_for_text("Marked")
machine.screenshot("mines")
'';
})

View file

@@ -1,56 +0,0 @@
import ./make-test-python.nix ({ pkgs, ... }:
{
name = "solr";
meta.maintainers = [ pkgs.lib.maintainers.aanderse ];
nodes.machine =
{ config, pkgs, ... }:
{
# Ensure the virtual machine has enough memory for Solr to avoid the following error:
#
# OpenJDK 64-Bit Server VM warning:
# INFO: os::commit_memory(0x00000000e8000000, 402653184, 0)
# failed; error='Cannot allocate memory' (errno=12)
#
# There is insufficient memory for the Java Runtime Environment to continue.
# Native memory allocation (mmap) failed to map 402653184 bytes for committing reserved memory.
virtualisation.memorySize = 2000;
services.solr.enable = true;
};
testScript = ''
start_all()
machine.wait_for_unit("solr.service")
machine.wait_for_open_port(8983)
machine.succeed("curl --fail http://localhost:8983/solr/")
# adapted from pkgs.solr/examples/films/README.txt
machine.succeed("sudo -u solr solr create -c films")
assert '"status":0' in machine.succeed(
"""
curl http://localhost:8983/solr/films/schema -X POST -H 'Content-type:application/json' --data-binary '{
"add-field" : {
"name":"name",
"type":"text_general",
"multiValued":false,
"stored":true
},
"add-field" : {
"name":"initial_release_date",
"type":"pdate",
"stored":true
}
}'
"""
)
machine.succeed(
"sudo -u solr post -c films ${pkgs.solr}/example/films/films.json"
)
assert '"name":"Batman Begins"' in machine.succeed(
"curl http://localhost:8983/solr/films/query?q=name:batman"
)
'';
})

View file

@@ -13,5 +13,6 @@ import ./make-test-python.nix ({ pkgs, ... }:
start_all() start_all()
machine.wait_for_unit("multi-user.target") machine.wait_for_unit("multi-user.target")
machine.wait_for_unit("sssd.service") machine.wait_for_unit("sssd.service")
machine.succeed("sssctl config-check")
''; '';
}) })

View file

@@ -26,7 +26,7 @@ import ./make-test-python.nix ({ pkgs, ...} : {
}; };
testScript = { nodes, ... }: let testScript = { nodes, ... }: let
user = nodes.machine.config.users.users.alice; user = nodes.machine.users.users.alice;
in '' in ''
machine.wait_for_x() machine.wait_for_x()
machine.wait_for_file("${user.home}/.Xauthority") machine.wait_for_file("${user.home}/.Xauthority")

View file

@@ -1,18 +1,36 @@
import ./make-test-python.nix { import ./make-test-python.nix {
name = "zram-generator"; name = "zram-generator";
nodes.machine = { ... }: { nodes = {
zramSwap = { single = { ... }: {
enable = true; virtualisation = {
priority = 10; emptyDiskImages = [ 512 ];
algorithm = "lz4"; };
swapDevices = 2; zramSwap = {
memoryPercent = 30; enable = true;
memoryMax = 10 * 1024 * 1024; priority = 10;
algorithm = "lz4";
swapDevices = 1;
memoryPercent = 30;
memoryMax = 10 * 1024 * 1024;
writebackDevice = "/dev/vdb";
};
};
machine = { ... }: {
zramSwap = {
enable = true;
priority = 10;
algorithm = "lz4";
swapDevices = 2;
memoryPercent = 30;
memoryMax = 10 * 1024 * 1024;
};
}; };
}; };
testScript = '' testScript = ''
single.wait_for_unit("systemd-zram-setup@zram0.service")
machine.wait_for_unit("systemd-zram-setup@zram0.service") machine.wait_for_unit("systemd-zram-setup@zram0.service")
machine.wait_for_unit("systemd-zram-setup@zram1.service") machine.wait_for_unit("systemd-zram-setup@zram1.service")
zram = machine.succeed("zramctl --noheadings --raw") zram = machine.succeed("zramctl --noheadings --raw")
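
The zram-generator test gains a second node that exercises writeback to a real block device via zramSwap.writebackDevice. A hedged sketch of using that on an ordinary machine (the partition label is hypothetical; the sysfs path is the kernel's standard zram attribute):

  zramSwap = {
    enable = true;
    algorithm = "lz4";
    memoryPercent = 30;
    # idle or incompressible pages can be written back to this device;
    # it shows up as /sys/block/zram0/backing_dev at runtime
    writebackDevice = "/dev/disk/by-partlabel/zram-writeback";
  };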

View file

@@ -58,14 +58,14 @@
}: }:
stdenv.mkDerivation rec { stdenv.mkDerivation rec {
pname = "ardour"; pname = "ardour";
version = "7.1"; version = "7.3";
# We can't use `fetchFromGitea` here, as attempting to fetch release archives from git.ardour.org # We can't use `fetchFromGitea` here, as attempting to fetch release archives from git.ardour.org
# result in an empty archive. See https://tracker.ardour.org/view.php?id=7328 for more info. # result in an empty archive. See https://tracker.ardour.org/view.php?id=7328 for more info.
src = fetchgit { src = fetchgit {
url = "git://git.ardour.org/ardour/ardour.git"; url = "git://git.ardour.org/ardour/ardour.git";
rev = version; rev = version;
hash = "sha256-eLF9n71tjdPA+ks0B8UonmPZqRgcZEA7ok79+m9PioU="; hash = "sha256-fDZGmKQ6qgENkq8NY/J67Jym+IXoOYs8DT4xyPXLcC4=";
}; };
bundledContent = fetchzip { bundledContent = fetchzip {

View file

@@ -20,7 +20,7 @@
, cddbSupport ? true, libcddb ? null , cddbSupport ? true, libcddb ? null
, cdioSupport ? true, libcdio ? null, libcdio-paranoia ? null , cdioSupport ? true, libcdio ? null, libcdio-paranoia ? null
, cueSupport ? true, libcue ? null , cueSupport ? true, libcue ? null
, discidSupport ? (!stdenv.isDarwin), libdiscid ? null , discidSupport ? false, libdiscid ? null
, ffmpegSupport ? true, ffmpeg ? null , ffmpegSupport ? true, ffmpeg ? null
, flacSupport ? true, flac ? null , flacSupport ? true, flac ? null
, madSupport ? true, libmad ? null , madSupport ? true, libmad ? null

View file

@@ -10,13 +10,13 @@
# gcc only supports objc on darwin # gcc only supports objc on darwin
buildGoModule.override { stdenv = clangStdenv; } rec { buildGoModule.override { stdenv = clangStdenv; } rec {
pname = "go-musicfox"; pname = "go-musicfox";
version = "3.7.3"; version = "3.7.5";
src = fetchFromGitHub { src = fetchFromGitHub {
owner = "anhoder"; owner = "anhoder";
repo = pname; repo = pname;
rev = "v${version}"; rev = "v${version}";
hash = "sha256-aM7IJGRRY2V2Rovj042ctg5254EUw1bTuoRCp9Za1FY="; hash = "sha256-+0s+MCFLw527gFj7pfiYGfKYihthFjRLPeto2SbALw0=";
}; };
deleteVendor = true; deleteVendor = true;
@@ -28,7 +28,7 @@ buildGoModule.override { stdenv = clangStdenv; } rec {
ldflags = [ ldflags = [
"-s" "-s"
"-w" "-w"
"-X go-musicfox/pkg/constants.AppVersion=${version}" "-X github.com/go-musicfox/go-musicfox/pkg/constants.AppVersion=${version}"
]; ];
nativeBuildInputs = [ nativeBuildInputs = [

View file

@@ -10,6 +10,7 @@
, qtquickcontrols2 , qtquickcontrols2
, qttools , qttools
, qtwebengine , qtwebengine
, stdenv
}: }:
mkDerivation rec { mkDerivation rec {
@@ -61,6 +62,7 @@ mkDerivation rec {
meta = with lib; { meta = with lib; {
inherit (qtbase.meta) platforms; inherit (qtbase.meta) platforms;
broken = stdenv.isDarwin; # test build fails, but the project is not maintained anymore
description = "Cloud music integration for your desktop"; description = "Cloud music integration for your desktop";
homepage = "https://gitlab.com/ColinDuquesnoy/MellowPlayer"; homepage = "https://gitlab.com/ColinDuquesnoy/MellowPlayer";

View file

@@ -0,0 +1,40 @@
{
lib,
fetchFromGitHub,
rustPlatform,
stdenv,
libusb1,
AppKit,
IOKit,
pkg-config,
}:
rustPlatform.buildRustPackage rec {
pname = "minidsp";
version = "0.1.9";
src = fetchFromGitHub {
owner = "mrene";
repo = "minidsp-rs";
# v0.1.9 tag is out of date, cargo lock fixed in next commit on main
rev = "b03a95a05917f20b9c3153c03e4e99dd943d9f6f";
hash = "sha256-uZBrX3VCCpr7AY82PgR596mncL5wWDK7bpx2m/jCJBE=";
};
cargoHash = "sha256-0PyojyimxnwEtHA98Npf4eHvycjuXdPrrIFilVuEnQk=";
cargoBuildFlags = ["-p minidsp -p minidsp-daemon"];
buildInputs =
lib.optionals stdenv.isLinux [libusb1]
++ lib.optionals stdenv.isDarwin [AppKit IOKit];
nativeBuildInputs = lib.optionals stdenv.isLinux [pkg-config];
meta = with lib; {
description = "A control interface for some MiniDSP products";
homepage = "https://github.com/mrene/minidsp-rs";
license = licenses.asl20;
platforms = platforms.linux ++ platforms.darwin;
maintainers = [maintainers.adamcstephens];
};
}
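
This adds a new Rust package built with buildRustPackage; the Darwin frameworks are only pulled into buildInputs on macOS. A hedged sketch of evaluating the expression on its own, mirroring how such packages are typically wired up (attribute paths assumed; the file name is whatever this expression is saved as):

  # default.nix next to the expression above, saved here as ./minidsp.nix
  let
    pkgs = import <nixpkgs> { };
  in
  pkgs.callPackage ./minidsp.nix {
    inherit (pkgs.darwin.apple_sdk.frameworks) AppKit IOKit;
  }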

View file

@@ -7,13 +7,13 @@
python3Packages.buildPythonApplication rec { python3Packages.buildPythonApplication rec {
pname = "mpdevil"; pname = "mpdevil";
version = "1.10.1"; version = "1.10.2";
src = fetchFromGitHub { src = fetchFromGitHub {
owner = "SoongNoonien"; owner = "SoongNoonien";
repo = pname; repo = pname;
rev = "v${version}"; rev = "v${version}";
sha256 = "sha256-w31e8cJvdep/ZzmDBCfdCZotrPunQBl1cTTWjs3sE1w="; sha256 = "sha256-zLCL64yX7i/mtUf8CkgrSwb6zZ7vhR1Dw8eUH/vgFT4=";
}; };
format = "other"; format = "other";
@@ -50,6 +50,6 @@ python3Packages.buildPythonApplication rec {
homepage = "https://github.com/SoongNoonien/mpdevil"; homepage = "https://github.com/SoongNoonien/mpdevil";
license = licenses.gpl3Plus; license = licenses.gpl3Plus;
platforms = platforms.linux; platforms = platforms.linux;
maintainers = with maintainers; [ ]; maintainers = with maintainers; [ apfelkuchen6 ];
}; };
} }

View file

@@ -8,13 +8,13 @@
mkDerivation rec { mkDerivation rec {
pname = "musescore"; pname = "musescore";
version = "4.0.1"; version = "4.0.2";
src = fetchFromGitHub { src = fetchFromGitHub {
owner = "musescore"; owner = "musescore";
repo = "MuseScore"; repo = "MuseScore";
rev = "v${version}"; rev = "v${version}";
sha256 = "sha256-Xhjjm/pYcjfZE632eP2jujqUAmzdYNa81EPrvS5UKnQ="; sha256 = "sha256-3NSHUdTyAC/WOhkB6yBrqtV3LV4Hl1m3poB3ojtJMfs=";
}; };
patches = [ patches = [
# See https://github.com/musescore/MuseScore/issues/15571 # See https://github.com/musescore/MuseScore/issues/15571

View file

@@ -11,11 +11,11 @@
stdenv.mkDerivation rec { stdenv.mkDerivation rec {
pname = "ocenaudio"; pname = "ocenaudio";
version = "3.11.21"; version = "3.11.22";
src = fetchurl { src = fetchurl {
url = "https://www.ocenaudio.com/downloads/index.php/ocenaudio_debian9_64.deb?version=${version}"; url = "https://www.ocenaudio.com/downloads/index.php/ocenaudio_debian9_64.deb?version=${version}";
sha256 = "sha256-nItqx3g4W3s1phHe6F8EtOL4nwJQ0XnKB8Ujg71/Q3Q="; sha256 = "sha256-mmPFASc2ARI1ht9SYhFsDjTkWfhxXdc2zEi5rvfanZc=";
}; };
nativeBuildInputs = [ nativeBuildInputs = [
@@ -49,7 +49,7 @@ stdenv.mkDerivation rec {
homepage = "https://www.ocenaudio.com"; homepage = "https://www.ocenaudio.com";
sourceProvenance = with sourceTypes; [ binaryNativeCode ]; sourceProvenance = with sourceTypes; [ binaryNativeCode ];
license = licenses.unfree; license = licenses.unfree;
platforms = platforms.linux; platforms = [ "x86_64-linux" ];
maintainers = with maintainers; [ onny ]; maintainers = with maintainers; [ onny ];
}; };
} }

Some files were not shown because too many files have changed in this diff.