Project import generated by Copybara.

GitOrigin-RevId: 3c5319ad3aa51551182ac82ea17ab1c6b0f0df89
This commit is contained in:
Default email 2023-03-04 15:14:45 +03:00
parent a861c3f460
commit 4d5a95770c
2519 changed files with 42801 additions and 43424 deletions

View file

@ -104,9 +104,7 @@
# Python-related code and docs # Python-related code and docs
/maintainers/scripts/update-python-libraries @FRidh /maintainers/scripts/update-python-libraries @FRidh
/pkgs/top-level/python-packages.nix @FRidh @jonringer
/pkgs/development/interpreters/python @FRidh /pkgs/development/interpreters/python @FRidh
/pkgs/development/python-modules @FRidh @jonringer
/doc/languages-frameworks/python.section.md @FRidh @mweinelt /doc/languages-frameworks/python.section.md @FRidh @mweinelt
/pkgs/development/tools/poetry2nix @adisbladis /pkgs/development/tools/poetry2nix @adisbladis
/pkgs/development/interpreters/python/hooks @FRidh @jonringer /pkgs/development/interpreters/python/hooks @FRidh @jonringer

View file

@ -24,7 +24,7 @@ jobs:
with: with:
ref: ${{ github.event.pull_request.head.sha }} ref: ${{ github.event.pull_request.head.sha }}
- name: Create backport PRs - name: Create backport PRs
uses: korthout/backport-action@v1.1.0 uses: korthout/backport-action@v1.2.0
with: with:
# Config README: https://github.com/korthout/backport-action#backport-action # Config README: https://github.com/korthout/backport-action#backport-action
pull_description: |- pull_description: |-

View file

@ -19,7 +19,7 @@ jobs:
# we don't limit this action to only NixOS repo since the checks are cheap and useful developer feedback # we don't limit this action to only NixOS repo since the checks are cheap and useful developer feedback
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v3
- uses: cachix/install-nix-action@v19 - uses: cachix/install-nix-action@v20
- uses: cachix/cachix-action@v12 - uses: cachix/cachix-action@v12
with: with:
# This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere. # This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.

View file

@ -28,16 +28,14 @@ jobs:
with: with:
# pull_request_target checks out the base branch by default # pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v19 - uses: cachix/install-nix-action@v20
with: with:
# nixpkgs commit is pinned so that it doesn't break # nixpkgs commit is pinned so that it doesn't break
# editorconfig-checker 2.4.0 # editorconfig-checker 2.4.0
nix_path: nixpkgs=https://github.com/NixOS/nixpkgs/archive/c473cc8714710179df205b153f4e9fa007107ff9.tar.gz nix_path: nixpkgs=https://github.com/NixOS/nixpkgs/archive/c473cc8714710179df205b153f4e9fa007107ff9.tar.gz
- name: install editorconfig-checker
run: nix-env -iA editorconfig-checker -f '<nixpkgs>'
- name: Checking EditorConfig - name: Checking EditorConfig
run: | run: |
cat "$HOME/changed_files" | xargs -r editorconfig-checker -disable-indent-size cat "$HOME/changed_files" | nix-shell -p editorconfig-checker --run 'xargs -r editorconfig-checker -disable-indent-size'
- if: ${{ failure() }} - if: ${{ failure() }}
run: | run: |
echo "::error :: Hey! It looks like your changes don't follow our editorconfig settings. Read https://editorconfig.org/#download to configure your editor so you never see this error again." echo "::error :: Hey! It looks like your changes don't follow our editorconfig settings. Read https://editorconfig.org/#download to configure your editor so you never see this error again."

View file

@ -18,7 +18,7 @@ jobs:
with: with:
# pull_request_target checks out the base branch by default # pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v19 - uses: cachix/install-nix-action@v20
with: with:
# explicitly enable sandbox # explicitly enable sandbox
extra_nix_config: sandbox = true extra_nix_config: sandbox = true

View file

@ -18,7 +18,7 @@ jobs:
with: with:
# pull_request_target checks out the base branch by default # pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v19 - uses: cachix/install-nix-action@v20
with: with:
# explicitly enable sandbox # explicitly enable sandbox
extra_nix_config: sandbox = true extra_nix_config: sandbox = true

View file

@ -18,7 +18,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v3
- uses: cachix/install-nix-action@v19 - uses: cachix/install-nix-action@v20
with: with:
# explicitly enable sandbox # explicitly enable sandbox
extra_nix_config: sandbox = true extra_nix_config: sandbox = true

View file

@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v3
- uses: cachix/install-nix-action@v19 - uses: cachix/install-nix-action@v20
with: with:
nix_path: nixpkgs=channel:nixpkgs-unstable nix_path: nixpkgs=channel:nixpkgs-unstable
- name: setup - name: setup

View file

@ -6,3 +6,5 @@ functions/library/locations.xml
highlightjs highlightjs
manual-full.xml manual-full.xml
out out
result
result-*

View file

@ -101,6 +101,7 @@ in
diskSize = "auto"; diskSize = "auto";
additionalSpace = "0M"; # Defaults to 512M. additionalSpace = "0M"; # Defaults to 512M.
copyChannel = false; copyChannel = false;
memSize = 2048; # Qemu VM memory size in megabytes. Defaults to 1024M.
} }
``` ```

View file

@ -6,6 +6,7 @@
This chapter describes several special builders. This chapter describes several special builders.
</para> </para>
<xi:include href="special/fhs-environments.section.xml" /> <xi:include href="special/fhs-environments.section.xml" />
<xi:include href="special/makesetuphook.section.xml" />
<xi:include href="special/mkshell.section.xml" /> <xi:include href="special/mkshell.section.xml" />
<xi:include href="special/darwin-builder.section.xml" /> <xi:include href="special/darwin-builder.section.xml" />
</chapter> </chapter>

View file

@ -0,0 +1,37 @@
# pkgs.makeSetupHook {#sec-pkgs.makeSetupHook}
`pkgs.makeSetupHook` is a builder that produces hooks that go into `nativeBuildInputs`.
## Usage {#sec-pkgs.makeSetupHook-usage}
```nix
pkgs.makeSetupHook {
name = "something-hook";
propagatedBuildInputs = [ pkgs.commandsomething ];
depsTargetTargetPropagated = [ pkgs.libsomething ];
} ./script.sh
```
#### setup hook that depends on the hello package and runs hello and @shell@ is substituted with path to bash
```nix
pkgs.makeSetupHook {
name = "run-hello-hook";
propagatedBuildInputs = [ pkgs.hello ];
substitutions = { shell = "${pkgs.bash}/bin/bash"; };
passthru.tests.greeting = callPackage ./test { };
meta.platforms = lib.platforms.linux;
} (writeScript "run-hello-hook.sh" ''
#!@shell@
hello
'')
```
## Attributes
* `name` Set the name of the hook.
* `propagatedBuildInputs` Runtime dependencies (such as binaries) of the hook.
* `depsTargetTargetPropagated` Non-binary dependencies.
* `meta`
* `passthru`
* `substitutions` Variables for `substituteAll`

View file

@ -56,11 +56,11 @@ See the `zlib` example:
zlib = (pkgs.zlib.override { zlib = (pkgs.zlib.override {
stdenv = pkgs.emscriptenStdenv; stdenv = pkgs.emscriptenStdenv;
}).overrideDerivation }).overrideAttrs
(old: rec { (old: rec {
buildInputs = old.buildInputs ++ [ pkg-config ]; buildInputs = old.buildInputs ++ [ pkg-config ];
# we need to reset this setting! # we need to reset this setting!
NIX_CFLAGS_COMPILE=""; env = (old.env or { }) // { NIX_CFLAGS_COMPILE = ""; };
configurePhase = '' configurePhase = ''
# FIXME: Some tests require writing at $HOME # FIXME: Some tests require writing at $HOME
HOME=$TMPDIR HOME=$TMPDIR

View file

@ -1019,7 +1019,7 @@ buildPythonPackage rec {
The `buildPythonPackage` mainly does four things: The `buildPythonPackage` mainly does four things:
* In the `buildPhase`, it calls `${python.interpreter} setup.py bdist_wheel` to * In the `buildPhase`, it calls `${python.pythonForBuild.interpreter} setup.py bdist_wheel` to
build a wheel binary zipfile. build a wheel binary zipfile.
* In the `installPhase`, it installs the wheel file using `pip install *.whl`. * In the `installPhase`, it installs the wheel file using `pip install *.whl`.
* In the `postFixup` phase, the `wrapPythonPrograms` bash function is called to * In the `postFixup` phase, the `wrapPythonPrograms` bash function is called to
@ -1546,7 +1546,7 @@ of such package using the feature is `pkgs/tools/X11/xpra/default.nix`.
As workaround install it as an extra `preInstall` step: As workaround install it as an extra `preInstall` step:
```shell ```shell
${python.interpreter} setup.py install_data --install-dir=$out --root=$out ${python.pythonForBuild.interpreter} setup.py install_data --install-dir=$out --root=$out
sed -i '/ = data\_files/d' setup.py sed -i '/ = data\_files/d' setup.py
``` ```
@ -1821,6 +1821,11 @@ hosted on GitHub, exporting a `GITHUB_API_TOKEN` is highly recommended.
Updating packages in bulk leads to lots of breakages, which is why a Updating packages in bulk leads to lots of breakages, which is why a
stabilization period on the `python-unstable` branch is required. stabilization period on the `python-unstable` branch is required.
If a package is fragile and often breaks during these bulk updates, it
may be reasonable to set `passthru.skipBulkUpdate = true` in the
derivation. This decision should not be made on a whim and should
always be supported by a qualifying comment.
Once the branch is sufficiently stable it should normally be merged Once the branch is sufficiently stable it should normally be merged
into the `staging` branch. into the `staging` branch.

View file

@ -1329,7 +1329,7 @@ bin/blib.a(bios_console.o): In function `bios_handle_cup':
Adds the `-O2 -D_FORTIFY_SOURCE=2` compiler options. During code generation the compiler knows a great deal of information about buffer sizes (where possible), and attempts to replace insecure unlimited length buffer function calls with length-limited ones. This is especially useful for old, crufty code. Additionally, format strings in writable memory that contain `%n` are blocked. If an application depends on such a format string, it will need to be worked around. Adds the `-O2 -D_FORTIFY_SOURCE=2` compiler options. During code generation the compiler knows a great deal of information about buffer sizes (where possible), and attempts to replace insecure unlimited length buffer function calls with length-limited ones. This is especially useful for old, crufty code. Additionally, format strings in writable memory that contain `%n` are blocked. If an application depends on such a format string, it will need to be worked around.
Additionally, some warnings are enabled which might trigger build failures if compiler warnings are treated as errors in the package build. In this case, set `NIX_CFLAGS_COMPILE` to `-Wno-error=warning-type`. Additionally, some warnings are enabled which might trigger build failures if compiler warnings are treated as errors in the package build. In this case, set `env.NIX_CFLAGS_COMPILE` to `-Wno-error=warning-type`.
This needs to be turned off or fixed for errors similar to: This needs to be turned off or fixed for errors similar to:

View file

@ -1,4 +1,7 @@
{ " " = 32; { "\t" = 9;
"\n" = 10;
"\r" = 13;
" " = 32;
"!" = 33; "!" = 33;
"\"" = 34; "\"" = 34;
"#" = 35; "#" = 35;

View file

@ -213,7 +213,14 @@ rec {
outputSpecified = true; outputSpecified = true;
drvPath = assert condition; drv.${outputName}.drvPath; drvPath = assert condition; drv.${outputName}.drvPath;
outPath = assert condition; drv.${outputName}.outPath; outPath = assert condition; drv.${outputName}.outPath;
}; } //
# TODO: give the derivation control over the outputs.
# `overrideAttrs` may not be the only attribute that needs
# updating when switching outputs.
lib.optionalAttrs (passthru?overrideAttrs) {
# TODO: also add overrideAttrs when overrideAttrs is not custom, e.g. when not splicing.
overrideAttrs = f: (passthru.overrideAttrs f).${outputName};
};
}; };
outputsList = map outputToAttrListElement outputs; outputsList = map outputToAttrListElement outputs;

View file

@ -109,6 +109,8 @@ rec {
traceSeqN 2 { a.b.c = 3; } null traceSeqN 2 { a.b.c = 3; } null
trace: { a = { b = {}; }; } trace: { a = { b = {}; }; }
=> null => null
Type: traceSeqN :: Int -> a -> b -> b
*/ */
traceSeqN = depth: x: y: traceSeqN = depth: x: y:
let snip = v: if isList v then noQuotes "[]" v let snip = v: if isList v then noQuotes "[]" v
@ -173,17 +175,63 @@ rec {
# -- TESTING -- # -- TESTING --
/* Evaluate a set of tests. A test is an attribute set `{expr, /* Evaluates a set of tests.
expected}`, denoting an expression and its expected result. The
result is a list of failed tests, each represented as `{name, A test is an attribute set `{expr, expected}`,
expected, actual}`, denoting the attribute name of the failing denoting an expression and its expected result.
test and its expected and actual results.
The result is a `list` of __failed tests__, each represented as
`{name, expected, result}`,
- expected
- What was passed as `expected`
- result
- The actual `result` of the test
Used for regression testing of the functions in lib; see Used for regression testing of the functions in lib; see
tests.nix for an example. Only tests having names starting with tests.nix for more examples.
"test" are run.
Add attr { tests = ["testName"]; } to run these tests only. Important: Only attributes that start with `test` are executed.
- If you want to run only a subset of the tests add the attribute `tests = ["testName"];`
Example:
runTests {
testAndOk = {
expr = lib.and true false;
expected = false;
};
testAndFail = {
expr = lib.and true false;
expected = true;
};
}
->
[
{
name = "testAndFail";
expected = true;
result = false;
}
]
Type:
runTests :: {
tests = [ String ];
${testName} :: {
expr :: a;
expected :: a;
};
}
->
[
{
name :: String;
expected :: a;
result :: a;
}
]
*/ */
runTests = runTests =
# Tests to run # Tests to run

View file

@ -100,7 +100,7 @@ let
escapeShellArg escapeShellArgs escapeShellArg escapeShellArgs
isStorePath isStringLike isStorePath isStringLike
isValidPosixName toShellVar toShellVars isValidPosixName toShellVar toShellVars
escapeRegex escapeXML replaceChars lowerChars escapeRegex escapeURL escapeXML replaceChars lowerChars
upperChars toLower toUpper addContextFrom splitString upperChars toLower toUpper addContextFrom splitString
removePrefix removeSuffix versionOlder versionAtLeast removePrefix removeSuffix versionOlder versionAtLeast
getName getVersion getName getVersion

View file

@ -109,6 +109,11 @@ in mkLicense lset) ({
fullName = "Apache License 2.0"; fullName = "Apache License 2.0";
}; };
asl20-llvm = {
spdxId = "Apache-2.0 WITH LLVM-exception";
fullName = "Apache License 2.0 with LLVM Exceptions";
};
bitstreamVera = { bitstreamVera = {
spdxId = "Bitstream-Vera"; spdxId = "Bitstream-Vera";
fullName = "Bitstream Vera Font License"; fullName = "Bitstream Vera Font License";
@ -657,11 +662,6 @@ in mkLicense lset) ({
url = "https://opensource.franz.com/preamble.html"; url = "https://opensource.franz.com/preamble.html";
}; };
llvm-exception = {
spdxId = "LLVM-exception";
fullName = "LLVM Exception"; # LLVM exceptions to the Apache 2.0 License
};
lppl12 = { lppl12 = {
spdxId = "LPPL-1.2"; spdxId = "LPPL-1.2";
fullName = "LaTeX Project Public License v1.2"; fullName = "LaTeX Project Public License v1.2";

View file

@ -36,6 +36,9 @@ let
inherit (lib.types) inherit (lib.types)
mkOptionType mkOptionType
; ;
inherit (lib.lists)
last
;
prioritySuggestion = '' prioritySuggestion = ''
Use `lib.mkForce value` or `lib.mkDefault value` to change the priority on any of these definitions. Use `lib.mkForce value` or `lib.mkDefault value` to change the priority on any of these definitions.
''; '';
@ -107,17 +110,28 @@ rec {
/* Creates an Option attribute set for an option that specifies the /* Creates an Option attribute set for an option that specifies the
package a module should use for some purpose. package a module should use for some purpose.
The package is specified as a list of strings representing its attribute path in nixpkgs. Type: mkPackageOption :: pkgs -> (string|[string]) ->
{ default? :: [string], example? :: null|string|[string], extraDescription? :: string } ->
option
Because of this, you need to pass nixpkgs itself as the first argument. The package is specified in the third argument under `default` as a list of strings
representing its attribute path in nixpkgs (or another package set).
Because of this, you need to pass nixpkgs itself (or a subset) as the first argument.
The second argument is the name of the option, used in the description "The <name> package to use.". The second argument may be either a string or a list of strings.
It provides the display name of the package in the description of the generated option
(using only the last element if the passed value is a list)
and serves as the fallback value for the `default` argument.
You can also pass an example value, either a literal string or a package's attribute path. To include extra information in the description, pass `extraDescription` to
append arbitrary text to the generated description.
You can also pass an `example` value, either a literal string or an attribute path.
You can omit the default path if the name of the option is also attribute path in nixpkgs. The default argument can be omitted if the provided name is
an attribute of pkgs (if name is a string) or a
valid attribute path in pkgs (if name is a list).
Type: mkPackageOption :: pkgs -> string -> { default :: [string]; example :: null | string | [string]; } -> option If you wish to explicitly provide no default, pass `null` as `default`.
Example: Example:
mkPackageOption pkgs "hello" { } mkPackageOption pkgs "hello" { }
@ -129,27 +143,46 @@ rec {
example = "pkgs.haskell.packages.ghc92.ghc.withPackages (hkgs: [ hkgs.primes ])"; example = "pkgs.haskell.packages.ghc92.ghc.withPackages (hkgs: [ hkgs.primes ])";
} }
=> { _type = "option"; default = «derivation /nix/store/jxx55cxsjrf8kyh3fp2ya17q99w7541r-ghc-8.10.7.drv»; defaultText = { ... }; description = "The GHC package to use."; example = { ... }; type = { ... }; } => { _type = "option"; default = «derivation /nix/store/jxx55cxsjrf8kyh3fp2ya17q99w7541r-ghc-8.10.7.drv»; defaultText = { ... }; description = "The GHC package to use."; example = { ... }; type = { ... }; }
Example:
mkPackageOption pkgs [ "python39Packages" "pytorch" ] {
extraDescription = "This is an example and doesn't actually do anything.";
}
=> { _type = "option"; default = «derivation /nix/store/gvqgsnc4fif9whvwd9ppa568yxbkmvk8-python3.9-pytorch-1.10.2.drv»; defaultText = { ... }; description = "The pytorch package to use. This is an example and doesn't actually do anything."; type = { ... }; }
*/ */
mkPackageOption = mkPackageOption =
# Package set (a specific version of nixpkgs) # Package set (a specific version of nixpkgs or a subset)
pkgs: pkgs:
# Name for the package, shown in option description # Name for the package, shown in option description
name: name:
{ default ? [ name ], example ? null }: {
let default' = if !isList default then [ default ] else default; # The attribute path where the default package is located
default ? name,
# A string or an attribute path to use as an example
example ? null,
# Additional text to include in the option description
extraDescription ? "",
}:
let
name' = if isList name then last name else name;
default' = if isList default then default else [ default ];
defaultPath = concatStringsSep "." default';
defaultValue = attrByPath default'
(throw "${defaultPath} cannot be found in pkgs") pkgs;
in mkOption { in mkOption {
defaultText = literalExpression ("pkgs." + defaultPath);
type = lib.types.package; type = lib.types.package;
description = "The ${name} package to use."; description = "The ${name'} package to use."
default = attrByPath default' + (if extraDescription == "" then "" else " ") + extraDescription;
(throw "${concatStringsSep "." default'} cannot be found in pkgs") pkgs; ${if default != null then "default" else null} = defaultValue;
defaultText = literalExpression ("pkgs." + concatStringsSep "." default');
${if example != null then "example" else null} = literalExpression ${if example != null then "example" else null} = literalExpression
(if isList example then "pkgs." + concatStringsSep "." example else example); (if isList example then "pkgs." + concatStringsSep "." example else example);
}; };
/* Like mkPackageOption, but emit an mdDoc description instead of DocBook. */ /* Like mkPackageOption, but emit an mdDoc description instead of DocBook. */
mkPackageOptionMD = args: name: extra: mkPackageOptionMD = pkgs: name: extra:
let option = mkPackageOption args name extra; let option = mkPackageOption pkgs name extra;
in option // { description = lib.mdDoc option.description; }; in option // { description = lib.mdDoc option.description; };
/* This option accepts anything, but it does not produce any result. /* This option accepts anything, but it does not produce any result.

View file

@ -34,6 +34,8 @@ rec {
unsafeDiscardStringContext unsafeDiscardStringContext
; ;
asciiTable = import ./ascii-table.nix;
/* Concatenate a list of strings. /* Concatenate a list of strings.
Type: concatStrings :: [string] -> string Type: concatStrings :: [string] -> string
@ -327,9 +329,7 @@ rec {
=> 40 => 40
*/ */
charToInt = let charToInt = c: builtins.getAttr c asciiTable;
table = import ./ascii-table.nix;
in c: builtins.getAttr c table;
/* Escape occurrence of the elements of `list` in `string` by /* Escape occurrence of the elements of `list` in `string` by
prefixing it with a backslash. prefixing it with a backslash.
@ -355,6 +355,21 @@ rec {
*/ */
escapeC = list: replaceStrings list (map (c: "\\x${ toLower (lib.toHexString (charToInt c))}") list); escapeC = list: replaceStrings list (map (c: "\\x${ toLower (lib.toHexString (charToInt c))}") list);
/* Escape the string so it can be safely placed inside a URL
query.
Type: escapeURL :: string -> string
Example:
escapeURL "foo/bar baz"
=> "foo%2Fbar%20baz"
*/
escapeURL = let
unreserved = [ "A" "B" "C" "D" "E" "F" "G" "H" "I" "J" "K" "L" "M" "N" "O" "P" "Q" "R" "S" "T" "U" "V" "W" "X" "Y" "Z" "a" "b" "c" "d" "e" "f" "g" "h" "i" "j" "k" "l" "m" "n" "o" "p" "q" "r" "s" "t" "u" "v" "w" "x" "y" "z" "0" "1" "2" "3" "4" "5" "6" "7" "8" "9" "-" "_" "." "~" ];
toEscape = builtins.removeAttrs asciiTable unreserved;
in
replaceStrings (builtins.attrNames toEscape) (lib.mapAttrsToList (_: c: "%${fixedWidthString 2 "0" (lib.toHexString c)}") toEscape);
/* Quote string to be used safely within the Bourne shell. /* Quote string to be used safely within the Bourne shell.
Type: escapeShellArg :: string -> string Type: escapeShellArg :: string -> string

View file

@ -347,6 +347,15 @@ runTests {
expected = "Hello\\x20World"; expected = "Hello\\x20World";
}; };
testEscapeURL = testAllTrue [
("" == strings.escapeURL "")
("Hello" == strings.escapeURL "Hello")
("Hello%20World" == strings.escapeURL "Hello World")
("Hello%2FWorld" == strings.escapeURL "Hello/World")
("42%25" == strings.escapeURL "42%")
("%20%3F%26%3D%23%2B%25%21%3C%3E%23%22%7B%7D%7C%5C%5E%5B%5D%60%09%3A%2F%40%24%27%28%29%2A%2C%3B" == strings.escapeURL " ?&=#+%!<>#\"{}|\\^[]`\t:/@$'()*,;")
];
testToInt = testAllTrue [ testToInt = testAllTrue [
# Naive # Naive
(123 == toInt "123") (123 == toInt "123")

View file

@ -2442,6 +2442,12 @@
githubId = 5394722; githubId = 5394722;
name = "Spencer Baugh"; name = "Spencer Baugh";
}; };
cathalmullan = {
email = "contact@cathal.dev";
github = "CathalMullan";
githubId = 37139470;
name = "Cathal Mullan";
};
catouc = { catouc = {
email = "catouc@philipp.boeschen.me"; email = "catouc@philipp.boeschen.me";
github = "catouc"; github = "catouc";
@ -2872,6 +2878,13 @@
githubId = 718298; githubId = 718298;
name = "Michael Livshin"; name = "Michael Livshin";
}; };
CobaltCause = {
name = "Charles Hall";
email = "charles@computer.surgery";
github = "CobaltCause";
githubId = 7003738;
matrix = "@charles:computer.surgery";
};
cobbal = { cobbal = {
email = "andrew.cobb@gmail.com"; email = "andrew.cobb@gmail.com";
github = "cobbal"; github = "cobbal";
@ -3588,6 +3601,13 @@
githubId = 62989; githubId = 62989;
name = "Demyan Rogozhin"; name = "Demyan Rogozhin";
}; };
dennajort = {
email = "gosselinjb@gmail.com";
matrix = "@dennajort:matrix.org";
github = "dennajort";
githubId = 1536838;
name = "Jean-Baptiste Gosselin";
};
derchris = { derchris = {
email = "derchris@me.com"; email = "derchris@me.com";
github = "derchrisuk"; github = "derchrisuk";
@ -3650,6 +3670,12 @@
github = "Dettorer"; github = "Dettorer";
githubId = 2761682; githubId = 2761682;
}; };
developer-guy = {
name = "Batuhan Apaydın";
email = "developerguyn@gmail.com";
github = "developer-guy";
githubId = 16693043;
};
devhell = { devhell = {
email = ''"^"@regexmail.net''; email = ''"^"@regexmail.net'';
github = "devhell"; github = "devhell";
@ -4010,6 +4036,11 @@
githubId = 1931963; githubId = 1931963;
name = "David Sferruzza"; name = "David Sferruzza";
}; };
dsymbol = {
name = "dsymbol";
github = "dsymbol";
githubId = 88138099;
};
dtzWill = { dtzWill = {
email = "w@wdtz.org"; email = "w@wdtz.org";
github = "dtzWill"; github = "dtzWill";
@ -6890,6 +6921,12 @@
githubId = 10786794; githubId = 10786794;
name = "Markus Hihn"; name = "Markus Hihn";
}; };
jessemoore = {
email = "jesse@jessemoore.dev";
github = "jesseDMoore1994";
githubId = 30251156;
name = "Jesse Moore";
};
jethro = { jethro = {
email = "jethrokuan95@gmail.com"; email = "jethrokuan95@gmail.com";
github = "jethrokuan"; github = "jethrokuan";
@ -7175,6 +7212,12 @@
github = "joepie91"; github = "joepie91";
githubId = 1663259; githubId = 1663259;
}; };
joerdav = {
email = "joe.davidson.21111@gmail.com";
github = "joerdav";
name = "Joe Davidson";
githubId = 19927761;
};
joesalisbury = { joesalisbury = {
email = "salisbury.joseph@gmail.com"; email = "salisbury.joseph@gmail.com";
github = "JosephSalisbury"; github = "JosephSalisbury";
@ -8913,6 +8956,9 @@
github = "Ma27"; github = "Ma27";
githubId = 6025220; githubId = 6025220;
name = "Maximilian Bosch"; name = "Maximilian Bosch";
keys = [{
fingerprint = "62B9 9C26 F046 721E 26B0 04F6 D006 A998 C6AB FDF1";
}];
}; };
ma9e = { ma9e = {
email = "sean@lfo.team"; email = "sean@lfo.team";
@ -8990,6 +9036,12 @@
githubId = 1238350; githubId = 1238350;
name = "Matthias Herrmann"; name = "Matthias Herrmann";
}; };
mahmoudk1000 = {
email = "mahmoudk1000@gmail.com";
github = "mahmoudk1000";
githubId = 24735185;
name = "Mahmoud Ayman";
};
majesticmullet = { majesticmullet = {
email = "hoccthomas@gmail.com.au"; email = "hoccthomas@gmail.com.au";
github = "MajesticMullet"; github = "MajesticMullet";
@ -9644,6 +9696,12 @@
github = "michaelBelsanti"; github = "michaelBelsanti";
githubId = 62124625; githubId = 62124625;
}; };
michaelgrahamevans = {
email = "michaelgrahamevans@gmail.com";
name = "Michael Evans";
github = "michaelgrahamevans";
githubId = 5932424;
};
michaelpj = { michaelpj = {
email = "michaelpj@gmail.com"; email = "michaelpj@gmail.com";
github = "michaelpj"; github = "michaelpj";
@ -10095,6 +10153,12 @@
githubId = 3073833; githubId = 3073833;
name = "Massimo Redaelli"; name = "Massimo Redaelli";
}; };
mrfreezeex = {
email = "arthur@cri.epita.fr";
github = "MrFreezeex";
name = "Arthur Outhenin-Chalandre";
githubId = 3845213;
};
mrityunjaygr8 = { mrityunjaygr8 = {
email = "mrityunjaysaxena1996@gmail.com"; email = "mrityunjaysaxena1996@gmail.com";
github = "mrityunjaygr8"; github = "mrityunjaygr8";
@ -11623,6 +11687,12 @@
fingerprint = "A3A3 65AE 16ED A7A0 C29C 88F1 9712 452E 8BE3 372E"; fingerprint = "A3A3 65AE 16ED A7A0 C29C 88F1 9712 452E 8BE3 372E";
}]; }];
}; };
pinkcreeper100 = {
email = "benmoreosm@gmail.com";
github = "pinkcreeper100";
githubId = 35699052;
name = "Oliver Samuel Morris";
};
pinpox = { pinpox = {
email = "mail@pablo.tools"; email = "mail@pablo.tools";
github = "pinpox"; github = "pinpox";
@ -11878,6 +11948,12 @@
githubId = 146413; githubId = 146413;
name = "Tobias Poschwatta"; name = "Tobias Poschwatta";
}; };
PowerUser64 = {
email = "blakelysnorth@gmail.com";
github = "PowerUser64";
githubId = 24578572;
name = "Blake North";
};
ppenguin = { ppenguin = {
name = "Jeroen Versteeg"; name = "Jeroen Versteeg";
email = "hieronymusv@gmail.com"; email = "hieronymusv@gmail.com";
@ -12168,6 +12244,7 @@
github = "alyssais"; github = "alyssais";
githubId = 2768870; githubId = 2768870;
name = "Alyssa Ross"; name = "Alyssa Ross";
matrix = "@qyliss:fairydust.space";
keys = [{ keys = [{
fingerprint = "7573 56D7 79BB B888 773E 415E 736C CDF9 EF51 BD97"; fingerprint = "7573 56D7 79BB B888 773E 415E 736C CDF9 EF51 BD97";
}]; }];
@ -12822,6 +12899,7 @@
email = "rrbutani+nix@gmail.com"; email = "rrbutani+nix@gmail.com";
github = "rrbutani"; github = "rrbutani";
githubId = 7833358; githubId = 7833358;
matrix = "@rbutani:matrix.org";
keys = [{ keys = [{
fingerprint = "7DCA 5615 8AB2 621F 2F32 9FF4 1C7C E491 479F A273"; fingerprint = "7DCA 5615 8AB2 621F 2F32 9FF4 1C7C E491 479F A273";
}]; }];
@ -12857,6 +12935,12 @@
githubId = 61306; githubId = 61306;
name = "Rene Treffer"; name = "Rene Treffer";
}; };
rubyowo = {
name = "Rei Star";
email = "perhaps-you-know@what-is.ml";
github = "rubyowo";
githubId = 105302757;
};
rumpelsepp = { rumpelsepp = {
name = "Stefan Tatschner"; name = "Stefan Tatschner";
email = "stefan@rumpelsepp.org"; email = "stefan@rumpelsepp.org";
@ -14802,6 +14886,12 @@
githubId = 1634990; githubId = 1634990;
name = "Tom McLaughlin"; name = "Tom McLaughlin";
}; };
thornycrackers = {
email = "codyfh@gmail.com";
github = "thornycrackers";
githubId = 4313010;
name = "Cody Hiar";
};
thoughtpolice = { thoughtpolice = {
email = "aseipp@pobox.com"; email = "aseipp@pobox.com";
github = "thoughtpolice"; github = "thoughtpolice";
@ -15312,6 +15402,11 @@
github = "unrooted"; github = "unrooted";
githubId = 30440603; githubId = 30440603;
}; };
unsolvedcypher = {
name = "Matthew M";
github = "UnsolvedCypher";
githubId = 3170853;
};
uralbash = { uralbash = {
email = "root@uralbash.ru"; email = "root@uralbash.ru";
github = "uralbash"; github = "uralbash";
@ -15892,6 +15987,15 @@
fingerprint = "DA03 D6C6 3F58 E796 AD26 E99B 366A 2940 479A 06FC"; fingerprint = "DA03 D6C6 3F58 E796 AD26 E99B 366A 2940 479A 06FC";
}]; }];
}; };
williamvds = {
email = "nixpkgs@williamvds.me";
github = "williamvds";
githubId = 26379999;
name = "William Vigolo";
keys = [{
fingerprint = "9848 B216 BCBE 29BB 1C6A E0D5 7A4D F5A8 CDBD 49C7";
}];
};
willibutz = { willibutz = {
email = "willibutz@posteo.de"; email = "willibutz@posteo.de";
github = "WilliButz"; github = "WilliButz";
@ -16016,6 +16120,12 @@
github = "wr0belj"; github = "wr0belj";
githubId = 40501814; githubId = 40501814;
}; };
wraithm = {
name = "Matthew Wraith";
email = "wraithm@gmail.com";
github = "wraithm";
githubId = 1512913;
};
wrmilling = { wrmilling = {
name = "Winston R. Milling"; name = "Winston R. Milling";
email = "Winston@Milli.ng"; email = "Winston@Milli.ng";

View file

@ -26,6 +26,7 @@ Because step 1) is quite expensive and takes roughly ~5 minutes the result is ca
{-# LANGUAGE TupleSections #-} {-# LANGUAGE TupleSections #-}
{-# LANGUAGE ViewPatterns #-} {-# LANGUAGE ViewPatterns #-}
{-# OPTIONS_GHC -Wall #-} {-# OPTIONS_GHC -Wall #-}
{-# LANGUAGE DataKinds #-}
import Control.Monad (forM_, (<=<)) import Control.Monad (forM_, (<=<))
import Control.Monad.Trans (MonadIO (liftIO)) import Control.Monad.Trans (MonadIO (liftIO))
@ -54,17 +55,22 @@ import Data.Time (defaultTimeLocale, formatTime, getCurrentTime)
import Data.Time.Clock (UTCTime) import Data.Time.Clock (UTCTime)
import GHC.Generics (Generic) import GHC.Generics (Generic)
import Network.HTTP.Req ( import Network.HTTP.Req (
GET (GET), GET (GET),
NoReqBody (NoReqBody), HttpResponse (HttpResponseBody),
defaultHttpConfig, NoReqBody (NoReqBody),
header, Option,
https, Req,
jsonResponse, Scheme (Https),
req, bsResponse,
responseBody, defaultHttpConfig,
responseTimeout, header,
runReq, https,
(/:), jsonResponse,
req,
responseBody,
responseTimeout,
runReq,
(/:),
) )
import System.Directory (XdgDirectory (XdgCache), getXdgDirectory) import System.Directory (XdgDirectory (XdgCache), getXdgDirectory)
import System.Environment (getArgs) import System.Environment (getArgs)
@ -76,6 +82,10 @@ import Control.Exception (evaluate)
import qualified Data.IntMap.Strict as IntMap import qualified Data.IntMap.Strict as IntMap
import qualified Data.IntSet as IntSet import qualified Data.IntSet as IntSet
import Data.Bifunctor (second) import Data.Bifunctor (second)
import Data.Data (Proxy)
import Data.ByteString (ByteString)
import qualified Data.ByteString.Char8 as ByteString
import Distribution.Simple.Utils (safeLast, fromUTF8BS)
newtype JobsetEvals = JobsetEvals newtype JobsetEvals = JobsetEvals
{ evals :: Seq Eval { evals :: Seq Eval
@ -123,17 +133,31 @@ showT = Text.pack . show
getBuildReports :: IO () getBuildReports :: IO ()
getBuildReports = runReq defaultHttpConfig do getBuildReports = runReq defaultHttpConfig do
evalMay <- Seq.lookup 0 . evals <$> myReq (https "hydra.nixos.org" /: "jobset" /: "nixpkgs" /: "haskell-updates" /: "evals") mempty evalMay <- Seq.lookup 0 . evals <$> hydraJSONQuery mempty ["jobset", "nixpkgs", "haskell-updates", "evals"]
eval@Eval{id} <- maybe (liftIO $ fail "No Evalution found") pure evalMay eval@Eval{id} <- maybe (liftIO $ fail "No Evalution found") pure evalMay
liftIO . putStrLn $ "Fetching evaluation " <> show id <> " from Hydra. This might take a few minutes..." liftIO . putStrLn $ "Fetching evaluation " <> show id <> " from Hydra. This might take a few minutes..."
buildReports :: Seq Build <- myReq (https "hydra.nixos.org" /: "eval" /: showT id /: "builds") (responseTimeout 600000000) buildReports :: Seq Build <- hydraJSONQuery (responseTimeout 600000000) ["eval", showT id, "builds"]
liftIO do liftIO do
fileName <- reportFileName fileName <- reportFileName
putStrLn $ "Finished fetching all builds from Hydra, saving report as " <> fileName putStrLn $ "Finished fetching all builds from Hydra, saving report as " <> fileName
now <- getCurrentTime now <- getCurrentTime
encodeFile fileName (eval, now, buildReports) encodeFile fileName (eval, now, buildReports)
where
myReq query option = responseBody <$> req GET query NoReqBody jsonResponse (header "User-Agent" "hydra-report.hs/v1 (nixpkgs;maintainers/scripts/haskell)" <> option) hydraQuery :: HttpResponse a => Proxy a -> Option 'Https -> [Text] -> Req (HttpResponseBody a)
hydraQuery responseType option query =
responseBody
<$> req
GET
(foldl' (/:) (https "hydra.nixos.org") query)
NoReqBody
responseType
(header "User-Agent" "hydra-report.hs/v1 (nixpkgs;maintainers/scripts/haskell)" <> option)
hydraJSONQuery :: FromJSON a => Option 'Https -> [Text] -> Req a
hydraJSONQuery = hydraQuery jsonResponse
hydraPlainQuery :: [Text] -> Req ByteString
hydraPlainQuery = hydraQuery bsResponse mempty
hydraEvalCommand :: FilePath hydraEvalCommand :: FilePath
hydraEvalCommand = "hydra-eval-jobs" hydraEvalCommand = "hydra-eval-jobs"
@ -326,23 +350,24 @@ instance Functor (Table row col) where
instance Foldable (Table row col) where instance Foldable (Table row col) where
foldMap f (Table a) = foldMap f a foldMap f (Table a) = foldMap f a
getBuildState :: Build -> BuildState
getBuildState Build{finished, buildstatus} = case (finished, buildstatus) of
(0, _) -> Unfinished
(_, Just 0) -> Success
(_, Just 1) -> Failed
(_, Just 2) -> DependencyFailed
(_, Just 3) -> HydraFailure
(_, Just 4) -> Canceled
(_, Just 7) -> TimedOut
(_, Just 11) -> OutputLimitExceeded
(_, i) -> Unknown i
buildSummary :: MaintainerMap -> ReverseDependencyMap -> Seq Build -> StatusSummary buildSummary :: MaintainerMap -> ReverseDependencyMap -> Seq Build -> StatusSummary
buildSummary maintainerMap reverseDependencyMap = foldl (Map.unionWith unionSummary) Map.empty . fmap toSummary buildSummary maintainerMap reverseDependencyMap = foldl (Map.unionWith unionSummary) Map.empty . fmap toSummary
where where
unionSummary (SummaryEntry (Table lb) lm lr lu) (SummaryEntry (Table rb) rm rr ru) = SummaryEntry (Table $ Map.union lb rb) (lm <> rm) (max lr rr) (max lu ru) unionSummary (SummaryEntry (Table lb) lm lr lu) (SummaryEntry (Table rb) rm rr ru) = SummaryEntry (Table $ Map.union lb rb) (lm <> rm) (max lr rr) (max lu ru)
toSummary Build{finished, buildstatus, job, id, system} = Map.singleton name (SummaryEntry (Table (Map.singleton (set, Platform system) (BuildResult state id))) maintainers reverseDeps unbrokenReverseDeps) toSummary build@Build{job, id, system} = Map.singleton name (SummaryEntry (Table (Map.singleton (set, Platform system) (BuildResult (getBuildState build) id))) maintainers reverseDeps unbrokenReverseDeps)
where where
state :: BuildState
state = case (finished, buildstatus) of
(0, _) -> Unfinished
(_, Just 0) -> Success
(_, Just 1) -> Failed
(_, Just 2) -> DependencyFailed
(_, Just 3) -> HydraFailure
(_, Just 4) -> Canceled
(_, Just 7) -> TimedOut
(_, Just 11) -> OutputLimitExceeded
(_, i) -> Unknown i
packageName = fromMaybe job (Text.stripSuffix ("." <> system) job) packageName = fromMaybe job (Text.stripSuffix ("." <> system) job)
splitted = nonEmpty $ Text.splitOn "." packageName splitted = nonEmpty $ Text.splitOn "." packageName
name = maybe packageName NonEmpty.last splitted name = maybe packageName NonEmpty.last splitted
@ -486,8 +511,23 @@ printMaintainerPing = do
printMarkBrokenList :: IO () printMarkBrokenList :: IO ()
printMarkBrokenList = do printMarkBrokenList = do
(_, _, buildReport) <- readBuildReports (_, fetchTime, buildReport) <- readBuildReports
forM_ buildReport \Build{buildstatus, job} -> runReq defaultHttpConfig $ forM_ buildReport \build@Build{job, id} ->
case (buildstatus, Text.splitOn "." job) of case (getBuildState build, Text.splitOn "." job) of
(Just 1, ["haskellPackages", name, "x86_64-linux"]) -> putStrLn $ " - " <> Text.unpack name (Failed, ["haskellPackages", name, "x86_64-linux"]) -> do
-- Fetch build log from hydra to figure out the cause of the error.
build_log <- ByteString.lines <$> hydraPlainQuery ["build", showT id, "nixlog", "1", "raw"]
-- We use the last probable error cause found in the build log file.
let error_message = fromMaybe " failure " $ safeLast $ mapMaybe probableErrorCause build_log
liftIO $ putStrLn $ " - " <> Text.unpack name <> " # " <> error_message <> " in job https://hydra.nixos.org/build/" <> show id <> " at " <> formatTime defaultTimeLocale "%Y-%m-%d" fetchTime
_ -> pure () _ -> pure ()
{- | This function receives a line from a Nix Haskell builder build log and returns a possible error cause.
| We might need to add other causes in the future if errors happen in unusual parts of the builder.
-}
probableErrorCause :: ByteString -> Maybe String
probableErrorCause "Setup: Encountered missing or private dependencies:" = Just "dependency missing"
probableErrorCause "running tests" = Just "test failure"
probableErrorCause build_line | ByteString.isPrefixOf "Building" build_line = Just ("failure building " <> fromUTF8BS (fst $ ByteString.breakSubstring " for" $ ByteString.drop 9 build_line))
probableErrorCause build_line | ByteString.isSuffixOf "Phase" build_line = Just ("failure in " <> fromUTF8BS build_line)
probableErrorCause _ = Nothing

View file

@ -11,6 +11,9 @@
# Related scripts are update-hackage.sh, for updating the snapshot of the # Related scripts are update-hackage.sh, for updating the snapshot of the
# Hackage database used by hackage2nix, and update-cabal2nix-unstable.sh, # Hackage database used by hackage2nix, and update-cabal2nix-unstable.sh,
# for updating the version of hackage2nix used to perform this task. # for updating the version of hackage2nix used to perform this task.
#
# Note that this script doesn't gcroot anything, so it may be broken by an
# unfortunately timed nix-store --gc.
set -euo pipefail set -euo pipefail
@ -20,15 +23,21 @@ HACKAGE2NIX="${HACKAGE2NIX:-hackage2nix}"
# See: https://github.com/NixOS/nixpkgs/pull/122023 # See: https://github.com/NixOS/nixpkgs/pull/122023
export LC_ALL=C.UTF-8 export LC_ALL=C.UTF-8
config_dir=pkgs/development/haskell-modules/configuration-hackage2nix
echo "Obtaining Hackage data"
extraction_derivation='with import ./. {}; runCommandLocal "unpacked-cabal-hashes" { } "tar xf ${all-cabal-hashes} --strip-components=1 --one-top-level=$out"' extraction_derivation='with import ./. {}; runCommandLocal "unpacked-cabal-hashes" { } "tar xf ${all-cabal-hashes} --strip-components=1 --one-top-level=$out"'
unpacked_hackage="$(nix-build -E "$extraction_derivation" --no-out-link)" unpacked_hackage="$(nix-build -E "$extraction_derivation" --no-out-link)"
config_dir=pkgs/development/haskell-modules/configuration-hackage2nix
echo "Generating compiler configuration"
compiler_config="$(nix-build -A haskellPackages.cabal2nix-unstable.compilerConfig --no-out-link)"
echo "Starting hackage2nix to regenerate pkgs/development/haskell-modules/hackage-packages.nix ..." echo "Starting hackage2nix to regenerate pkgs/development/haskell-modules/hackage-packages.nix ..."
"$HACKAGE2NIX" \ "$HACKAGE2NIX" \
--hackage "$unpacked_hackage" \ --hackage "$unpacked_hackage" \
--preferred-versions <(for n in "$unpacked_hackage"/*/preferred-versions; do cat "$n"; echo; done) \ --preferred-versions <(for n in "$unpacked_hackage"/*/preferred-versions; do cat "$n"; echo; done) \
--nixpkgs "$PWD" \ --nixpkgs "$PWD" \
--config "$compiler_config" \
--config "$config_dir/main.yaml" \ --config "$config_dir/main.yaml" \
--config "$config_dir/stackage.yaml" \ --config "$config_dir/stackage.yaml" \
--config "$config_dir/broken.yaml" \ --config "$config_dir/broken.yaml" \

View file

@ -32,7 +32,7 @@ lmpfrlib,,,,,5.3,alexshpilkin
loadkit,,,,,,alerque loadkit,,,,,,alerque
lpeg,,,,,,vyp lpeg,,,,,,vyp
lpeg_patterns,,,,,, lpeg_patterns,,,,,,
lpeglabel,,,,,, lpeglabel,,,,1.6.0,,
lpty,,,,,, lpty,,,,,,
lrexlib-gnu,,,,,, lrexlib-gnu,,,,,,
lrexlib-pcre,,,,,,vyp lrexlib-pcre,,,,,,vyp

1 name src ref server version luaversion maintainers
32 loadkit alerque
33 lpeg vyp
34 lpeg_patterns
35 lpeglabel 1.6.0
36 lpty
37 lrexlib-gnu
38 lrexlib-pcre vyp

View file

@ -1,5 +1,3 @@
#!/bin/sh #!/usr/bin/env nix-shell
build=`nix-build -E "with import (fetchTarball "channel:nixpkgs-unstable") {}; python3.withPackages(ps: with ps; [ packaging requests toolz ])"` #!nix-shell -I nixpkgs=channel:nixpkgs-unstable -i bash -p "python3.withPackages (ps: with ps; [ packaging requests ])" -p nix-prefetch-git
python=${build}/bin/python exec python3 pkgs/development/interpreters/python/update-python-libraries/update-python-libraries.py $@
exec ${python} pkgs/development/interpreters/python/update-python-libraries/update-python-libraries.py $@

View file

@ -1,3 +1,10 @@
/*
To run:
nix-shell maintainers/scripts/update.nix
See https://nixos.org/manual/nixpkgs/unstable/#var-passthru-updateScript
*/
{ package ? null { package ? null
, maintainer ? null , maintainer ? null
, predicate ? null , predicate ? null
@ -8,8 +15,6 @@
, commit ? null , commit ? null
}: }:
# TODO: add assert statements
let let
pkgs = import ./../../default.nix ( pkgs = import ./../../default.nix (
if include-overlays == false then if include-overlays == false then

View file

@ -81,6 +81,7 @@ with lib.maintainers; {
# Verify additions to this team with at least one already existing member of the team. # Verify additions to this team with at least one already existing member of the team.
members = [ members = [
cdepillabout cdepillabout
wraithm
]; ];
scope = "Group registration for packages maintained by Bitnomial."; scope = "Group registration for packages maintained by Bitnomial.";
shortName = "Bitnomial employees"; shortName = "Bitnomial employees";

View file

@ -135,28 +135,32 @@ let
} }
''; '';
prepareManualFromMD = ''
cp -r --no-preserve=all $inputs/* .
substituteInPlace ./manual.md \
--replace '@NIXOS_VERSION@' "${version}"
substituteInPlace ./configuration/configuration.md \
--replace \
'@MODULE_CHAPTERS@' \
${lib.escapeShellArg (lib.concatMapStringsSep "\n" (p: "${p.value}") config.meta.doc)}
substituteInPlace ./nixos-options.md \
--replace \
'@NIXOS_OPTIONS_JSON@' \
${optionsDoc.optionsJSON}/share/doc/nixos/options.json
substituteInPlace ./development/writing-nixos-tests.section.md \
--replace \
'@NIXOS_TEST_OPTIONS_JSON@' \
${testOptionsDoc.optionsJSON}/share/doc/nixos/options.json
'';
manual-combined = runCommand "nixos-manual-combined" manual-combined = runCommand "nixos-manual-combined"
{ inputs = lib.sourceFilesBySuffices ./. [ ".xml" ".md" ]; { inputs = lib.sourceFilesBySuffices ./. [ ".xml" ".md" ];
nativeBuildInputs = [ pkgs.nixos-render-docs pkgs.libxml2.bin pkgs.libxslt.bin ]; nativeBuildInputs = [ pkgs.nixos-render-docs pkgs.libxml2.bin pkgs.libxslt.bin ];
meta.description = "The NixOS manual as plain docbook XML"; meta.description = "The NixOS manual as plain docbook XML";
} }
'' ''
cp -r --no-preserve=all $inputs/* . ${prepareManualFromMD}
substituteInPlace ./manual.md \
--replace '@NIXOS_VERSION@' "${version}"
substituteInPlace ./configuration/configuration.md \
--replace \
'@MODULE_CHAPTERS@' \
${lib.escapeShellArg (lib.concatMapStringsSep "\n" (p: "${p.value}") config.meta.doc)}
substituteInPlace ./nixos-options.md \
--replace \
'@NIXOS_OPTIONS_JSON@' \
${optionsDoc.optionsJSON}/share/doc/nixos/options.json
substituteInPlace ./development/writing-nixos-tests.section.md \
--replace \
'@NIXOS_TEST_OPTIONS_JSON@' \
${testOptionsDoc.optionsJSON}/share/doc/nixos/options.json
nixos-render-docs -j $NIX_BUILD_CORES manual docbook \ nixos-render-docs -j $NIX_BUILD_CORES manual docbook \
--manpage-urls ${manpageUrls} \ --manpage-urls ${manpageUrls} \
@ -193,7 +197,14 @@ in rec {
# Generate the NixOS manual. # Generate the NixOS manual.
manualHTML = runCommand "nixos-manual-html" manualHTML = runCommand "nixos-manual-html"
{ nativeBuildInputs = [ buildPackages.libxml2.bin buildPackages.libxslt.bin ]; { nativeBuildInputs =
if allowDocBook then [
buildPackages.libxml2.bin
buildPackages.libxslt.bin
] else [
buildPackages.nixos-render-docs
];
inputs = lib.optionals (! allowDocBook) (lib.sourceFilesBySuffices ./. [ ".md" ]);
meta.description = "The NixOS manual in HTML format"; meta.description = "The NixOS manual in HTML format";
allowedReferences = ["out"]; allowedReferences = ["out"];
} }
@ -201,23 +212,44 @@ in rec {
# Generate the HTML manual. # Generate the HTML manual.
dst=$out/share/doc/nixos dst=$out/share/doc/nixos
mkdir -p $dst mkdir -p $dst
xsltproc \
${manualXsltprocOptions} \
--stringparam id.warnings "1" \
--nonet --output $dst/ \
${docbook_xsl_ns}/xml/xsl/docbook/xhtml/chunktoc.xsl \
${manual-combined}/manual-combined.xml \
|& tee xsltproc.out
grep "^ID recommended on" xsltproc.out &>/dev/null && echo "error: some IDs are missing" && false
rm xsltproc.out
mkdir -p $dst/images/callouts
cp ${docbook_xsl_ns}/xml/xsl/docbook/images/callouts/*.svg $dst/images/callouts/
cp ${../../../doc/style.css} $dst/style.css cp ${../../../doc/style.css} $dst/style.css
cp ${../../../doc/overrides.css} $dst/overrides.css cp ${../../../doc/overrides.css} $dst/overrides.css
cp -r ${pkgs.documentation-highlighter} $dst/highlightjs cp -r ${pkgs.documentation-highlighter} $dst/highlightjs
${if allowDocBook then ''
xsltproc \
${manualXsltprocOptions} \
--stringparam id.warnings "1" \
--nonet --output $dst/ \
${docbook_xsl_ns}/xml/xsl/docbook/xhtml/chunktoc.xsl \
${manual-combined}/manual-combined.xml \
|& tee xsltproc.out
grep "^ID recommended on" xsltproc.out &>/dev/null && echo "error: some IDs are missing" && false
rm xsltproc.out
mkdir -p $dst/images/callouts
cp ${docbook_xsl_ns}/xml/xsl/docbook/images/callouts/*.svg $dst/images/callouts/
'' else ''
${prepareManualFromMD}
# TODO generator is set like this because the docbook/md manual compare workflow will
# trigger if it's different
nixos-render-docs -j $NIX_BUILD_CORES manual html \
--manpage-urls ${manpageUrls} \
--revision ${lib.escapeShellArg revision} \
--generator "DocBook XSL Stylesheets V${docbook_xsl_ns.version}" \
--stylesheet style.css \
--stylesheet overrides.css \
--stylesheet highlightjs/mono-blue.css \
--script ./highlightjs/highlight.pack.js \
--script ./highlightjs/loader.js \
--toc-depth 1 \
--chunk-toc-depth 1 \
./manual.md \
$dst/index.html
''}
mkdir -p $out/nix-support mkdir -p $out/nix-support
echo "nix-build out $out" >> $out/nix-support/hydra-build-products echo "nix-build out $out" >> $out/nix-support/hydra-build-products
echo "doc manual $dst" >> $out/nix-support/hydra-build-products echo "doc manual $dst" >> $out/nix-support/hydra-build-products

View file

@ -101,11 +101,24 @@ Creates an Option attribute set for an option that specifies the package a modul
**Note**: You shouldnt necessarily make package options for all of your modules. You can always overwrite a specific package throughout nixpkgs by using [nixpkgs overlays](https://nixos.org/manual/nixpkgs/stable/#chap-overlays). **Note**: You shouldnt necessarily make package options for all of your modules. You can always overwrite a specific package throughout nixpkgs by using [nixpkgs overlays](https://nixos.org/manual/nixpkgs/stable/#chap-overlays).
The default package is specified as a list of strings representing its attribute path in nixpkgs. Because of this, you need to pass nixpkgs itself as the first argument. The package is specified in the third argument under `default` as a list of strings
representing its attribute path in nixpkgs (or another package set).
Because of this, you need to pass nixpkgs itself (or a subset) as the first argument.
The second argument is the name of the option, used in the description "The \<name\> package to use.". You can also pass an example value, either a literal string or a package's attribute path. The second argument may be either a string or a list of strings.
It provides the display name of the package in the description of the generated option
(using only the last element if the passed value is a list)
and serves as the fallback value for the `default` argument.
You can omit the default path if the name of the option is also attribute path in nixpkgs. To include extra information in the description, pass `extraDescription` to
append arbitrary text to the generated description.
You can also pass an `example` value, either a literal string or an attribute path.
The default argument can be omitted if the provided name is
an attribute of pkgs (if name is a string) or a
valid attribute path in pkgs (if name is a list).
If you wish to explicitly provide no default, pass `null` as `default`.
During the transition to CommonMark documentation `mkPackageOption` creates an option with a DocBook description attribute, once the transition is completed it will create a CommonMark description instead. `mkPackageOptionMD` always creates an option with a CommonMark description attribute and will be removed some time after the transition is completed. During the transition to CommonMark documentation `mkPackageOption` creates an option with a DocBook description attribute, once the transition is completed it will create a CommonMark description instead. `mkPackageOptionMD` always creates an option with a CommonMark description attribute and will be removed some time after the transition is completed.
@ -142,6 +155,21 @@ lib.mkOption {
``` ```
::: :::
::: {#ex-options-declarations-util-mkPackageOption-extraDescription .example}
```nix
mkPackageOption pkgs [ "python39Packages" "pytorch" ] {
extraDescription = "This is an example and doesn't actually do anything.";
}
# is like
lib.mkOption {
type = lib.types.package;
default = pkgs.python39Packages.pytorch;
defaultText = lib.literalExpression "pkgs.python39Packages.pytorch";
description = "The pytorch package to use. This is an example and doesn't actually do anything.";
}
```
:::
## Extensible Option Types {#sec-option-declarations-eot} ## Extensible Option Types {#sec-option-declarations-eot}
Extensible option types is a feature that allow to extend certain types Extensible option types is a feature that allow to extend certain types

View file

@ -428,7 +428,7 @@ Use the following commands:
UEFI systems UEFI systems
: You must select a boot-loader, either system-boot or GRUB. The recommended : You must select a boot-loader, either systemd-boot or GRUB. The recommended
option is systemd-boot: set the option [](#opt-boot.loader.systemd-boot.enable) option is systemd-boot: set the option [](#opt-boot.loader.systemd-boot.enable)
to `true`. `nixos-generate-config` should do this automatically to `true`. `nixos-generate-config` should do this automatically
for new configurations when booted in UEFI mode. for new configurations when booted in UEFI mode.
@ -441,10 +441,10 @@ Use the following commands:
If you want to use GRUB, set [](#opt-boot.loader.grub.device) to `nodev` and If you want to use GRUB, set [](#opt-boot.loader.grub.device) to `nodev` and
[](#opt-boot.loader.grub.efiSupport) to `true`. [](#opt-boot.loader.grub.efiSupport) to `true`.
With system-boot, you should not need any special configuration to detect With systemd-boot, you should not need any special configuration to detect
other installed systems. With GRUB, set [](#opt-boot.loader.grub.useOSProber) other installed systems. With GRUB, set [](#opt-boot.loader.grub.useOSProber)
to `true`, but this will only detect windows partitions, not other linux to `true`, but this will only detect windows partitions, not other Linux
distributions. If you dual boot another linux distribution, use system-boot distributions. If you dual boot another Linux distribution, use systemd-boot
instead. instead.
If you need to configure networking for your machine the If you need to configure networking for your machine the

View file

@ -1,10 +1,6 @@
.Dd January 1, 1980 .Dd January 1, 1980
.\" nixpkgs groff will use Nixpkgs as the OS in the title by default, taking it from .Dt nixos-build-vms 8
.\" doc-default-operating-system. mandoc doesn't have this register set by default, .Os
.\" so we can use it as a groff/mandoc switch.
.ie ddoc-default-operating-system .Dt nixos-build-vms \&8 "NixOS System Manager's Manual"
.el .Dt nixos-build-vms 8
.Os NixOS
.Sh NAME .Sh NAME
.Nm nixos-build-vms .Nm nixos-build-vms
.Nd build a network of virtual machines from a network of NixOS configurations .Nd build a network of virtual machines from a network of NixOS configurations

View file

@ -1,10 +1,6 @@
.Dd January 1, 1980 .Dd January 1, 1980
.\" nixpkgs groff will use Nixpkgs as the OS in the title by default, taking it from .Dt nixos-enter 8
.\" doc-default-operating-system. mandoc doesn't have this register set by default, .Os
.\" so we can use it as a groff/mandoc switch.
.ie ddoc-default-operating-system .Dt nixos-enter \&8 "NixOS System Manager's Manual"
.el .Dt nixos-enter 8
.Os NixOS
.Sh NAME .Sh NAME
.Nm nixos-enter .Nm nixos-enter
.Nd run a command in a NixOS chroot environment .Nd run a command in a NixOS chroot environment

View file

@ -1,10 +1,6 @@
.Dd January 1, 1980 .Dd January 1, 1980
.\" nixpkgs groff will use Nixpkgs as the OS in the title by default, taking it from .Dt nixos-generate-config 8
.\" doc-default-operating-system. mandoc doesn't have this register set by default, .Os
.\" so we can use it as a groff/mandoc switch.
.ie ddoc-default-operating-system .Dt nixos-generate-config \&8 "NixOS System Manager's Manual"
.el .Dt nixos-generate-config 8
.Os NixOS
.Sh NAME .Sh NAME
.Nm nixos-generate-config .Nm nixos-generate-config
.Nd generate NixOS configuration modules .Nd generate NixOS configuration modules

View file

@ -1,10 +1,6 @@
.Dd January 1, 1980 .Dd January 1, 1980
.\" nixpkgs groff will use Nixpkgs as the OS in the title by default, taking it from .Dt nixos-install 8
.\" doc-default-operating-system. mandoc doesn't have this register set by default, .Os
.\" so we can use it as a groff/mandoc switch.
.ie ddoc-default-operating-system .Dt nixos-install \&8 "NixOS System Manager's Manual"
.el .Dt nixos-install 8
.Os NixOS
.Sh NAME .Sh NAME
.Nm nixos-install .Nm nixos-install
.Nd install bootloader and NixOS .Nd install bootloader and NixOS

View file

@ -1,10 +1,6 @@
.Dd January 1, 1980 .Dd January 1, 1980
.\" nixpkgs groff will use Nixpkgs as the OS in the title by default, taking it from .Dt nixos-option 8
.\" doc-default-operating-system. mandoc doesn't have this register set by default, .Os
.\" so we can use it as a groff/mandoc switch.
.ie ddoc-default-operating-system .Dt nixos-option \&8 "NixOS System Manager's Manual"
.el .Dt nixos-option 8
.Os NixOS
.Sh NAME .Sh NAME
.Nm nixos-option .Nm nixos-option
.Nd inspect a NixOS configuration .Nd inspect a NixOS configuration

View file

@ -1,10 +1,6 @@
.Dd January 1, 1980 .Dd January 1, 1980
.\" nixpkgs groff will use Nixpkgs the OS in the title by default, taking it from .Dt nixos-rebuild 8
.\" doc-default-operating-system. mandoc doesn't have this register set by default, .Os
.\" so we can use it as a groff/mandoc switch.
.ie ddoc-default-operating-system .Dt nixos-rebuild \&8 "NixOS System Manager's Manual"
.el .Dt nixos-rebuild 8
.Os NixOS
.Sh NAME .Sh NAME
.Nm nixos-rebuild .Nm nixos-rebuild
.Nd reconfigure a NixOS machine .Nd reconfigure a NixOS machine

View file

@ -1,10 +1,6 @@
.Dd January 1, 1980 .Dd January 1, 1980
.\" nixpkgs groff will use Nixpkgs as the OS in the title by default, taking it from .Dt nixos-version 8
.\" doc-default-operating-system. mandoc doesn't have this register set by default, .Os
.\" so we can use it as a groff/mandoc switch.
.ie ddoc-default-operating-system .Dt nixos-version \&8 "NixOS System Manager's Manual"
.el .Dt nixos-version 8
.Os NixOS
.Sh NAME .Sh NAME
.Nm nixos-version .Nm nixos-version
.Nd show the NixOS version .Nd show the NixOS version

View file

@ -47,7 +47,10 @@ development/development.md
contributing-to-this-manual.chapter.md contributing-to-this-manual.chapter.md
``` ```
```{=include=} appendix ```{=include=} appendix html:into-file=//options.html
nixos-options.md nixos-options.md
```
```{=include=} appendix html:into-file=//release-notes.html
release-notes/release-notes.md release-notes/release-notes.md
``` ```

View file

@ -8,6 +8,10 @@ In addition to numerous new and upgraded packages, this release has the followin
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. --> <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
- Core version changes:
- default linux: 5.15 -\> 6.1, all supported kernels available
- Cinnamon has been updated to 5.6, see [the pull request](https://github.com/NixOS/nixpkgs/pull/201328#issue-1449910204) for what is changed. - Cinnamon has been updated to 5.6, see [the pull request](https://github.com/NixOS/nixpkgs/pull/201328#issue-1449910204) for what is changed.
- KDE Plasma has been updated to v5.27, see [the release notes](https://kde.org/announcements/plasma/5/5.27.0/) for what is changed. - KDE Plasma has been updated to v5.27, see [the release notes](https://kde.org/announcements/plasma/5/5.27.0/) for what is changed.
@ -78,6 +82,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- `git-bug` has been updated to at least version 0.8.0, which includes backwards incompatible changes. The `git-bug-migration` package can be used to upgrade existing repositories. - `git-bug` has been updated to at least version 0.8.0, which includes backwards incompatible changes. The `git-bug-migration` package can be used to upgrade existing repositories.
- `keepassx` and `keepassx2` have been removed, due to upstream [stopping development](https://www.keepassx.org/index.html%3Fp=636.html). Consider [KeePassXC](https://keepassxc.org) as a maintained alternative.
- The `services.kubo.settings` option is now no longer stateful. If you changed any of the options in `services.kubo.settings` in the past and then removed them from your NixOS configuration again, those changes are still in your Kubo configuration file but will now be reset to the default. If you're unsure, you may want to make a backup of your configuration file (probably /var/lib/ipfs/config) and compare after the update. - The `services.kubo.settings` option is now no longer stateful. If you changed any of the options in `services.kubo.settings` in the past and then removed them from your NixOS configuration again, those changes are still in your Kubo configuration file but will now be reset to the default. If you're unsure, you may want to make a backup of your configuration file (probably /var/lib/ipfs/config) and compare after the update.
- The EC2 image module no longer fetches instance metadata in stage-1. This results in a significantly smaller initramfs, since network drivers no longer need to be included, and faster boots, since metadata fetching can happen in parallel with startup of other services. - The EC2 image module no longer fetches instance metadata in stage-1. This results in a significantly smaller initramfs, since network drivers no longer need to be included, and faster boots, since metadata fetching can happen in parallel with startup of other services.
@ -103,7 +109,7 @@ In addition to numerous new and upgraded packages, this release has the followin
- The EC2 image module previously detected and automatically mounted ext3-formatted instance store devices and partitions in stage-1 (initramfs), storing `/tmp` on the first discovered device. This behaviour, which only catered to very specific use cases and could not be disabled, has been removed. Users relying on this should provide their own implementation, and probably use ext4 and perform the mount in stage-2. - The EC2 image module previously detected and automatically mounted ext3-formatted instance store devices and partitions in stage-1 (initramfs), storing `/tmp` on the first discovered device. This behaviour, which only catered to very specific use cases and could not be disabled, has been removed. Users relying on this should provide their own implementation, and probably use ext4 and perform the mount in stage-2.
- `teleport` has been upgraded to major version 11. Please see upstream [upgrade instructions](https://goteleport.com/docs/setup/operations/upgrading/) and [release notes](https://goteleport.com/docs/changelog/#1100). - `teleport` has been upgraded from major version 10 to major version 12. Please see upstream [upgrade instructions](https://goteleport.com/docs/setup/operations/upgrading/) and release notes for versions [11](https://goteleport.com/docs/changelog/#1100) and [12](https://goteleport.com/docs/changelog/#1201). Note that Teleport does not officially support upgrades across more than one major version at a time. If you're running Teleport server components, it is recommended to first upgrade to an intermediate 11.x version by setting `services.teleport.package = pkgs.teleport_11`. Afterwards, this option can be removed to upgrade to the default version (12).
- The EC2 image module previously detected and activated swap-formatted instance store devices and partitions in stage-1 (initramfs). This behaviour has been removed. Users relying on this should provide their own implementation. - The EC2 image module previously detected and activated swap-formatted instance store devices and partitions in stage-1 (initramfs). This behaviour has been removed. Users relying on this should provide their own implementation.
@ -115,6 +121,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- The [services.wordpress.sites.&lt;name&gt;.plugins](#opt-services.wordpress.sites._name_.plugins) and [services.wordpress.sites.&lt;name&gt;.themes](#opt-services.wordpress.sites._name_.themes) options have been converted from sets to attribute sets to allow for consumers to specify explicit install paths via attribute name. - The [services.wordpress.sites.&lt;name&gt;.plugins](#opt-services.wordpress.sites._name_.plugins) and [services.wordpress.sites.&lt;name&gt;.themes](#opt-services.wordpress.sites._name_.themes) options have been converted from sets to attribute sets to allow for consumers to specify explicit install paths via attribute name.
- `protonmail-bridge` package has been updated to v3.0 and the CLI executable is now named bridge instead of protonmail-bridge to be more in line with upstream.
- Nebula now runs as a system user and group created for each nebula network, using the `CAP_NET_ADMIN` ambient capability on launch rather than starting as root. Ensure that any files each Nebula instance needs to access are owned by the correct user and group, by default `nebula-${networkName}`. - Nebula now runs as a system user and group created for each nebula network, using the `CAP_NET_ADMIN` ambient capability on launch rather than starting as root. Ensure that any files each Nebula instance needs to access are owned by the correct user and group, by default `nebula-${networkName}`.
- In `mastodon` it is now necessary to specify location of file with `PostgreSQL` database password. In `services.mastodon.database.passwordFile` parameter default value `/var/lib/mastodon/secrets/db-password` has been changed to `null`. - In `mastodon` it is now necessary to specify location of file with `PostgreSQL` database password. In `services.mastodon.database.passwordFile` parameter default value `/var/lib/mastodon/secrets/db-password` has been changed to `null`.
@ -132,12 +140,18 @@ In addition to numerous new and upgraded packages, this release has the followin
[upstream's release notes](https://github.com/iputils/iputils/releases/tag/20221126) [upstream's release notes](https://github.com/iputils/iputils/releases/tag/20221126)
for more details and available replacements. for more details and available replacements.
- [services.xserver.videoDrivers](options.html#opt-services.xserver.videoDrivers) now defaults to the `modesetting` driver over device-specific ones. The `radeon`, `amdgpu` and `nouveau` drivers are still available, but effectively unmaintained and not recommended for use.
- conntrack helper autodetection has been removed from kernels 6.0 and up upstream, and an assertion was added to ensure things don't silently stop working. Migrate your configuration to assign helpers explicitly or use an older LTS kernel branch as a temporary workaround.
## Other Notable Changes {#sec-release-23.05-notable-changes} ## Other Notable Changes {#sec-release-23.05-notable-changes}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. --> <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
- `vim_configurable` has been renamed to `vim-full` to avoid confusion: `vim-full`'s build-time features are configurable, but both `vim` and `vim-full` are _customizable_ (in the sense of user configuration, like vimrc). - `vim_configurable` has been renamed to `vim-full` to avoid confusion: `vim-full`'s build-time features are configurable, but both `vim` and `vim-full` are _customizable_ (in the sense of user configuration, like vimrc).
- Pantheon now defaults to Mutter 42 and GNOME settings daemon 42, all Pantheon packages are now tracking elementary OS 7 updates.
- The module for the application firewall `opensnitch` got the ability to configure rules. Available as [services.opensnitch.rules](#opt-services.opensnitch.rules) - The module for the application firewall `opensnitch` got the ability to configure rules. Available as [services.opensnitch.rules](#opt-services.opensnitch.rules)
- The module `usbmuxd` now has the ability to change the package used by the daemon. In case you're experiencing issues with `usbmuxd` you can try an alternative program like `usbmuxd2`. Available as [services.usbmuxd.package](#opt-services.usbmuxd.package) - The module `usbmuxd` now has the ability to change the package used by the daemon. In case you're experiencing issues with `usbmuxd` you can try an alternative program like `usbmuxd2`. Available as [services.usbmuxd.package](#opt-services.usbmuxd.package)
@ -162,6 +176,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- NixOS now defaults to using nsncd (a non-caching reimplementation in Rust) as NSS lookup dispatcher, instead of the buggy and deprecated glibc-provided nscd. If you need to switch back, set `services.nscd.enableNsncd = false`, but please open an issue in nixpkgs so your issue can be fixed. - NixOS now defaults to using nsncd (a non-caching reimplementation in Rust) as NSS lookup dispatcher, instead of the buggy and deprecated glibc-provided nscd. If you need to switch back, set `services.nscd.enableNsncd = false`, but please open an issue in nixpkgs so your issue can be fixed.
- `services.borgmatic` now allows for multiple configurations, placed in `/etc/borgmatic.d/`, you can define them with `services.borgmatic.configurations`.
- The `dnsmasq` service now takes configuration via the - The `dnsmasq` service now takes configuration via the
`services.dnsmasq.settings` attribute set. The option `services.dnsmasq.settings` attribute set. The option
`services.dnsmasq.extraConfig` will be deprecated when NixOS 22.11 reaches `services.dnsmasq.extraConfig` will be deprecated when NixOS 22.11 reaches
@ -255,8 +271,12 @@ In addition to numerous new and upgraded packages, this release has the followin
- The `unifi-poller` package and corresponding NixOS module have been renamed to `unpoller` to match upstream. - The `unifi-poller` package and corresponding NixOS module have been renamed to `unpoller` to match upstream.
- `protonmail-bridge` package has been updated to v3.0 and the CLI executable is now named bridge instead of protonmail-bridge to be more in line with upstream.
- The new option `services.tailscale.useRoutingFeatures` controls various settings for using Tailscale features like exit nodes and subnet routers. If you wish to use your machine as an exit node, you can set this setting to `server`, otherwise if you wish to use an exit node you can set this setting to `client`. The strict RPF warning has been removed as the RPF will be loosened automatically based on the value of this setting. - The new option `services.tailscale.useRoutingFeatures` controls various settings for using Tailscale features like exit nodes and subnet routers. If you wish to use your machine as an exit node, you can set this setting to `server`, otherwise if you wish to use an exit node you can set this setting to `client`. The strict RPF warning has been removed as the RPF will be loosened automatically based on the value of this setting.
- `openjdk` from version 11 and above is not build with `openjfx` (i.e.: JavaFX) support by default anymore. You can re-enable it by overriding, e.g.: `openjdk11.override { enableJavaFX = true; };`.
- [Xastir](https://xastir.org/index.php/Main_Page) can now access AX.25 interfaces via the `libax25` package. - [Xastir](https://xastir.org/index.php/Main_Page) can now access AX.25 interfaces via the `libax25` package.
- `tvbrowser-bin` was removed, and now `tvbrowser` is built from source. - `tvbrowser-bin` was removed, and now `tvbrowser` is built from source.
@ -266,3 +286,5 @@ In addition to numerous new and upgraded packages, this release has the followin
- The option `services.nomad.extraSettingsPlugins` has been fixed to allow more than one plugin in the path. - The option `services.nomad.extraSettingsPlugins` has been fixed to allow more than one plugin in the path.
- The option `services.prometheus.exporters.pihole.interval` does not exist anymore and has been removed. - The option `services.prometheus.exporters.pihole.interval` does not exist anymore and has been removed.
- `k3s` can now be configured with an EnvironmentFile for its systemd service, allowing secrets to be provided without ending up in the Nix Store.

View file

@ -154,6 +154,9 @@ To solve this, you can run `fdisk -l $image` and generate `dd if=$image of=$imag
, # Shell code executed after the VM has finished. , # Shell code executed after the VM has finished.
postVM ? "" postVM ? ""
, # Guest memory size
memSize ? 1024
, # Copy the contents of the Nix store to the root of the image and , # Copy the contents of the Nix store to the root of the image and
# skip further setup. Incompatible with `contents`, # skip further setup. Incompatible with `contents`,
# `installBootLoader` and `configFile`. # `installBootLoader` and `configFile`.
@ -525,7 +528,7 @@ let format' = format; in let
"-drive if=pflash,format=raw,unit=1,file=$efiVars" "-drive if=pflash,format=raw,unit=1,file=$efiVars"
] ]
); );
memSize = 1024; inherit memSize;
} '' } ''
export PATH=${binPath}:$PATH export PATH=${binPath}:$PATH

View file

@ -73,6 +73,9 @@
, # Shell code executed after the VM has finished. , # Shell code executed after the VM has finished.
postVM ? "" postVM ? ""
, # Guest memory size
memSize ? 1024
, name ? "nixos-disk-image" , name ? "nixos-disk-image"
, # Disk image format, one of qcow2, qcow2-compressed, vdi, vpc, raw. , # Disk image format, one of qcow2, qcow2-compressed, vdi, vpc, raw.
@ -242,6 +245,7 @@ let
{ {
QEMU_OPTS = "-drive file=$bootDiskImage,if=virtio,cache=unsafe,werror=report" QEMU_OPTS = "-drive file=$bootDiskImage,if=virtio,cache=unsafe,werror=report"
+ " -drive file=$rootDiskImage,if=virtio,cache=unsafe,werror=report"; + " -drive file=$rootDiskImage,if=virtio,cache=unsafe,werror=report";
inherit memSize;
preVM = '' preVM = ''
PATH=$PATH:${pkgs.qemu_kvm}/bin PATH=$PATH:${pkgs.qemu_kvm}/bin
mkdir $out mkdir $out

View file

@ -215,10 +215,12 @@ foreach my $u (@{$spec->{users}}) {
} else { } else {
$u->{uid} = allocUid($name, $u->{isSystemUser}) if !defined $u->{uid}; $u->{uid} = allocUid($name, $u->{isSystemUser}) if !defined $u->{uid};
if (defined $u->{initialPassword}) { if (!defined $u->{hashedPassword}) {
$u->{hashedPassword} = hashPassword($u->{initialPassword}); if (defined $u->{initialPassword}) {
} elsif (defined $u->{initialHashedPassword}) { $u->{hashedPassword} = hashPassword($u->{initialPassword});
$u->{hashedPassword} = $u->{initialHashedPassword}; } elsif (defined $u->{initialHashedPassword}) {
$u->{hashedPassword} = $u->{initialHashedPassword};
}
} }
} }

View file

@ -273,6 +273,9 @@ let
{command}`passwd` command. Otherwise, it's {command}`passwd` command. Otherwise, it's
equivalent to setting the {option}`hashedPassword` option. equivalent to setting the {option}`hashedPassword` option.
Note that the {option}`hashedPassword` option will override
this option if both are set.
${hashedPasswordDescription} ${hashedPasswordDescription}
''; '';
}; };
@ -291,6 +294,9 @@ let
is world-readable in the Nix store, so it should only be is world-readable in the Nix store, so it should only be
used for guest accounts or passwords that will be changed used for guest accounts or passwords that will be changed
promptly. promptly.
Note that the {option}`password` option will override this
option if both are set.
''; '';
}; };

View file

@ -0,0 +1,16 @@
{ config, lib, pkgs, ... }:
let
cfg = config.hardware.keyboard.qmk;
inherit (lib) mdDoc mkEnableOption mkIf;
in
{
options.hardware.keyboard.qmk = {
enable = mkEnableOption (mdDoc "non-root access to the firmware of QMK keyboards");
};
config = mkIf cfg.enable {
services.udev.packages = [ pkgs.qmk-udev-rules ];
};
}

View file

@ -1,16 +1,16 @@
{ config, lib, pkgs, ... }: { config, lib, pkgs, ... }:
with lib;
let let
cfg = config.hardware.keyboard.teck; cfg = config.hardware.keyboard.teck;
inherit (lib) mdDoc mkEnableOption mkIf;
in in
{ {
options.hardware.keyboard.teck = { options.hardware.keyboard.teck = {
enable = mkEnableOption (lib.mdDoc "non-root access to the firmware of TECK keyboards"); enable = mkEnableOption (mdDoc "non-root access to the firmware of TECK keyboards");
}; };
config = mkIf cfg.enable { config = mkIf cfg.enable {
services.udev.packages = [ pkgs.teck-udev-rules ]; services.udev.packages = [ pkgs.teck-udev-rules ];
}; };
} }

View file

@ -1,13 +1,14 @@
{ config, lib, pkgs, ... }: { config, lib, pkgs, ... }:
with lib;
let let
cfg = config.hardware.keyboard.uhk; cfg = config.hardware.keyboard.uhk;
inherit (lib) mdDoc mkEnableOption mkIf;
in in
{ {
options.hardware.keyboard.uhk = { options.hardware.keyboard.uhk = {
enable = mkEnableOption (lib.mdDoc '' enable = mkEnableOption (mdDoc ''
non-root access to the firmware of UHK keyboards. non-root access to the firmware of UHK keyboards.
You need it when you want to flash a new firmware on the keyboard. You need it when you want to flash a new firmware on the keyboard.
Access to the keyboard is granted to users in the "input" group. Access to the keyboard is granted to users in the "input" group.
You may want to install the uhk-agent package. You may want to install the uhk-agent package.

View file

@ -1,21 +1,18 @@
{ config, lib, pkgs, ... }: { config, lib, pkgs, ... }:
let let
inherit (lib) mkOption mkIf types;
cfg = config.hardware.keyboard.zsa; cfg = config.hardware.keyboard.zsa;
inherit (lib) mkEnableOption mkIf mdDoc;
in in
{ {
options.hardware.keyboard.zsa = { options.hardware.keyboard.zsa = {
enable = mkOption { enable = mkEnableOption (mdDoc ''
type = types.bool; udev rules for keyboards from ZSA like the ErgoDox EZ, Planck EZ and Moonlander Mark I.
default = false; You need it when you want to flash a new configuration on the keyboard
description = lib.mdDoc '' or use their live training in the browser.
Enables udev rules for keyboards from ZSA like the ErgoDox EZ, Planck EZ and Moonlander Mark I. You may want to install the wally-cli package.
You need it when you want to flash a new configuration on the keyboard '');
or use their live training in the browser.
You may want to install the wally-cli package.
'';
};
}; };
config = mkIf cfg.enable { config = mkIf cfg.enable {

View file

@ -461,7 +461,9 @@ in
# If requested enable modesetting via kernel parameter. # If requested enable modesetting via kernel parameter.
boot.kernelParams = optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1" boot.kernelParams = optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
++ optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1" ++ optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
++ optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"; ++ optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
# proprietary driver is not compiled with support for X86_KERNEL_IBT
++ optional (!cfg.open && config.boot.kernelPackages.kernel.kernelAtLeast "6.2") "ibt=off";
services.udev.extraRules = services.udev.extraRules =
'' ''

View file

@ -0,0 +1,49 @@
# To build, use:
# nix-build nixos -I nixos-config=nixos/modules/installer/sd-card/sd-image-powerpc64le.nix -A config.system.build.sdImage
{ config, lib, pkgs, ... }:
{
imports = [
../../profiles/base.nix
../../profiles/installation-device.nix
./sd-image.nix
];
boot.loader = {
# powerpc64le-linux typically uses petitboot
grub.enable = false;
generic-extlinux-compatible = {
# petitboot is not does not support all of the extlinux extensions to
# syslinux, but its parser is very forgiving; it essentially ignores
# whatever it doesn't understand. See below for a filename adjustment.
enable = true;
};
};
boot.consoleLogLevel = lib.mkDefault 7;
boot.kernelParams = [ "console=hvc0" ];
sdImage = {
populateFirmwareCommands = "";
populateRootCommands = ''
mkdir -p ./files/boot
${config.boot.loader.generic-extlinux-compatible.populateCmd} \
-c ${config.system.build.toplevel} \
-d ./files/boot
''
# https://github.com/open-power/petitboot/blob/master/discover/syslinux-parser.c
# petitboot will look in these paths (plus all-caps versions of them):
# /boot/syslinux/syslinux.cfg
# /syslinux/syslinux.cfg
# /syslinux.cfg
+ ''
mv ./files/boot/extlinux ./files/boot/syslinux
mv ./files/boot/syslinux/extlinux.conf ./files/boot/syslinux/syslinux.cfg
''
# petitboot does not support relative paths for LINUX or INITRD; it prepends
# a `/` when parsing these fields
+ ''
sed -i 's_^\(\W\W*\(INITRD\|initrd\|LINUX\|linux\)\W\)\.\./_\1/boot/_' ./files/boot/syslinux/syslinux.cfg
'';
};
}

View file

@ -1,7 +1,7 @@
{ {
x86_64-linux = "/nix/store/lsr79q5xqd9dv97wn87x12kzax8s8i1s-nix-2.13.2"; x86_64-linux = "/nix/store/mc43d38fibi94pp5crfwacl5gbslccd0-nix-2.13.3";
i686-linux = "/nix/store/wky9xjwiwzpifgk0s3f2nrg8nr67bi7x-nix-2.13.2"; i686-linux = "/nix/store/09m966pj26cgd4ihlg8ihl1106j3vih8-nix-2.13.3";
aarch64-linux = "/nix/store/v8drr3x1ia6bdr8y4vl79mlz61xynrpm-nix-2.13.2"; aarch64-linux = "/nix/store/7f191d125akld27gc6jl0r13l8pl7x0h-nix-2.13.3";
x86_64-darwin = "/nix/store/1l14si31p4aw7c1gwgjy0nq55k38j9nj-nix-2.13.2"; x86_64-darwin = "/nix/store/1wn9jkvi2zqfjnjgg7lnp30r2q2y8whd-nix-2.13.3";
aarch64-darwin = "/nix/store/6x7nr1r780fgn254zhkwhih3f3i8cr45-nix-2.13.2"; aarch64-darwin = "/nix/store/8w0v2mffa10chrf1h66cbvbpw86qmh85-nix-2.13.3";
} }

View file

@ -392,7 +392,7 @@ in
tape = 25; tape = 25;
video = 26; video = 26;
dialout = 27; dialout = 27;
polkituser = 28; #polkituser = 28; # currently unused, polkitd doesn't need a group
utmp = 29; utmp = 29;
# ddclient = 30; # converted to DynamicUser = true # ddclient = 30; # converted to DynamicUser = true
davfs2 = 31; davfs2 = 31;
@ -510,7 +510,6 @@ in
#seeks = 148; # removed 2020-06-21 #seeks = 148; # removed 2020-06-21
prosody = 149; prosody = 149;
i2pd = 150; i2pd = 150;
systemd-coredump = 151;
systemd-network = 152; systemd-network = 152;
systemd-resolve = 153; systemd-resolve = 153;
systemd-timesync = 154; systemd-timesync = 154;

View file

@ -59,6 +59,7 @@
./hardware/gpgsmartcards.nix ./hardware/gpgsmartcards.nix
./hardware/hackrf.nix ./hardware/hackrf.nix
./hardware/i2c.nix ./hardware/i2c.nix
./hardware/keyboard/qmk.nix
./hardware/keyboard/teck.nix ./hardware/keyboard/teck.nix
./hardware/keyboard/uhk.nix ./hardware/keyboard/uhk.nix
./hardware/keyboard/zsa.nix ./hardware/keyboard/zsa.nix
@ -1052,6 +1053,7 @@
./services/search/kibana.nix ./services/search/kibana.nix
./services/search/meilisearch.nix ./services/search/meilisearch.nix
./services/search/opensearch.nix ./services/search/opensearch.nix
./services/search/qdrant.nix
./services/search/solr.nix ./services/search/solr.nix
./services/security/aesmd.nix ./services/security/aesmd.nix
./services/security/certmgr.nix ./services/security/certmgr.nix
@ -1167,6 +1169,7 @@
./services/web-apps/moodle.nix ./services/web-apps/moodle.nix
./services/web-apps/netbox.nix ./services/web-apps/netbox.nix
./services/web-apps/nextcloud.nix ./services/web-apps/nextcloud.nix
./services/web-apps/nextcloud-notify_push.nix
./services/web-apps/nexus.nix ./services/web-apps/nexus.nix
./services/web-apps/nifi.nix ./services/web-apps/nifi.nix
./services/web-apps/node-red.nix ./services/web-apps/node-red.nix

View file

@ -113,7 +113,7 @@ in
group = "polkituser"; group = "polkituser";
}; };
users.groups.polkituser.gid = config.ids.gids.polkituser; users.groups.polkituser = {};
}; };
} }

View file

@ -5,44 +5,58 @@ with lib;
let let
cfg = config.services.borgmatic; cfg = config.services.borgmatic;
settingsFormat = pkgs.formats.yaml { }; settingsFormat = pkgs.formats.yaml { };
cfgType = with types; submodule {
freeformType = settingsFormat.type;
options.location = {
source_directories = mkOption {
type = listOf str;
description = mdDoc ''
List of source directories to backup (required). Globs and
tildes are expanded.
'';
example = [ "/home" "/etc" "/var/log/syslog*" ];
};
repositories = mkOption {
type = listOf str;
description = mdDoc ''
Paths to local or remote repositories (required). Tildes are
expanded. Multiple repositories are backed up to in
sequence. Borg placeholders can be used. See the output of
"borg help placeholders" for details. See ssh_command for
SSH options like identity file or port. If systemd service
is used, then add local repository paths in the systemd
service file to the ReadWritePaths list.
'';
example = [
"ssh://user@backupserver/./sourcehostname.borg"
"ssh://user@backupserver/./{fqdn}"
"/var/local/backups/local.borg"
];
};
};
};
cfgfile = settingsFormat.generate "config.yaml" cfg.settings; cfgfile = settingsFormat.generate "config.yaml" cfg.settings;
in { in
{
options.services.borgmatic = { options.services.borgmatic = {
enable = mkEnableOption (lib.mdDoc "borgmatic"); enable = mkEnableOption (mdDoc "borgmatic");
settings = mkOption { settings = mkOption {
description = lib.mdDoc '' description = mdDoc ''
See https://torsion.org/borgmatic/docs/reference/configuration/ See https://torsion.org/borgmatic/docs/reference/configuration/
''; '';
type = types.submodule { default = null;
freeformType = settingsFormat.type; type = types.nullOr cfgType;
options.location = { };
source_directories = mkOption {
type = types.listOf types.str; configurations = mkOption {
description = lib.mdDoc '' description = mdDoc ''
List of source directories to backup (required). Globs and Set of borgmatic configurations, see https://torsion.org/borgmatic/docs/reference/configuration/
tildes are expanded. '';
''; default = { };
example = [ "/home" "/etc" "/var/log/syslog*" ]; type = types.attrsOf cfgType;
};
repositories = mkOption {
type = types.listOf types.str;
description = lib.mdDoc ''
Paths to local or remote repositories (required). Tildes are
expanded. Multiple repositories are backed up to in
sequence. Borg placeholders can be used. See the output of
"borg help placeholders" for details. See ssh_command for
SSH options like identity file or port. If systemd service
is used, then add local repository paths in the systemd
service file to the ReadWritePaths list.
'';
example = [
"user@backupserver:sourcehostname.borg"
"user@backupserver:{fqdn}"
];
};
};
};
}; };
}; };
@ -50,9 +64,13 @@ in {
environment.systemPackages = [ pkgs.borgmatic ]; environment.systemPackages = [ pkgs.borgmatic ];
environment.etc."borgmatic/config.yaml".source = cfgfile; environment.etc = (optionalAttrs (cfg.settings != null) { "borgmatic/config.yaml".source = cfgfile; }) //
mapAttrs'
(name: value: nameValuePair
"borgmatic.d/${name}.yaml"
{ source = settingsFormat.generate "${name}.yaml" value; })
cfg.configurations;
systemd.packages = [ pkgs.borgmatic ]; systemd.packages = [ pkgs.borgmatic ];
}; };
} }

View file

@ -106,6 +106,14 @@ in
description = lib.mdDoc "Only run the server. This option only makes sense for a server."; description = lib.mdDoc "Only run the server. This option only makes sense for a server.";
}; };
environmentFile = mkOption {
type = types.nullOr types.path;
description = lib.mdDoc ''
File path containing environment variables for configuring the k3s service in the format of an EnvironmentFile. See systemd.exec(5).
'';
default = null;
};
configPath = mkOption { configPath = mkOption {
type = types.nullOr types.path; type = types.nullOr types.path;
default = null; default = null;
@ -154,6 +162,7 @@ in
LimitNPROC = "infinity"; LimitNPROC = "infinity";
LimitCORE = "infinity"; LimitCORE = "infinity";
TasksMax = "infinity"; TasksMax = "infinity";
EnvironmentFile = cfg.environmentFile;
ExecStart = concatStringsSep " \\\n " ( ExecStart = concatStringsSep " \\\n " (
[ [
"${cfg.package}/bin/k3s ${cfg.role}" "${cfg.package}/bin/k3s ${cfg.role}"

View file

@ -41,17 +41,42 @@ with lib;
tokenFile = mkOption { tokenFile = mkOption {
type = types.path; type = types.path;
description = lib.mdDoc '' description = lib.mdDoc ''
The full path to a file which contains either a runner registration token or a The full path to a file which contains either
(fine-grained) personal access token (PAT).
The file should contain exactly one line with the token without any newline.
If a registration token is given, it can be used to re-register a runner of the same
name but is time-limited. If the file contains a PAT, the service creates a new
registration token on startup as needed. Make sure the PAT has a scope of
`admin:org` for organization-wide registrations or a scope of
`repo` for a single repository. Fine-grained PATs need read and write permission
to the "Administration" resources.
Changing this option or the file's content triggers a new runner registration. * a fine-grained personal access token (PAT),
* a classic PAT
* or a runner registration token
Changing this option or the `tokenFile`s content triggers a new runner registration.
We suggest using the fine-grained PATs. A runner registration token is valid
only for 1 hour after creation, so the next time the runner configuration changes
this will give you hard-to-debug HTTP 404 errors in the configure step.
The file should contain exactly one line with the token without any newline.
(Use `echo -n 'token' > token file` to make sure no newlines sneak in.)
If the file contains a PAT, the service creates a new registration token
on startup as needed.
If a registration token is given, it can be used to re-register a runner of the same
name but is time-limited as noted above.
For fine-grained PATs:
Give it "Read and Write access to organization/repository self hosted runners",
depending on whether it is organization wide or per-repository. You might have to
experiment a little, fine-grained PATs are a `beta` Github feature and still subject
to change; nonetheless they are the best option at the moment.
For classic PATs:
Make sure the PAT has a scope of `admin:org` for organization-wide registrations
or a scope of `repo` for a single repository.
For runner registration tokens:
Nothing special needs to be done, but updating will break after one hour,
so these are not recommended.
''; '';
example = "/run/secrets/github-runner/nixos.token"; example = "/run/secrets/github-runner/nixos.token";
}; };

View file

@ -124,6 +124,8 @@ in
# The state directory is entirely empty which indicates a first start # The state directory is entirely empty which indicates a first start
copy_tokens copy_tokens
fi fi
# Always clean workDir
find -H "$WORK_DIRECTORY" -mindepth 1 -delete
''; '';
configureRunner = writeScript "configure" '' configureRunner = writeScript "configure" ''
if [[ -e "${newConfigTokenPath}" ]]; then if [[ -e "${newConfigTokenPath}" ]]; then
@ -159,9 +161,6 @@ in
fi fi
''; '';
setupWorkDir = writeScript "setup-work-dirs" '' setupWorkDir = writeScript "setup-work-dirs" ''
# Cleanup previous service
${pkgs.findutils}/bin/find -H "$WORK_DIRECTORY" -mindepth 1 -delete
# Link _diag dir # Link _diag dir
ln -s "$LOGS_DIRECTORY" "$WORK_DIRECTORY/_diag" ln -s "$LOGS_DIRECTORY" "$WORK_DIRECTORY/_diag"

View file

@ -577,7 +577,7 @@ in {
}; };
}; };
# Enable periodic clear-docker-cache script # Enable periodic clear-docker-cache script
systemd.services.gitlab-runner-clear-docker-cache = { systemd.services.gitlab-runner-clear-docker-cache = mkIf (cfg.clear-docker-cache.enable && (any (s: s.executor == "docker") (attrValues cfg.services))) {
description = "Prune gitlab-runner docker resources"; description = "Prune gitlab-runner docker resources";
restartIfChanged = false; restartIfChanged = false;
unitConfig.X-StopOnRemoval = false; unitConfig.X-StopOnRemoval = false;
@ -590,7 +590,7 @@ in {
${pkgs.gitlab-runner}/bin/clear-docker-cache ${toString cfg.clear-docker-cache.flags} ${pkgs.gitlab-runner}/bin/clear-docker-cache ${toString cfg.clear-docker-cache.flags}
''; '';
startAt = optional cfg.clear-docker-cache.enable cfg.clear-docker-cache.dates; startAt = cfg.clear-docker-cache.dates;
}; };
# Enable docker if `docker` executor is used in any service # Enable docker if `docker` executor is used in any service
virtualisation.docker.enable = mkIf ( virtualisation.docker.enable = mkIf (

View file

@ -0,0 +1,38 @@
{
"context.properties": {},
"context.modules": [
{
"name": "libpipewire-module-rt",
"args": {
"nice.level": -11
},
"flags": [
"ifexists",
"nofail"
]
},
{
"name": "libpipewire-module-protocol-native"
},
{
"name": "libpipewire-module-client-node"
},
{
"name": "libpipewire-module-adapter"
},
{
"name": "libpipewire-module-rtp-source",
"args": {
"sap.ip": "239.255.255.255",
"sap.port": 9875,
"sess.latency.msec": 10,
"local.ifname": "eth0",
"stream.props": {
"media.class": "Audio/Source",
"node.virtual": false,
"device.api": "aes67"
}
}
}
]
}

View file

@ -3,10 +3,10 @@
"link.max-buffers": 16, "link.max-buffers": 16,
"core.daemon": true, "core.daemon": true,
"core.name": "pipewire-0", "core.name": "pipewire-0",
"default.clock.min-quantum": 16,
"vm.overrides": { "vm.overrides": {
"default.clock.min-quantum": 1024 "default.clock.min-quantum": 1024
} },
"module.x11.bell": true
}, },
"context.spa-libs": { "context.spa-libs": {
"audio.convert.*": "audioconvert/libspa-audioconvert", "audio.convert.*": "audioconvert/libspa-audioconvert",
@ -77,6 +77,11 @@
"flags": [ "flags": [
"ifexists", "ifexists",
"nofail" "nofail"
],
"condition": [
{
"module.x11.bell": true
}
] ]
} }
], ],

View file

@ -809,7 +809,7 @@ in
// optionalAttrs (cfg.relayHost != "") { relayhost = if cfg.lookupMX // optionalAttrs (cfg.relayHost != "") { relayhost = if cfg.lookupMX
then "${cfg.relayHost}:${toString cfg.relayPort}" then "${cfg.relayHost}:${toString cfg.relayPort}"
else "[${cfg.relayHost}]:${toString cfg.relayPort}"; } else "[${cfg.relayHost}]:${toString cfg.relayPort}"; }
// optionalAttrs config.networking.enableIPv6 { inet_protocols = mkDefault "all"; } // optionalAttrs (!config.networking.enableIPv6) { inet_protocols = mkDefault "ipv4"; }
// optionalAttrs (cfg.networks != null) { mynetworks = cfg.networks; } // optionalAttrs (cfg.networks != null) { mynetworks = cfg.networks; }
// optionalAttrs (cfg.networksStyle != "") { mynetworks_style = cfg.networksStyle; } // optionalAttrs (cfg.networksStyle != "") { mynetworks_style = cfg.networksStyle; }
// optionalAttrs (cfg.hostname != "") { myhostname = cfg.hostname; } // optionalAttrs (cfg.hostname != "") { myhostname = cfg.hostname; }

View file

@ -288,11 +288,11 @@ in
LimitNOFILE = 65535; LimitNOFILE = 65535;
EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile; EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
LoadCredential = cfg.loadCredential; LoadCredential = cfg.loadCredential;
ExecStartPre = '' ExecStartPre = [''
${pkgs.envsubst}/bin/envsubst \ ${pkgs.envsubst}/bin/envsubst \
-i ${configurationYaml} \ -i ${configurationYaml} \
-o /run/dendrite/dendrite.yaml -o /run/dendrite/dendrite.yaml
''; ''];
ExecStart = lib.strings.concatStringsSep " " ([ ExecStart = lib.strings.concatStringsSep " " ([
"${pkgs.dendrite}/bin/dendrite-monolith-server" "${pkgs.dendrite}/bin/dendrite-monolith-server"
"--config /run/dendrite/dendrite.yaml" "--config /run/dendrite/dendrite.yaml"

View file

@ -31,7 +31,7 @@ let
"m.homeserver".base_url = "https://${fqdn}"; "m.homeserver".base_url = "https://${fqdn}";
"m.identity_server" = {}; "m.identity_server" = {};
}; };
serverConfig."m.server" = "${config.services.matrix-synapse.settings.server_name}:443"; serverConfig."m.server" = "${fqdn}:443";
mkWellKnown = data: '' mkWellKnown = data: ''
add_header Content-Type application/json; add_header Content-Type application/json;
add_header Access-Control-Allow-Origin *; add_header Access-Control-Allow-Origin *;

View file

@ -5,7 +5,7 @@ with lib;
let let
cfg = config.services.gitea; cfg = config.services.gitea;
opt = options.services.gitea; opt = options.services.gitea;
gitea = cfg.package; exe = lib.getExe cfg.package;
pg = config.services.postgresql; pg = config.services.postgresql;
useMysql = cfg.database.type == "mysql"; useMysql = cfg.database.type == "mysql";
usePostgresql = cfg.database.type == "postgres"; usePostgresql = cfg.database.type == "postgres";
@ -248,7 +248,7 @@ in
staticRootPath = mkOption { staticRootPath = mkOption {
type = types.either types.str types.path; type = types.either types.str types.path;
default = gitea.data; default = cfg.package.data;
defaultText = literalExpression "package.data"; defaultText = literalExpression "package.data";
example = "/var/lib/gitea/data"; example = "/var/lib/gitea/data";
description = lib.mdDoc "Upper level of template and static files path."; description = lib.mdDoc "Upper level of template and static files path.";
@ -481,14 +481,14 @@ in
# If we have a folder or symlink with gitea locales, remove it # If we have a folder or symlink with gitea locales, remove it
# And symlink the current gitea locales in place # And symlink the current gitea locales in place
"L+ '${cfg.stateDir}/conf/locale' - - - - ${gitea.out}/locale" "L+ '${cfg.stateDir}/conf/locale' - - - - ${cfg.package.out}/locale"
]; ];
systemd.services.gitea = { systemd.services.gitea = {
description = "gitea"; description = "gitea";
after = [ "network.target" ] ++ lib.optional usePostgresql "postgresql.service" ++ lib.optional useMysql "mysql.service"; after = [ "network.target" ] ++ lib.optional usePostgresql "postgresql.service" ++ lib.optional useMysql "mysql.service";
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
path = [ gitea pkgs.git pkgs.gnupg ]; path = [ cfg.package pkgs.git pkgs.gnupg ];
# In older versions the secret naming for JWT was kind of confusing. # In older versions the secret naming for JWT was kind of confusing.
# The file jwt_secret hold the value for LFS_JWT_SECRET and JWT_SECRET # The file jwt_secret hold the value for LFS_JWT_SECRET and JWT_SECRET
@ -512,7 +512,7 @@ in
cp -f ${configFile} ${runConfig} cp -f ${configFile} ${runConfig}
if [ ! -s ${secretKey} ]; then if [ ! -s ${secretKey} ]; then
${gitea}/bin/gitea generate secret SECRET_KEY > ${secretKey} ${exe} generate secret SECRET_KEY > ${secretKey}
fi fi
# Migrate LFS_JWT_SECRET filename # Migrate LFS_JWT_SECRET filename
@ -521,15 +521,15 @@ in
fi fi
if [ ! -s ${oauth2JwtSecret} ]; then if [ ! -s ${oauth2JwtSecret} ]; then
${gitea}/bin/gitea generate secret JWT_SECRET > ${oauth2JwtSecret} ${exe} generate secret JWT_SECRET > ${oauth2JwtSecret}
fi fi
if [ ! -s ${lfsJwtSecret} ]; then if [ ! -s ${lfsJwtSecret} ]; then
${gitea}/bin/gitea generate secret LFS_JWT_SECRET > ${lfsJwtSecret} ${exe} generate secret LFS_JWT_SECRET > ${lfsJwtSecret}
fi fi
if [ ! -s ${internalToken} ]; then if [ ! -s ${internalToken} ]; then
${gitea}/bin/gitea generate secret INTERNAL_TOKEN > ${internalToken} ${exe} generate secret INTERNAL_TOKEN > ${internalToken}
fi fi
chmod u+w '${runConfig}' chmod u+w '${runConfig}'
@ -548,15 +548,15 @@ in
''} ''}
# run migrations/init the database # run migrations/init the database
${gitea}/bin/gitea migrate ${exe} migrate
# update all hooks' binary paths # update all hooks' binary paths
${gitea}/bin/gitea admin regenerate hooks ${exe} admin regenerate hooks
# update command option in authorized_keys # update command option in authorized_keys
if [ -r ${cfg.stateDir}/.ssh/authorized_keys ] if [ -r ${cfg.stateDir}/.ssh/authorized_keys ]
then then
${gitea}/bin/gitea admin regenerate keys ${exe} admin regenerate keys
fi fi
''; '';
@ -565,7 +565,7 @@ in
User = cfg.user; User = cfg.user;
Group = "gitea"; Group = "gitea";
WorkingDirectory = cfg.stateDir; WorkingDirectory = cfg.stateDir;
ExecStart = "${gitea}/bin/gitea web --pid /run/gitea/gitea.pid"; ExecStart = "${exe} web --pid /run/gitea/gitea.pid";
Restart = "always"; Restart = "always";
# Runtime directory and mode # Runtime directory and mode
RuntimeDirectory = "gitea"; RuntimeDirectory = "gitea";
@ -597,7 +597,7 @@ in
PrivateMounts = true; PrivateMounts = true;
# System Call Filtering # System Call Filtering
SystemCallArchitectures = "native"; SystemCallArchitectures = "native";
SystemCallFilter = "~@clock @cpu-emulation @debug @keyring @memlock @module @mount @obsolete @raw-io @reboot @setuid @swap"; SystemCallFilter = "~@clock @cpu-emulation @debug @keyring @module @mount @obsolete @raw-io @reboot @setuid @swap";
}; };
environment = { environment = {
@ -635,7 +635,7 @@ in
systemd.services.gitea-dump = mkIf cfg.dump.enable { systemd.services.gitea-dump = mkIf cfg.dump.enable {
description = "gitea dump"; description = "gitea dump";
after = [ "gitea.service" ]; after = [ "gitea.service" ];
path = [ gitea ]; path = [ cfg.package ];
environment = { environment = {
USER = cfg.user; USER = cfg.user;
@ -646,7 +646,7 @@ in
serviceConfig = { serviceConfig = {
Type = "oneshot"; Type = "oneshot";
User = cfg.user; User = cfg.user;
ExecStart = "${gitea}/bin/gitea dump --type ${cfg.dump.type}" + optionalString (cfg.dump.file != null) " --file ${cfg.dump.file}"; ExecStart = "${exe} dump --type ${cfg.dump.type}" + optionalString (cfg.dump.file != null) " --file ${cfg.dump.file}";
WorkingDirectory = cfg.dump.backupDir; WorkingDirectory = cfg.dump.backupDir;
}; };
}; };
@ -658,5 +658,5 @@ in
timerConfig.OnCalendar = cfg.dump.interval; timerConfig.OnCalendar = cfg.dump.interval;
}; };
}; };
meta.maintainers = with lib.maintainers; [ srhb ma27 ]; meta.maintainers = with lib.maintainers; [ srhb ma27 thehedgeh0g ];
} }

View file

@ -135,7 +135,7 @@ in
} }
{ {
assertion = (cfg.configFile != null) != (cfg.settings != null); assertion = (cfg.configFile != null) != (cfg.settings != null);
message = "You need to either specify services.klipper.settings or services.klipper.defaultConfig."; message = "You need to either specify services.klipper.settings or services.klipper.configFile.";
} }
]; ];

View file

@ -6,6 +6,7 @@ let
pkg = cfg.package; pkg = cfg.package;
defaultUser = "paperless"; defaultUser = "paperless";
nltkDir = "/var/cache/paperless/nltk";
# Don't start a redis instance if the user sets a custom redis connection # Don't start a redis instance if the user sets a custom redis connection
enableRedis = !hasAttr "PAPERLESS_REDIS" cfg.extraConfig; enableRedis = !hasAttr "PAPERLESS_REDIS" cfg.extraConfig;
@ -15,6 +16,7 @@ let
PAPERLESS_DATA_DIR = cfg.dataDir; PAPERLESS_DATA_DIR = cfg.dataDir;
PAPERLESS_MEDIA_ROOT = cfg.mediaDir; PAPERLESS_MEDIA_ROOT = cfg.mediaDir;
PAPERLESS_CONSUMPTION_DIR = cfg.consumptionDir; PAPERLESS_CONSUMPTION_DIR = cfg.consumptionDir;
PAPERLESS_NLTK_DIR = nltkDir;
GUNICORN_CMD_ARGS = "--bind=${cfg.address}:${toString cfg.port}"; GUNICORN_CMD_ARGS = "--bind=${cfg.address}:${toString cfg.port}";
} // optionalAttrs (config.time.timeZone != null) { } // optionalAttrs (config.time.timeZone != null) {
PAPERLESS_TIME_ZONE = config.time.timeZone; PAPERLESS_TIME_ZONE = config.time.timeZone;
@ -24,12 +26,14 @@ let
lib.mapAttrs (_: toString) cfg.extraConfig lib.mapAttrs (_: toString) cfg.extraConfig
); );
manage = let manage =
setupEnv = lib.concatStringsSep "\n" (mapAttrsToList (name: val: "export ${name}=\"${val}\"") env); let
in pkgs.writeShellScript "manage" '' setupEnv = lib.concatStringsSep "\n" (mapAttrsToList (name: val: "export ${name}=\"${val}\"") env);
${setupEnv} in
exec ${pkg}/bin/paperless-ngx "$@" pkgs.writeShellScript "manage" ''
''; ${setupEnv}
exec ${pkg}/bin/paperless-ngx "$@"
'';
# Secure the services # Secure the services
defaultServiceConfig = { defaultServiceConfig = {
@ -47,6 +51,7 @@ let
cfg.dataDir cfg.dataDir
cfg.mediaDir cfg.mediaDir
]; ];
CacheDirectory = "paperless";
CapabilityBoundingSet = ""; CapabilityBoundingSet = "";
# ProtectClock adds DeviceAllow=char-rtc r # ProtectClock adds DeviceAllow=char-rtc r
DeviceAllow = ""; DeviceAllow = "";
@ -170,7 +175,7 @@ in
extraConfig = mkOption { extraConfig = mkOption {
type = types.attrs; type = types.attrs;
default = {}; default = { };
description = lib.mdDoc '' description = lib.mdDoc ''
Extra paperless config options. Extra paperless config options.
@ -291,6 +296,33 @@ in
}; };
}; };
# Download NLTK corpus data
systemd.services.paperless-download-nltk-data = {
wantedBy = [ "paperless-scheduler.service" ];
before = [ "paperless-scheduler.service" ];
after = [ "network-online.target" ];
serviceConfig = defaultServiceConfig // {
User = cfg.user;
Type = "oneshot";
# Enable internet access
PrivateNetwork = false;
# Restrict write access
BindPaths = [];
BindReadOnlyPaths = [
"/nix/store"
"-/etc/resolv.conf"
"-/etc/nsswitch.conf"
"-/etc/ssl/certs"
"-/etc/static/ssl/certs"
"-/etc/hosts"
"-/etc/localtime"
];
ExecStart = let pythonWithNltk = pkg.python.withPackages (ps: [ ps.nltk ]); in ''
${pythonWithNltk}/bin/python -m nltk.downloader -d '${nltkDir}' punkt snowball_data stopwords
'';
};
};
systemd.services.paperless-consumer = { systemd.services.paperless-consumer = {
description = "Paperless document consumer"; description = "Paperless document consumer";
# Bind to `paperless-scheduler` so that the consumer never runs # Bind to `paperless-scheduler` so that the consumer never runs

View file

@ -269,6 +269,10 @@ in
assertion = cfg.filterForward -> config.networking.nftables.enable; assertion = cfg.filterForward -> config.networking.nftables.enable;
message = "filterForward only works with the nftables based firewall"; message = "filterForward only works with the nftables based firewall";
} }
{
assertion = cfg.autoLoadConntrackHelpers -> lib.versionOlder config.boot.kernelPackages.kernel.version "6";
message = "conntrack helper autoloading has been removed from kernel 6.0 and newer";
}
]; ];
networking.firewall.trustedInterfaces = [ "lo" ]; networking.firewall.trustedInterfaces = [ "lo" ];

View file

@ -299,17 +299,51 @@ in {
''; '';
}; };
domain_map = mkOption { scope = mkOption {
type = types.attrsOf types.str; type = types.listOf types.str;
default = {}; default = ["openid" "profile" "email"];
description = lib.mdDoc '' description = lib.mdDoc ''
Domain map is used to map incomming users (by their email) to Scopes used in the OIDC flow.
a namespace. The key can be a string, or regex. '';
};
extra_params = mkOption {
type = types.attrsOf types.str;
default = { };
description = lib.mdDoc ''
Custom query parameters to send with the Authorize Endpoint request.
''; '';
example = { example = {
".*" = "default-namespace"; domain_hint = "example.com";
}; };
}; };
allowed_domains = mkOption {
type = types.listOf types.str;
default = [ ];
description = lib.mdDoc ''
Allowed principal domains. if an authenticated user's domain
is not in this list authentication request will be rejected.
'';
example = [ "example.com" ];
};
allowed_users = mkOption {
type = types.listOf types.str;
default = [ ];
description = lib.mdDoc ''
Users allowed to authenticate even if not in allowedDomains.
'';
example = [ "alice@example.com" ];
};
strip_email_domain = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Whether the domain part of the email address should be removed when generating namespaces.
'';
};
}; };
tls_letsencrypt_hostname = mkOption { tls_letsencrypt_hostname = mkOption {
@ -392,13 +426,16 @@ in {
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "issuer"] ["services" "headscale" "settings" "oidc" "issuer"]) (mkRenamedOptionModule ["services" "headscale" "openIdConnect" "issuer"] ["services" "headscale" "settings" "oidc" "issuer"])
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientId"] ["services" "headscale" "settings" "oidc" "client_id"]) (mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientId"] ["services" "headscale" "settings" "oidc" "client_id"])
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientSecretFile"] ["services" "headscale" "settings" "oidc" "client_secret_file"]) (mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientSecretFile"] ["services" "headscale" "settings" "oidc" "client_secret_file"])
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "domainMap"] ["services" "headscale" "settings" "oidc" "domain_map"])
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "hostname"] ["services" "headscale" "settings" "tls_letsencrypt_hostname"]) (mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "hostname"] ["services" "headscale" "settings" "tls_letsencrypt_hostname"])
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "challengeType"] ["services" "headscale" "settings" "tls_letsencrypt_challenge_type"]) (mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "challengeType"] ["services" "headscale" "settings" "tls_letsencrypt_challenge_type"])
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "httpListen"] ["services" "headscale" "settings" "tls_letsencrypt_listen"]) (mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "httpListen"] ["services" "headscale" "settings" "tls_letsencrypt_listen"])
(mkRenamedOptionModule ["services" "headscale" "tls" "certFile"] ["services" "headscale" "settings" "tls_cert_path"]) (mkRenamedOptionModule ["services" "headscale" "tls" "certFile"] ["services" "headscale" "settings" "tls_cert_path"])
(mkRenamedOptionModule ["services" "headscale" "tls" "keyFile"] ["services" "headscale" "settings" "tls_key_path"]) (mkRenamedOptionModule ["services" "headscale" "tls" "keyFile"] ["services" "headscale" "settings" "tls_key_path"])
(mkRenamedOptionModule ["services" "headscale" "aclPolicyFile"] ["services" "headscale" "settings" "acl_policy_path"]) (mkRenamedOptionModule ["services" "headscale" "aclPolicyFile"] ["services" "headscale" "settings" "acl_policy_path"])
(mkRemovedOptionModule ["services" "headscale" "openIdConnect" "domainMap"] ''
Headscale no longer uses domain_map. If you're using an old version of headscale you can still set this option via services.headscale.settings.oidc.domain_map.
'')
]; ];
config = mkIf cfg.enable { config = mkIf cfg.enable {

View file

@ -10,9 +10,12 @@ in {
address = mkOption { address = mkOption {
type = types.str; type = types.str;
default = ""; default = "localhost";
description = mdDoc "Bind address. Corresponds to the `-a` flag."; description = mdDoc ''
example = "localhost"; Bind address. Corresponds to the `-a` flag.
Set to `""` to bind to all addresses.
'';
example = "[::1]";
}; };
port = mkOption { port = mkOption {

View file

@ -28,6 +28,32 @@ in
<https://wiki.nftables.org/wiki-nftables/index.php/Troubleshooting#Question_4._How_do_nftables_and_iptables_interact_when_used_on_the_same_system.3F>. <https://wiki.nftables.org/wiki-nftables/index.php/Troubleshooting#Question_4._How_do_nftables_and_iptables_interact_when_used_on_the_same_system.3F>.
''; '';
}; };
networking.nftables.checkRuleset = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Run `nft check` on the ruleset to spot syntax errors during build.
Because this is executed in a sandbox, the check might fail if it requires
access to any environmental factors or paths outside the Nix store.
To circumvent this, the ruleset file can be edited using the preCheckRuleset
option to work in the sandbox environment.
'';
};
networking.nftables.preCheckRuleset = mkOption {
type = types.lines;
default = "";
example = lib.literalExpression ''
sed 's/skgid meadow/skgid nogroup/g' -i ruleset.conf
'';
description = lib.mdDoc ''
This script gets run before the ruleset is checked. It can be used to
create additional files needed for the ruleset check to work, or modify
the ruleset for cases the build environment cannot cover.
'';
};
networking.nftables.ruleset = mkOption { networking.nftables.ruleset = mkOption {
type = types.lines; type = types.lines;
default = ""; default = "";
@ -105,13 +131,24 @@ in
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
reloadIfChanged = true; reloadIfChanged = true;
serviceConfig = let serviceConfig = let
rulesScript = pkgs.writeScript "nftables-rules" '' rulesScript = pkgs.writeTextFile {
#! ${pkgs.nftables}/bin/nft -f name = "nftables-rules";
flush ruleset executable = true;
${if cfg.rulesetFile != null then '' text = ''
include "${cfg.rulesetFile}" #! ${pkgs.nftables}/bin/nft -f
'' else cfg.ruleset} flush ruleset
''; ${if cfg.rulesetFile != null then ''
include "${cfg.rulesetFile}"
'' else cfg.ruleset}
'';
checkPhase = lib.optionalString cfg.checkRuleset ''
cp $out ruleset.conf
${cfg.preCheckRuleset}
export NIX_REDIRECTS=/etc/protocols=${pkgs.buildPackages.iana-etc}/etc/protocols:/etc/services=${pkgs.buildPackages.iana-etc}/etc/services
LD_PRELOAD="${pkgs.buildPackages.libredirect}/lib/libredirect.so ${pkgs.buildPackages.lklWithFirewall.lib}/lib/liblkl-hijack.so" \
${pkgs.buildPackages.nftables}/bin/nft --check --file ruleset.conf
'';
};
in { in {
Type = "oneshot"; Type = "oneshot";
RemainAfterExit = true; RemainAfterExit = true;

View file

@ -14,7 +14,6 @@ let
path = makeBinPath (getAttr "openvpn-${name}" config.systemd.services).path; path = makeBinPath (getAttr "openvpn-${name}" config.systemd.services).path;
upScript = '' upScript = ''
#! /bin/sh
export PATH=${path} export PATH=${path}
# For convenience in client scripts, extract the remote domain # For convenience in client scripts, extract the remote domain
@ -34,7 +33,6 @@ let
''; '';
downScript = '' downScript = ''
#! /bin/sh
export PATH=${path} export PATH=${path}
${optionalString cfg.updateResolvConf ${optionalString cfg.updateResolvConf
"${pkgs.update-resolv-conf}/libexec/openvpn/update-resolv-conf"} "${pkgs.update-resolv-conf}/libexec/openvpn/update-resolv-conf"}
@ -47,9 +45,9 @@ let
${optionalString (cfg.up != "" || cfg.down != "" || cfg.updateResolvConf) "script-security 2"} ${optionalString (cfg.up != "" || cfg.down != "" || cfg.updateResolvConf) "script-security 2"}
${cfg.config} ${cfg.config}
${optionalString (cfg.up != "" || cfg.updateResolvConf) ${optionalString (cfg.up != "" || cfg.updateResolvConf)
"up ${pkgs.writeScript "openvpn-${name}-up" upScript}"} "up ${pkgs.writeShellScript "openvpn-${name}-up" upScript}"}
${optionalString (cfg.down != "" || cfg.updateResolvConf) ${optionalString (cfg.down != "" || cfg.updateResolvConf)
"down ${pkgs.writeScript "openvpn-${name}-down" downScript}"} "down ${pkgs.writeShellScript "openvpn-${name}-down" downScript}"}
${optionalString (cfg.authUserPass != null) ${optionalString (cfg.authUserPass != null)
"auth-user-pass ${pkgs.writeText "openvpn-credentials-${name}" '' "auth-user-pass ${pkgs.writeText "openvpn-credentials-${name}" ''
${cfg.authUserPass.username} ${cfg.authUserPass.username}

View file

@ -11,6 +11,14 @@ in
services.teleport = with lib.types; { services.teleport = with lib.types; {
enable = mkEnableOption (lib.mdDoc "the Teleport service"); enable = mkEnableOption (lib.mdDoc "the Teleport service");
package = mkOption {
type = types.package;
default = pkgs.teleport;
defaultText = lib.literalMD "pkgs.teleport";
example = lib.literalMD "pkgs.teleport_11";
description = lib.mdDoc "The teleport package to use";
};
settings = mkOption { settings = mkOption {
type = settingsYaml.type; type = settingsYaml.type;
default = { }; default = { };
@ -74,14 +82,14 @@ in
}; };
config = mkIf config.services.teleport.enable { config = mkIf config.services.teleport.enable {
environment.systemPackages = [ pkgs.teleport ]; environment.systemPackages = [ cfg.package ];
systemd.services.teleport = { systemd.services.teleport = {
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
after = [ "network.target" ]; after = [ "network.target" ];
serviceConfig = { serviceConfig = {
ExecStart = '' ExecStart = ''
${pkgs.teleport}/bin/teleport start \ ${cfg.package}/bin/teleport start \
${optionalString cfg.insecure.enable "--insecure"} \ ${optionalString cfg.insecure.enable "--insecure"} \
${optionalString cfg.diag.enable "--diag-addr=${cfg.diag.addr}:${toString cfg.diag.port}"} \ ${optionalString cfg.diag.enable "--diag-addr=${cfg.diag.addr}:${toString cfg.diag.port}"} \
${optionalString (cfg.settings != { }) "--config=${settingsYaml.generate "teleport.yaml" cfg.settings}"} ${optionalString (cfg.settings != { }) "--config=${settingsYaml.generate "teleport.yaml" cfg.settings}"}

View file

@ -286,6 +286,8 @@ in {
LockPersonality = true; LockPersonality = true;
RestrictSUIDSGID = true; RestrictSUIDSGID = true;
ReadWritePaths = [ cfg.stateDir ];
Restart = "on-failure"; Restart = "on-failure";
RestartSec = "5s"; RestartSec = "5s";
}; };

View file

@ -199,12 +199,16 @@ in
# java.security.AccessControlException: # java.security.AccessControlException:
# access denied ("java.io.FilePermission" "/var/lib/opensearch/config/opensearch.yml" "read") # access denied ("java.io.FilePermission" "/var/lib/opensearch/config/opensearch.yml" "read")
rm -f ${configDir}/opensearch.yml
cp ${opensearchYml} ${configDir}/opensearch.yml cp ${opensearchYml} ${configDir}/opensearch.yml
# Make sure the logging configuration for old OpenSearch versions is removed: # Make sure the logging configuration for old OpenSearch versions is removed:
rm -f "${configDir}/logging.yml" rm -f "${configDir}/logging.yml"
rm -f ${configDir}/${loggingConfigFilename}
cp ${loggingConfigFile} ${configDir}/${loggingConfigFilename} cp ${loggingConfigFile} ${configDir}/${loggingConfigFilename}
mkdir -p ${configDir}/scripts mkdir -p ${configDir}/scripts
rm -f ${configDir}/jvm.options
cp ${cfg.package}/config/jvm.options ${configDir}/jvm.options cp ${cfg.package}/config/jvm.options ${configDir}/jvm.options
# redirect jvm logs to the data directory # redirect jvm logs to the data directory

View file

@ -0,0 +1,128 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.qdrant;
settingsFormat = pkgs.formats.yaml { };
configFile = settingsFormat.generate "config.yaml" cfg.settings;
in {
options = {
services.qdrant = {
enable = mkEnableOption (lib.mdDoc "Vector Search Engine for the next generation of AI applications");
settings = mkOption {
description = lib.mdDoc ''
Configuration for Qdrant
Refer to <https://github.com/qdrant/qdrant/blob/master/config/config.yaml> for details on supported values.
'';
type = settingsFormat.type;
example = {
storage = {
storage_path = "/var/lib/qdrant/storage";
snapshots_path = "/var/lib/qdrant/snapshots";
};
hsnw_index = {
on_disk = true;
};
service = {
host = "127.0.0.1";
http_port = 6333;
grpc_port = 6334;
};
telemetry_disabled = true;
};
defaultText = literalExpression ''
{
storage = {
storage_path = "/var/lib/qdrant/storage";
snapshots_path = "/var/lib/qdrant/snapshots";
};
hsnw_index = {
on_disk = true;
};
service = {
host = "127.0.0.1";
http_port = 6333;
grpc_port = 6334;
};
telemetry_disabled = true;
}
'';
};
};
};
config = mkIf cfg.enable {
services.qdrant.settings = {
storage.storage_path = mkDefault "/var/lib/qdrant/storage";
storage.snapshots_path = mkDefault "/var/lib/qdrant/snapshots";
# The following default values are the same as in the default config,
# they are just written here for convenience.
storage.on_disk_payload = mkDefault true;
storage.wal.wal_capacity_mb = mkDefault 32;
storage.wal.wal_segments_ahead = mkDefault 0;
storage.performance.max_search_threads = mkDefault 0;
storage.performance.max_optimization_threads = mkDefault 1;
storage.optimizers.deleted_threshold = mkDefault 0.2;
storage.optimizers.vacuum_min_vector_number = mkDefault 1000;
storage.optimizers.default_segment_number = mkDefault 0;
storage.optimizers.max_segment_size_kb = mkDefault null;
storage.optimizers.memmap_threshold_kb = mkDefault null;
storage.optimizers.indexing_threshold_kb = mkDefault 20000;
storage.optimizers.flush_interval_sec = mkDefault 5;
storage.optimizers.max_optimization_threads = mkDefault 1;
storage.hnsw_index.m = mkDefault 16;
storage.hnsw_index.ef_construct = mkDefault 100;
storage.hnsw_index.full_scan_threshold_kb = mkDefault 10000;
storage.hnsw_index.max_indexing_threads = mkDefault 0;
storage.hnsw_index.on_disk = mkDefault false;
storage.hnsw_index.payload_m = mkDefault null;
service.max_request_size_mb = mkDefault 32;
service.max_workers = mkDefault 0;
service.http_port = mkDefault 6333;
service.grpc_port = mkDefault 6334;
service.enable_cors = mkDefault true;
cluster.enabled = mkDefault false;
# the following have been altered for security
service.host = mkDefault "127.0.0.1";
telemetry_disabled = mkDefault true;
};
systemd.services.qdrant = {
description = "Vector Search Engine for the next generation of AI applications";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
serviceConfig = {
ExecStart = "${pkgs.qdrant}/bin/qdrant --config-path ${configFile}";
DynamicUser = true;
Restart = "on-failure";
StateDirectory = "qdrant";
CapabilityBoundingSet = "";
NoNewPrivileges = true;
PrivateTmp = true;
ProtectHome = true;
ProtectClock = true;
ProtectProc = "noaccess";
ProcSubset = "pid";
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectControlGroups = true;
ProtectHostname = true;
RestrictSUIDSGID = true;
RestrictRealtime = true;
RestrictNamespaces = true;
LockPersonality = true;
RemoveIPC = true;
SystemCallFilter = [ "@system-service" "~@privileged" ];
};
};
};
}

View file

@ -318,8 +318,8 @@ to make packages available in the chroot.
{option}`services.systemd.akkoma.serviceConfig.BindPaths` and {option}`services.systemd.akkoma.serviceConfig.BindPaths` and
{option}`services.systemd.akkoma.serviceConfig.BindReadOnlyPaths` permit access to outside paths {option}`services.systemd.akkoma.serviceConfig.BindReadOnlyPaths` permit access to outside paths
through bind mounts. Refer to through bind mounts. Refer to
[{manpage}`systemd.exec(5)`](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#BindPaths=) [`BindPaths=`](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#BindPaths=)
for details. of {manpage}`systemd.exec(5)` for details.
### Distributed deployment {#modules-services-akkoma-distributed-deployment} ### Distributed deployment {#modules-services-akkoma-distributed-deployment}

View file

@ -84,7 +84,7 @@ in {
"-addr" "${cfg.bindIP}:${toString cfg.port}" "-addr" "${cfg.bindIP}:${toString cfg.port}"
"-theme" "${cfg.theme}" "-theme" "${cfg.theme}"
"imaps://${cfg.imaps.host}:${toString cfg.imaps.port}" "imaps://${cfg.imaps.host}:${toString cfg.imaps.port}"
"smpts://${cfg.smtps.host}:${toString cfg.smtps.port}" "smtps://${cfg.smtps.host}:${toString cfg.smtps.port}"
]; ];
}; };
}; };

View file

@ -0,0 +1,96 @@
{ config, options, lib, pkgs, ... }:
let
cfg = config.services.nextcloud.notify_push;
in
{
options.services.nextcloud.notify_push = {
enable = lib.mkEnableOption (lib.mdDoc "Notify push");
package = lib.mkOption {
type = lib.types.package;
default = pkgs.nextcloud-notify_push;
defaultText = lib.literalMD "pkgs.nextcloud-notify_push";
description = lib.mdDoc "Which package to use for notify_push";
};
socketPath = lib.mkOption {
type = lib.types.str;
default = "/run/nextcloud-notify_push/sock";
description = lib.mdDoc "Socket path to use for notify_push";
};
logLevel = lib.mkOption {
type = lib.types.enum [ "error" "warn" "info" "debug" "trace" ];
default = "error";
description = lib.mdDoc "Log level";
};
} // (
lib.genAttrs [
"dbtype"
"dbname"
"dbuser"
"dbpassFile"
"dbhost"
"dbport"
"dbtableprefix"
] (
opt: options.services.nextcloud.config.${opt} // {
default = config.services.nextcloud.config.${opt};
defaultText = "config.services.nextcloud.config.${opt}";
}
)
);
config = lib.mkIf cfg.enable {
systemd.services.nextcloud-notify_push = let
nextcloudUrl = "http${lib.optionalString config.services.nextcloud.https "s"}://${config.services.nextcloud.hostName}";
in {
description = "Push daemon for Nextcloud clients";
documentation = [ "https://github.com/nextcloud/notify_push" ];
after = [ "phpfpm-nextcloud.service" ];
wantedBy = [ "multi-user.target" ];
environment = {
NEXTCLOUD_URL = nextcloudUrl;
SOCKET_PATH = cfg.socketPath;
DATABASE_PREFIX = cfg.dbtableprefix;
LOG = cfg.logLevel;
};
postStart = ''
${config.services.nextcloud.occ}/bin/nextcloud-occ notify_push:setup ${nextcloudUrl}/push
'';
script = let
dbType = if cfg.dbtype == "pgsql" then "postgresql" else cfg.dbtype;
dbUser = lib.optionalString (cfg.dbuser != null) cfg.dbuser;
dbPass = lib.optionalString (cfg.dbpassFile != null) ":$DATABASE_PASSWORD";
isSocket = lib.hasPrefix "/" (toString cfg.dbhost);
dbHost = lib.optionalString (cfg.dbhost != null) (if
isSocket then
if dbType == "postgresql" then "?host=${cfg.dbhost}" else
if dbType == "mysql" then "?socket=${cfg.dbhost}" else throw "unsupported dbtype"
else
"@${cfg.dbhost}");
dbName = lib.optionalString (cfg.dbname != null) "/${cfg.dbname}";
dbUrl = "${dbType}://${dbUser}${dbPass}${lib.optionalString (!isSocket) dbHost}${dbName}${lib.optionalString isSocket dbHost}";
in lib.optionalString (dbPass != "") ''
export DATABASE_PASSWORD="$(<"${cfg.dbpassFile}")"
'' + ''
export DATABASE_URL="${dbUrl}"
${cfg.package}/bin/notify_push --glob-config '${config.services.nextcloud.datadir}/config/config.php'
'';
serviceConfig = {
User = "nextcloud";
Group = "nextcloud";
RuntimeDirectory = [ "nextcloud-notify_push" ];
Restart = "on-failure";
RestartSec = "5s";
};
};
services.nginx.virtualHosts.${config.services.nextcloud.hostName}.locations."^~ /push/" = {
proxyPass = "http://unix:${cfg.socketPath}";
proxyWebsockets = true;
recommendedProxySettings = true;
};
};
}

View file

@ -169,6 +169,9 @@ in
}; };
services.udev.packages = [ services.udev.packages = [
pkgs.pantheon.gnome-settings-daemon pkgs.pantheon.gnome-settings-daemon
# Force enable KMS modifiers for devices that require them.
# https://gitlab.gnome.org/GNOME/mutter/-/merge_requests/1443
pkgs.pantheon.mutter
]; ];
systemd.packages = [ systemd.packages = [
pkgs.pantheon.gnome-settings-daemon pkgs.pantheon.gnome-settings-daemon

View file

@ -379,12 +379,7 @@ in
security.pam.services.kde = { allowNullPassword = true; }; security.pam.services.kde = { allowNullPassword = true; };
# Doing these one by one seems silly, but we currently lack a better security.pam.services.login.enableKwallet = true;
# construct for handling common pam configs.
security.pam.services.gdm.enableKwallet = true;
security.pam.services.kdm.enableKwallet = true;
security.pam.services.lightdm.enableKwallet = true;
security.pam.services.sddm.enableKwallet = true;
systemd.user.services = { systemd.user.services = {
plasma-early-setup = mkIf cfg.runUsingSystemd { plasma-early-setup = mkIf cfg.runUsingSystemd {

View file

@ -215,10 +215,12 @@ in
}; };
security.pam.services = { security.pam.services = {
sddm = { sddm.text = ''
allowNullPassword = true; auth substack login
startSession = true; account include login
}; password substack login
session include login
'';
sddm-greeter.text = '' sddm-greeter.text = ''
auth required pam_succeed_if.so audit quiet_success user = sddm auth required pam_succeed_if.so audit quiet_success user = sddm

View file

@ -256,7 +256,7 @@ in
videoDrivers = mkOption { videoDrivers = mkOption {
type = types.listOf types.str; type = types.listOf types.str;
default = [ "amdgpu" "radeon" "nouveau" "modesetting" "fbdev" ]; default = [ "modesetting" "fbdev" ];
example = [ example = [
"nvidia" "nvidiaLegacy390" "nvidiaLegacy340" "nvidiaLegacy304" "nvidia" "nvidiaLegacy390" "nvidiaLegacy340" "nvidiaLegacy304"
"amdgpu-pro" "amdgpu-pro"

View file

@ -1948,7 +1948,7 @@ in
Extra command-line arguments to pass to systemd-networkd-wait-online. Extra command-line arguments to pass to systemd-networkd-wait-online.
These also affect per-interface `systemd-network-wait-online@` services. These also affect per-interface `systemd-network-wait-online@` services.
See [{manpage}`systemd-networkd-wait-online.service(8)`](https://www.freedesktop.org/software/systemd/man/systemd-networkd-wait-online.service.html) for all available options. See {manpage}`systemd-networkd-wait-online.service(8)` for all available options.
''; '';
type = with types; listOf str; type = with types; listOf str;
default = []; default = [];

View file

@ -66,9 +66,7 @@ in {
uid = config.ids.uids.systemd-coredump; uid = config.ids.uids.systemd-coredump;
group = "systemd-coredump"; group = "systemd-coredump";
}; };
users.groups.systemd-coredump = { users.groups.systemd-coredump = {};
gid = config.ids.gids.systemd-coredump;
};
}) })
(mkIf (!cfg.enable) { (mkIf (!cfg.enable) {

View file

@ -118,7 +118,7 @@ let
name = "initrd-bin-env"; name = "initrd-bin-env";
paths = map getBin cfg.initrdBin; paths = map getBin cfg.initrdBin;
pathsToLink = ["/bin" "/sbin"]; pathsToLink = ["/bin" "/sbin"];
postBuild = concatStringsSep "\n" (mapAttrsToList (n: v: "ln -s '${v}' $out/bin/'${n}'") cfg.extraBin); postBuild = concatStringsSep "\n" (mapAttrsToList (n: v: "ln -sf '${v}' $out/bin/'${n}'") cfg.extraBin);
}; };
initialRamdisk = pkgs.makeInitrdNG { initialRamdisk = pkgs.makeInitrdNG {

View file

@ -100,7 +100,7 @@ in
logDriver = logDriver =
mkOption { mkOption {
type = types.enum ["none" "json-file" "syslog" "journald" "gelf" "fluentd" "awslogs" "splunk" "etwlogs" "gcplogs"]; type = types.enum ["none" "json-file" "syslog" "journald" "gelf" "fluentd" "awslogs" "splunk" "etwlogs" "gcplogs" "local"];
default = "journald"; default = "journald";
description = description =
lib.mdDoc '' lib.mdDoc ''

View file

@ -44,7 +44,7 @@ in
serviceConfig = { serviceConfig = {
ExecStart = "${cfg.package}/bin/multipassd --logger platform --verbosity ${cfg.logLevel}"; ExecStart = "${cfg.package}/bin/multipassd --logger platform --verbosity ${cfg.logLevel}";
SyslogIdentifer = "multipassd"; SyslogIdentifier = "multipassd";
Restart = "on-failure"; Restart = "on-failure";
TimeoutStopSec = 300; TimeoutStopSec = 300;
Type = "simple"; Type = "simple";

View file

@ -183,10 +183,6 @@ in
systemd.packages = [ cfg.package ]; systemd.packages = [ cfg.package ];
systemd.services.podman.serviceConfig = {
ExecStart = [ "" "${cfg.package}/bin/podman $LOGGING system service" ];
};
systemd.services.podman-prune = { systemd.services.podman-prune = {
description = "Prune podman resources"; description = "Prune podman resources";
@ -207,10 +203,6 @@ in
systemd.sockets.podman.wantedBy = [ "sockets.target" ]; systemd.sockets.podman.wantedBy = [ "sockets.target" ];
systemd.sockets.podman.socketConfig.SocketGroup = "podman"; systemd.sockets.podman.socketConfig.SocketGroup = "podman";
systemd.user.services.podman.serviceConfig = {
ExecStart = [ "" "${cfg.package}/bin/podman $LOGGING system service" ];
};
systemd.user.sockets.podman.wantedBy = [ "sockets.target" ]; systemd.user.sockets.podman.wantedBy = [ "sockets.target" ];
systemd.tmpfiles.packages = [ systemd.tmpfiles.packages = [

View file

@ -108,9 +108,9 @@ let
set -e set -e
NIX_DISK_IMAGE=$(readlink -f "''${NIX_DISK_IMAGE:-${config.virtualisation.diskImage}}") NIX_DISK_IMAGE=$(readlink -f "''${NIX_DISK_IMAGE:-${toString config.virtualisation.diskImage}}") || test -z "$NIX_DISK_IMAGE"
if ! test -e "$NIX_DISK_IMAGE"; then if test -n "$NIX_DISK_IMAGE" && ! test -e "$NIX_DISK_IMAGE"; then
${qemu}/bin/qemu-img create -f qcow2 "$NIX_DISK_IMAGE" \ ${qemu}/bin/qemu-img create -f qcow2 "$NIX_DISK_IMAGE" \
${toString config.virtualisation.diskSize}M ${toString config.virtualisation.diskSize}M
fi fi
@ -346,7 +346,7 @@ in
virtualisation.diskImage = virtualisation.diskImage =
mkOption { mkOption {
type = types.str; type = types.nullOr types.str;
default = "./${config.system.name}.qcow2"; default = "./${config.system.name}.qcow2";
defaultText = literalExpression ''"./''${config.system.name}.qcow2"''; defaultText = literalExpression ''"./''${config.system.name}.qcow2"'';
description = description =
@ -354,6 +354,9 @@ in
Path to the disk image containing the root filesystem. Path to the disk image containing the root filesystem.
The image will be created on startup if it does not The image will be created on startup if it does not
exist. exist.
If null, a tmpfs will be used as the root filesystem and
the VM's state will not be persistent.
''; '';
}; };
@ -990,12 +993,12 @@ in
]; ];
virtualisation.qemu.drives = mkMerge [ virtualisation.qemu.drives = mkMerge [
[{ (mkIf (cfg.diskImage != null) [{
name = "root"; name = "root";
file = ''"$NIX_DISK_IMAGE"''; file = ''"$NIX_DISK_IMAGE"'';
driveExtraOpts.cache = "writeback"; driveExtraOpts.cache = "writeback";
driveExtraOpts.werror = "report"; driveExtraOpts.werror = "report";
}] }])
(mkIf cfg.useNixStoreImage [{ (mkIf cfg.useNixStoreImage [{
name = "nix-store"; name = "nix-store";
file = ''"$TMPDIR"/store.img''; file = ''"$TMPDIR"/store.img'';
@ -1018,20 +1021,21 @@ in
}) cfg.emptyDiskImages) }) cfg.emptyDiskImages)
]; ];
fileSystems = mkVMOverride cfg.fileSystems;
# Mount the host filesystem via 9P, and bind-mount the Nix store # Mount the host filesystem via 9P, and bind-mount the Nix store
# of the host into our own filesystem. We use mkVMOverride to # of the host into our own filesystem. We use mkVMOverride to
# allow this module to be applied to "normal" NixOS system # allow this module to be applied to "normal" NixOS system
# configuration, where the regular value for the `fileSystems' # configuration, where the regular value for the `fileSystems'
# attribute should be disregarded for the purpose of building a VM # attribute should be disregarded for the purpose of building a VM
# test image (since those filesystems don't exist in the VM). # test image (since those filesystems don't exist in the VM).
fileSystems = virtualisation.fileSystems = let
let
mkSharedDir = tag: share: mkSharedDir = tag: share:
{ {
name = name =
if tag == "nix-store" && cfg.writableStore if tag == "nix-store" && cfg.writableStore
then "/nix/.ro-store" then "/nix/.ro-store"
else share.target; else share.target;
value.device = tag; value.device = tag;
value.fsType = "9p"; value.fsType = "9p";
value.neededForBoot = true; value.neededForBoot = true;
@ -1039,44 +1043,42 @@ in
[ "trans=virtio" "version=9p2000.L" "msize=${toString cfg.msize}" ] [ "trans=virtio" "version=9p2000.L" "msize=${toString cfg.msize}" ]
++ lib.optional (tag == "nix-store") "cache=loose"; ++ lib.optional (tag == "nix-store") "cache=loose";
}; };
in in lib.mkMerge [
mkVMOverride (cfg.fileSystems // (lib.mapAttrs' mkSharedDir cfg.sharedDirectories)
optionalAttrs cfg.useDefaultFilesystems { {
"/".device = cfg.bootDevice; "/" = lib.mkIf cfg.useDefaultFilesystems (if cfg.diskImage == null then {
"/".fsType = "ext4"; device = "tmpfs";
"/".autoFormat = true; fsType = "tmpfs";
} // } else {
optionalAttrs config.boot.tmpOnTmpfs { device = cfg.bootDevice;
"/tmp" = { fsType = "ext4";
autoFormat = true;
});
"/tmp" = lib.mkIf config.boot.tmpOnTmpfs {
device = "tmpfs"; device = "tmpfs";
fsType = "tmpfs"; fsType = "tmpfs";
neededForBoot = true; neededForBoot = true;
# Sync with systemd's tmp.mount; # Sync with systemd's tmp.mount;
options = [ "mode=1777" "strictatime" "nosuid" "nodev" "size=${toString config.boot.tmpOnTmpfsSize}" ]; options = [ "mode=1777" "strictatime" "nosuid" "nodev" "size=${toString config.boot.tmpOnTmpfsSize}" ];
}; };
} // "/nix/${if cfg.writableStore then ".ro-store" else "store"}" = lib.mkIf cfg.useNixStoreImage {
optionalAttrs cfg.useNixStoreImage {
"/nix/${if cfg.writableStore then ".ro-store" else "store"}" = {
device = "${lookupDriveDeviceName "nix-store" cfg.qemu.drives}"; device = "${lookupDriveDeviceName "nix-store" cfg.qemu.drives}";
neededForBoot = true; neededForBoot = true;
options = [ "ro" ]; options = [ "ro" ];
}; };
} // "/nix/.rw-store" = lib.mkIf (cfg.writableStore && cfg.writableStoreUseTmpfs) {
optionalAttrs (cfg.writableStore && cfg.writableStoreUseTmpfs) {
"/nix/.rw-store" = {
fsType = "tmpfs"; fsType = "tmpfs";
options = [ "mode=0755" ]; options = [ "mode=0755" ];
neededForBoot = true; neededForBoot = true;
}; };
} //
optionalAttrs cfg.useBootLoader {
# see note [Disk layout with `useBootLoader`] # see note [Disk layout with `useBootLoader`]
"/boot" = { "/boot" = lib.mkIf cfg.useBootLoader {
device = "${lookupDriveDeviceName "boot" cfg.qemu.drives}2"; # 2 for e.g. `vdb2`, as created in `bootDisk` device = "${lookupDriveDeviceName "boot" cfg.qemu.drives}2"; # 2 for e.g. `vdb2`, as created in `bootDisk`
fsType = "vfat"; fsType = "vfat";
noCheck = true; # fsck fails on a r/o filesystem noCheck = true; # fsck fails on a r/o filesystem
}; };
} // lib.mapAttrs' mkSharedDir cfg.sharedDirectories); }
];
boot.initrd.systemd = lib.mkIf (config.boot.initrd.systemd.enable && cfg.writableStore) { boot.initrd.systemd = lib.mkIf (config.boot.initrd.systemd.enable && cfg.writableStore) {
mounts = [{ mounts = [{

View file

@ -81,7 +81,7 @@ in {
extraDisk = mkOption { extraDisk = mkOption {
description = lib.mdDoc '' description = lib.mdDoc ''
Optional extra disk/hdd configuration. Optional extra disk/hdd configuration.
The disk will be an 'ext4' partition on a separate VMDK file. The disk will be an 'ext4' partition on a separate file.
''; '';
default = null; default = null;
example = { example = {
@ -183,8 +183,8 @@ in {
export HOME=$PWD export HOME=$PWD
export PATH=${pkgs.virtualbox}/bin:$PATH export PATH=${pkgs.virtualbox}/bin:$PATH
echo "creating VirtualBox pass-through disk wrapper (no copying involved)..." echo "converting image to VirtualBox format..."
VBoxManage internalcommands createrawvmdk -filename disk.vmdk -rawdisk $diskImage VBoxManage convertfromraw $diskImage disk.vdi
${optionalString (cfg.extraDisk != null) '' ${optionalString (cfg.extraDisk != null) ''
echo "creating extra disk: data-disk.raw" echo "creating extra disk: data-disk.raw"
@ -196,8 +196,8 @@ in {
mkpart primary ext4 1MiB -1 mkpart primary ext4 1MiB -1
eval $(partx $dataDiskImage -o START,SECTORS --nr 1 --pairs) eval $(partx $dataDiskImage -o START,SECTORS --nr 1 --pairs)
mkfs.ext4 -F -L ${cfg.extraDisk.label} $dataDiskImage -E offset=$(sectorsToBytes $START) $(sectorsToKilobytes $SECTORS)K mkfs.ext4 -F -L ${cfg.extraDisk.label} $dataDiskImage -E offset=$(sectorsToBytes $START) $(sectorsToKilobytes $SECTORS)K
echo "creating extra disk: data-disk.vmdk" echo "creating extra disk: data-disk.vdi"
VBoxManage internalcommands createrawvmdk -filename data-disk.vmdk -rawdisk $dataDiskImage VBoxManage convertfromraw $dataDiskImage data-disk.vdi
''} ''}
echo "creating VirtualBox VM..." echo "creating VirtualBox VM..."
@ -209,10 +209,10 @@ in {
${lib.cli.toGNUCommandLineShell { } cfg.params} ${lib.cli.toGNUCommandLineShell { } cfg.params}
VBoxManage storagectl "$vmName" ${lib.cli.toGNUCommandLineShell { } cfg.storageController} VBoxManage storagectl "$vmName" ${lib.cli.toGNUCommandLineShell { } cfg.storageController}
VBoxManage storageattach "$vmName" --storagectl ${cfg.storageController.name} --port 0 --device 0 --type hdd \ VBoxManage storageattach "$vmName" --storagectl ${cfg.storageController.name} --port 0 --device 0 --type hdd \
--medium disk.vmdk --medium disk.vdi
${optionalString (cfg.extraDisk != null) '' ${optionalString (cfg.extraDisk != null) ''
VBoxManage storageattach "$vmName" --storagectl ${cfg.storageController.name} --port 1 --device 0 --type hdd \ VBoxManage storageattach "$vmName" --storagectl ${cfg.storageController.name} --port 1 --device 0 --type hdd \
--medium data-disk.vmdk --medium data-disk.vdi
''} ''}
echo "exporting VirtualBox VM..." echo "exporting VirtualBox VM..."

View file

@ -100,7 +100,6 @@ in rec {
(onFullSupported "nixos.tests.login") (onFullSupported "nixos.tests.login")
(onFullSupported "nixos.tests.misc") (onFullSupported "nixos.tests.misc")
(onFullSupported "nixos.tests.mutableUsers") (onFullSupported "nixos.tests.mutableUsers")
(onFullSupported "nixos.tests.nat.firewall-conntrack")
(onFullSupported "nixos.tests.nat.firewall") (onFullSupported "nixos.tests.nat.firewall")
(onFullSupported "nixos.tests.nat.standalone") (onFullSupported "nixos.tests.nat.standalone")
(onFullSupported "nixos.tests.networking.scripted.bond") (onFullSupported "nixos.tests.networking.scripted.bond")
@ -131,8 +130,7 @@ in rec {
(onFullSupported "nixos.tests.networking.networkd.virtual") (onFullSupported "nixos.tests.networking.networkd.virtual")
(onFullSupported "nixos.tests.networking.networkd.vlan") (onFullSupported "nixos.tests.networking.networkd.vlan")
(onFullSupported "nixos.tests.systemd-networkd-ipv6-prefix-delegation") (onFullSupported "nixos.tests.systemd-networkd-ipv6-prefix-delegation")
# fails with kernel >= 5.15 https://github.com/NixOS/nixpkgs/pull/152505#issuecomment-1005049314 (onFullSupported "nixos.tests.nfs3.simple")
#(onFullSupported "nixos.tests.nfs3.simple")
(onFullSupported "nixos.tests.nfs4.simple") (onFullSupported "nixos.tests.nfs4.simple")
(onSystems ["x86_64-linux"] "nixos.tests.oci-containers.podman") (onSystems ["x86_64-linux"] "nixos.tests.oci-containers.podman")
(onFullSupported "nixos.tests.openssh") (onFullSupported "nixos.tests.openssh")

View file

@ -39,8 +39,7 @@ in rec {
login login
misc misc
nat nat
# fails with kernel >= 5.15 https://github.com/NixOS/nixpkgs/pull/152505#issuecomment-1005049314 nfs3
#nfs3
openssh openssh
php php
predictable-interface-names predictable-interface-names
@ -119,11 +118,9 @@ in rec {
"nixos.tests.ipv6" "nixos.tests.ipv6"
"nixos.tests.login" "nixos.tests.login"
"nixos.tests.misc" "nixos.tests.misc"
"nixos.tests.nat.firewall-conntrack"
"nixos.tests.nat.firewall" "nixos.tests.nat.firewall"
"nixos.tests.nat.standalone" "nixos.tests.nat.standalone"
# fails with kernel >= 5.15 https://github.com/NixOS/nixpkgs/pull/152505#issuecomment-1005049314 "nixos.tests.nfs3.simple"
#"nixos.tests.nfs3.simple"
"nixos.tests.openssh" "nixos.tests.openssh"
"nixos.tests.php.fpm" "nixos.tests.php.fpm"
"nixos.tests.php.pcre" "nixos.tests.php.pcre"

View file

@ -228,6 +228,7 @@ in {
fluentd = handleTest ./fluentd.nix {}; fluentd = handleTest ./fluentd.nix {};
fluidd = handleTest ./fluidd.nix {}; fluidd = handleTest ./fluidd.nix {};
fontconfig-default-fonts = handleTest ./fontconfig-default-fonts.nix {}; fontconfig-default-fonts = handleTest ./fontconfig-default-fonts.nix {};
forgejo = handleTest ./gitea.nix { giteaPackage = pkgs.forgejo; };
freenet = handleTest ./freenet.nix {}; freenet = handleTest ./freenet.nix {};
freeswitch = handleTest ./freeswitch.nix {}; freeswitch = handleTest ./freeswitch.nix {};
freshrss-sqlite = handleTest ./freshrss-sqlite.nix {}; freshrss-sqlite = handleTest ./freshrss-sqlite.nix {};
@ -241,7 +242,7 @@ in {
geth = handleTest ./geth.nix {}; geth = handleTest ./geth.nix {};
ghostunnel = handleTest ./ghostunnel.nix {}; ghostunnel = handleTest ./ghostunnel.nix {};
gitdaemon = handleTest ./gitdaemon.nix {}; gitdaemon = handleTest ./gitdaemon.nix {};
gitea = handleTest ./gitea.nix {}; gitea = handleTest ./gitea.nix { giteaPackage = pkgs.gitea; };
gitlab = handleTest ./gitlab.nix {}; gitlab = handleTest ./gitlab.nix {};
gitolite = handleTest ./gitolite.nix {}; gitolite = handleTest ./gitolite.nix {};
gitolite-fcgiwrap = handleTest ./gitolite-fcgiwrap.nix {}; gitolite-fcgiwrap = handleTest ./gitolite-fcgiwrap.nix {};
@ -432,10 +433,8 @@ in {
nagios = handleTest ./nagios.nix {}; nagios = handleTest ./nagios.nix {};
nar-serve = handleTest ./nar-serve.nix {}; nar-serve = handleTest ./nar-serve.nix {};
nat.firewall = handleTest ./nat.nix { withFirewall = true; }; nat.firewall = handleTest ./nat.nix { withFirewall = true; };
nat.firewall-conntrack = handleTest ./nat.nix { withFirewall = true; withConntrackHelpers = true; };
nat.standalone = handleTest ./nat.nix { withFirewall = false; }; nat.standalone = handleTest ./nat.nix { withFirewall = false; };
nat.nftables.firewall = handleTest ./nat.nix { withFirewall = true; nftables = true; }; nat.nftables.firewall = handleTest ./nat.nix { withFirewall = true; nftables = true; };
nat.nftables.firewall-conntrack = handleTest ./nat.nix { withFirewall = true; withConntrackHelpers = true; nftables = true; };
nat.nftables.standalone = handleTest ./nat.nix { withFirewall = false; nftables = true; }; nat.nftables.standalone = handleTest ./nat.nix { withFirewall = false; nftables = true; };
nats = handleTest ./nats.nix {}; nats = handleTest ./nats.nix {};
navidrome = handleTest ./navidrome.nix {}; navidrome = handleTest ./navidrome.nix {};

View file

@ -54,7 +54,7 @@ with lib;
client.execute("echo 'sync_address = \"http://server:${toString testPort}\"' > ~/.config/atuin/config.toml") client.execute("echo 'sync_address = \"http://server:${toString testPort}\"' > ~/.config/atuin/config.toml")
# log in to atuin server on client node # log in to atuin server on client node
client.succeed(f"${atuin}/bin/atuin login -u ${testUser} -p ${testPass} -k {key}") client.succeed(f"${atuin}/bin/atuin login -u ${testUser} -p ${testPass} -k \"{key}\"")
# pull records from atuin server # pull records from atuin server
client.succeed("${atuin}/bin/atuin sync -f") client.succeed("${atuin}/bin/atuin sync -f")

View file

@ -1,6 +1,6 @@
import ./make-test-python.nix ({ pkgs, ... }: { import ./make-test-python.nix ({ pkgs, ... }: {
name = "clickhouse"; name = "clickhouse";
meta.maintainers = with pkgs.lib.maintainers; [ ma27 ]; meta.maintainers = with pkgs.lib.maintainers; [ ];
nodes.machine = { nodes.machine = {
services.clickhouse.enable = true; services.clickhouse.enable = true;

View file

@ -1,5 +1,6 @@
{ system ? builtins.currentSystem, { system ? builtins.currentSystem,
config ? {}, config ? {},
giteaPackage ? pkgs.gitea,
pkgs ? import ../.. { inherit system config; } pkgs ? import ../.. { inherit system config; }
}: }:
@ -7,10 +8,25 @@ with import ../lib/testing-python.nix { inherit system pkgs; };
with pkgs.lib; with pkgs.lib;
let let
## gpg --faked-system-time='20230301T010000!' --quick-generate-key snakeoil ed25519 sign
signingPrivateKey = ''
-----BEGIN PGP PRIVATE KEY BLOCK-----
lFgEY/6jkBYJKwYBBAHaRw8BAQdADXiZRV8RJUyC9g0LH04wLMaJL9WTc+szbMi7
5fw4yP8AAQCl8EwGfzSLm/P6fCBfA3I9znFb3MEHGCCJhJ6VtKYyRw7ktAhzbmFr
ZW9pbIiUBBMWCgA8FiEE+wUM6VW/NLtAdSixTWQt6LZ4x50FAmP+o5ACGwMFCQPC
ZwAECwkIBwQVCgkIBRYCAwEAAh4FAheAAAoJEE1kLei2eMedFTgBAKQs1oGFZrCI
TZP42hmBTKxGAI1wg7VSdDEWTZxut/2JAQDGgo2sa4VHMfj0aqYGxrIwfP2B7JHO
GCqGCRf9O/hzBA==
=9Uy3
-----END PGP PRIVATE KEY BLOCK-----
'';
signingPrivateKeyId = "4D642DE8B678C79D";
supportedDbTypes = [ "mysql" "postgres" "sqlite3" ]; supportedDbTypes = [ "mysql" "postgres" "sqlite3" ];
makeGiteaTest = type: nameValuePair type (makeTest { makeGiteaTest = type: nameValuePair type (makeTest {
name = "gitea-${type}"; name = "${giteaPackage.pname}-${type}";
meta.maintainers = with maintainers; [ aanderse kolaente ma27 ]; meta.maintainers = with maintainers; [ aanderse indeednotjames kolaente ma27 ];
nodes = { nodes = {
server = { config, pkgs, ... }: { server = { config, pkgs, ... }: {
@ -18,9 +34,11 @@ let
services.gitea = { services.gitea = {
enable = true; enable = true;
database = { inherit type; }; database = { inherit type; };
package = giteaPackage;
settings.service.DISABLE_REGISTRATION = true; settings.service.DISABLE_REGISTRATION = true;
settings."repository.signing".SIGNING_KEY = signingPrivateKeyId;
}; };
environment.systemPackages = [ pkgs.gitea pkgs.jq ]; environment.systemPackages = [ giteaPackage pkgs.gnupg pkgs.jq ];
services.openssh.enable = true; services.openssh.enable = true;
}; };
client1 = { config, pkgs, ... }: { client1 = { config, pkgs, ... }: {
@ -56,6 +74,13 @@ let
server.wait_for_open_port(3000) server.wait_for_open_port(3000)
server.succeed("curl --fail http://localhost:3000/") server.succeed("curl --fail http://localhost:3000/")
server.succeed(
"su -l gitea -c 'gpg --homedir /var/lib/gitea/data/home/.gnupg "
+ "--import ${toString (pkgs.writeText "gitea.key" signingPrivateKey)}'"
)
assert "BEGIN PGP PUBLIC KEY BLOCK" in server.succeed("curl http://localhost:3000/api/v1/signing-key.gpg")
server.succeed( server.succeed(
"curl --fail http://localhost:3000/user/sign_up | grep 'Registration is disabled. " "curl --fail http://localhost:3000/user/sign_up | grep 'Registration is disabled. "
+ "Please contact your site administrator.'" + "Please contact your site administrator.'"

View file

@ -2,7 +2,6 @@ import ./make-test-python.nix ({ pkgs, ...}: {
name = "haproxy"; name = "haproxy";
nodes = { nodes = {
machine = { ... }: { machine = { ... }: {
imports = [ ../modules/profiles/minimal.nix ];
services.haproxy = { services.haproxy = {
enable = true; enable = true;
config = '' config = ''

Some files were not shown because too many files have changed in this diff Show more