Project import generated by Copybara.
GitOrigin-RevId: 3c5319ad3aa51551182ac82ea17ab1c6b0f0df89
This commit is contained in: parent a861c3f460, commit 4d5a95770c
2519 changed files with 42801 additions and 43424 deletions

third_party/nixpkgs/.github/CODEOWNERS (vendored, 2 lines changed)

@@ -104,9 +104,7 @@
# Python-related code and docs
/maintainers/scripts/update-python-libraries @FRidh
/pkgs/top-level/python-packages.nix @FRidh @jonringer
/pkgs/development/interpreters/python @FRidh
/pkgs/development/python-modules @FRidh @jonringer
/doc/languages-frameworks/python.section.md @FRidh @mweinelt
/pkgs/development/tools/poetry2nix @adisbladis
/pkgs/development/interpreters/python/hooks @FRidh @jonringer
@@ -24,7 +24,7 @@ jobs:
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Create backport PRs
uses: korthout/backport-action@v1.1.0
uses: korthout/backport-action@v1.2.0
with:
# Config README: https://github.com/korthout/backport-action#backport-action
pull_description: |-

@@ -19,7 +19,7 @@ jobs:
# we don't limit this action to only NixOS repo since the checks are cheap and useful developer feedback
steps:
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v19
- uses: cachix/install-nix-action@v20
- uses: cachix/cachix-action@v12
with:
# This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.

@@ -28,16 +28,14 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v19
- uses: cachix/install-nix-action@v20
with:
# nixpkgs commit is pinned so that it doesn't break
# editorconfig-checker 2.4.0
nix_path: nixpkgs=https://github.com/NixOS/nixpkgs/archive/c473cc8714710179df205b153f4e9fa007107ff9.tar.gz
- name: install editorconfig-checker
run: nix-env -iA editorconfig-checker -f '<nixpkgs>'
- name: Checking EditorConfig
run: |
cat "$HOME/changed_files" | xargs -r editorconfig-checker -disable-indent-size
cat "$HOME/changed_files" | nix-shell -p editorconfig-checker --run 'xargs -r editorconfig-checker -disable-indent-size'
- if: ${{ failure() }}
run: |
echo "::error :: Hey! It looks like your changes don't follow our editorconfig settings. Read https://editorconfig.org/#download to configure your editor so you never see this error again."

@@ -18,7 +18,7 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v19
- uses: cachix/install-nix-action@v20
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true

@@ -18,7 +18,7 @@ jobs:
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
- uses: cachix/install-nix-action@v19
- uses: cachix/install-nix-action@v20
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true

@@ -18,7 +18,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v19
- uses: cachix/install-nix-action@v20
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true

@@ -17,7 +17,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: cachix/install-nix-action@v19
- uses: cachix/install-nix-action@v20
with:
nix_path: nixpkgs=channel:nixpkgs-unstable
- name: setup

third_party/nixpkgs/doc/.gitignore (vendored, 2 lines changed)

@@ -6,3 +6,5 @@ functions/library/locations.xml
highlightjs
manual-full.xml
out
result
result-*
@@ -101,6 +101,7 @@ in
diskSize = "auto";
additionalSpace = "0M"; # Defaults to 512M.
copyChannel = false;
memSize = 2048; # Qemu VM memory size in megabytes. Defaults to 1024M.
}
```

third_party/nixpkgs/doc/builders/special.xml (vendored, 1 line changed)

@@ -6,6 +6,7 @@
This chapter describes several special builders.
</para>
<xi:include href="special/fhs-environments.section.xml" />
<xi:include href="special/makesetuphook.section.xml" />
<xi:include href="special/mkshell.section.xml" />
<xi:include href="special/darwin-builder.section.xml" />
</chapter>

third_party/nixpkgs/doc/builders/special/makesetuphook.section.md (vendored, new file, 37 lines)

@@ -0,0 +1,37 @@
# pkgs.makeSetupHook {#sec-pkgs.makeSetupHook}

`pkgs.makeSetupHook` is a builder that produces hooks that go in to `nativeBuildInputs`

## Usage {#sec-pkgs.makeSetupHook-usage}

```nix
pkgs.makeSetupHook {
name = "something-hook";
propagatedBuildInputs = [ pkgs.commandsomething ];
depsTargetTargetPropagated = [ pkgs.libsomething ];
} ./script.sh
```

#### setup hook that depends on the hello package and runs hello and @shell@ is substituted with path to bash

```nix
pkgs.makeSetupHook {
name = "run-hello-hook";
propagatedBuildInputs = [ pkgs.hello ];
substitutions = { shell = "${pkgs.bash}/bin/bash"; };
passthru.tests.greeting = callPackage ./test { };
meta.platforms = lib.platforms.linux;
} (writeScript "run-hello-hook.sh" ''
#!@shell@
hello
'')
```

## Attributes

* `name` Set the name of the hook.
* `propagatedBuildInputs` Runtime dependencies (such as binaries) of the hook.
* `depsTargetTargetPropagated` Non-binary dependencies.
* `meta`
* `passthru`
* `substitutions` Variables for `substituteAll`
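
Editor's note, not part of the diff above: a minimal sketch of how a hook produced by `pkgs.makeSetupHook` might be consumed. The package name `my-tool`, the URL, and `./script.sh` are hypothetical placeholders; the pattern of listing the hook derivation in `nativeBuildInputs` is the mechanism the new section describes.

```nix
# Sketch only: consuming a makeSetupHook-produced hook from nativeBuildInputs.
# "my-tool", the URL, and ./script.sh are hypothetical placeholders.
{ lib, stdenv, fetchurl, makeSetupHook }:

let
  # The hook derivation; stdenv sources its setup script for every build
  # that lists it in nativeBuildInputs.
  somethingHook = makeSetupHook { name = "something-hook"; } ./script.sh;
in
stdenv.mkDerivation {
  pname = "my-tool";                       # hypothetical package name
  version = "1.0";
  src = fetchurl {
    url = "https://example.org/my-tool-1.0.tar.gz";  # placeholder URL
    hash = lib.fakeHash;                   # placeholder hash
  };
  nativeBuildInputs = [ somethingHook ];   # the hook's setup script runs at build time
}
```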
@@ -56,11 +56,11 @@ See the `zlib` example:

zlib = (pkgs.zlib.override {
stdenv = pkgs.emscriptenStdenv;
}).overrideDerivation
}).overrideAttrs
(old: rec {
buildInputs = old.buildInputs ++ [ pkg-config ];
# we need to reset this setting!
NIX_CFLAGS_COMPILE="";
env = (old.env or { }) // { NIX_CFLAGS_COMPILE = ""; };
configurePhase = ''
# FIXME: Some tests require writing at $HOME
HOME=$TMPDIR

@@ -1019,7 +1019,7 @@ buildPythonPackage rec {

The `buildPythonPackage` mainly does four things:

* In the `buildPhase`, it calls `${python.interpreter} setup.py bdist_wheel` to
* In the `buildPhase`, it calls `${python.pythonForBuild.interpreter} setup.py bdist_wheel` to
build a wheel binary zipfile.
* In the `installPhase`, it installs the wheel file using `pip install *.whl`.
* In the `postFixup` phase, the `wrapPythonPrograms` bash function is called to

@@ -1546,7 +1546,7 @@ of such package using the feature is `pkgs/tools/X11/xpra/default.nix`.
As workaround install it as an extra `preInstall` step:

```shell
${python.interpreter} setup.py install_data --install-dir=$out --root=$out
${python.pythonForBuild.interpreter} setup.py install_data --install-dir=$out --root=$out
sed -i '/ = data\_files/d' setup.py
```

@@ -1821,6 +1821,11 @@ hosted on GitHub, exporting a `GITHUB_API_TOKEN` is highly recommended.
Updating packages in bulk leads to lots of breakages, which is why a
stabilization period on the `python-unstable` branch is required.

If a package is fragile and often breaks during these bulks updates, it
may be reasonable to set `passthru.skipBulkUpdate = true` in the
derivation. This decision should not be made on a whim and should
always be supported by a qualifying comment.

Once the branch is sufficiently stable it should normally be merged
into the `staging` branch.

@@ -1329,7 +1329,7 @@ bin/blib.a(bios_console.o): In function `bios_handle_cup':

Adds the `-O2 -D_FORTIFY_SOURCE=2` compiler options. During code generation the compiler knows a great deal of information about buffer sizes (where possible), and attempts to replace insecure unlimited length buffer function calls with length-limited ones. This is especially useful for old, crufty code. Additionally, format strings in writable memory that contain `%n` are blocked. If an application depends on such a format string, it will need to be worked around.

Additionally, some warnings are enabled which might trigger build failures if compiler warnings are treated as errors in the package build. In this case, set `NIX_CFLAGS_COMPILE` to `-Wno-error=warning-type`.
Additionally, some warnings are enabled which might trigger build failures if compiler warnings are treated as errors in the package build. In this case, set `env.NIX_CFLAGS_COMPILE` to `-Wno-error=warning-type`.

This needs to be turned off or fixed for errors similar to:
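
Editor's note, not taken from the diff: a hedged illustration of the `env.NIX_CFLAGS_COMPILE` form the hunk above now documents. The package name `somepkg` and the specific warning flag are placeholders.

```nix
# Sketch: turning one warning-as-error off for a single package via env.NIX_CFLAGS_COMPILE.
# "somepkg" and the warning name are hypothetical.
somepkg.overrideAttrs (old: {
  env = (old.env or { }) // {
    NIX_CFLAGS_COMPILE =
      toString (old.env.NIX_CFLAGS_COMPILE or "") + " -Wno-error=format-security";
  };
})
```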

third_party/nixpkgs/lib/ascii-table.nix (vendored, 5 lines changed)

@@ -1,4 +1,7 @@
{ " " = 32;
{ "\t" = 9;
"\n" = 10;
"\r" = 13;
" " = 32;
"!" = 33;
"\"" = 34;
"#" = 35;

third_party/nixpkgs/lib/customisation.nix (vendored, 7 lines changed)

@@ -213,6 +213,13 @@ rec {
outputSpecified = true;
drvPath = assert condition; drv.${outputName}.drvPath;
outPath = assert condition; drv.${outputName}.outPath;
} //
# TODO: give the derivation control over the outputs.
# `overrideAttrs` may not be the only attribute that needs
# updating when switching outputs.
lib.optionalAttrs (passthru?overrideAttrs) {
# TODO: also add overrideAttrs when overrideAttrs is not custom, e.g. when not splicing.
overrideAttrs = f: (passthru.overrideAttrs f).${outputName};
};
};

third_party/nixpkgs/lib/debug.nix (vendored, 64 lines changed)

@@ -109,6 +109,8 @@ rec {
traceSeqN 2 { a.b.c = 3; } null
trace: { a = { b = {…}; }; }
=> null

Type: traceSeqN :: Int -> a -> b -> b
*/
traceSeqN = depth: x: y:
let snip = v: if isList v then noQuotes "[…]" v

@@ -173,17 +175,63 @@ rec {

# -- TESTING --

/* Evaluate a set of tests. A test is an attribute set `{expr,
expected}`, denoting an expression and its expected result. The
result is a list of failed tests, each represented as `{name,
expected, actual}`, denoting the attribute name of the failing
test and its expected and actual results.
/* Evaluates a set of tests.

A test is an attribute set `{expr, expected}`,
denoting an expression and its expected result.

The result is a `list` of __failed tests__, each represented as
`{name, expected, result}`,

- expected
- What was passed as `expected`
- result
- The actual `result` of the test

Used for regression testing of the functions in lib; see
tests.nix for an example. Only tests having names starting with
"test" are run.
tests.nix for more examples.

Add attr { tests = ["testName"]; } to run these tests only.
Important: Only attributes that start with `test` are executed.

- If you want to run only a subset of the tests add the attribute `tests = ["testName"];`

Example:

runTests {
testAndOk = {
expr = lib.and true false;
expected = false;
};
testAndFail = {
expr = lib.and true false;
expected = true;
};
}
->
[
{
name = "testAndFail";
expected = true;
result = false;
}
]

Type:
runTests :: {
tests = [ String ];
${testName} :: {
expr :: a;
expected :: a;
};
}
->
[
{
name :: String;
expected :: a;
result :: a;
}
]
*/
runTests =
# Tests to run
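
Editor's note, not part of the diff: a small sketch of the `tests = [ ... ];` filter that the rewritten comment documents, assuming `lib.debug.runTests` behaves as described above.

```nix
# Sketch (assumes the runTests behaviour documented above):
# only attributes listed in `tests` are evaluated, and passing tests
# produce an empty list of failures.
lib.debug.runTests {
  tests = [ "testAndOk" ];                                        # run this test only
  testAndOk   = { expr = lib.and true true;  expected = true; };
  testAndFail = { expr = lib.and true false; expected = true; }; # skipped by the filter
}
# => [ ]
```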

third_party/nixpkgs/lib/default.nix (vendored, 2 lines changed)

@@ -100,7 +100,7 @@ let
escapeShellArg escapeShellArgs
isStorePath isStringLike
isValidPosixName toShellVar toShellVars
escapeRegex escapeXML replaceChars lowerChars
escapeRegex escapeURL escapeXML replaceChars lowerChars
upperChars toLower toUpper addContextFrom splitString
removePrefix removeSuffix versionOlder versionAtLeast
getName getVersion

third_party/nixpkgs/lib/licenses.nix (vendored, 10 lines changed)

@@ -109,6 +109,11 @@ in mkLicense lset) ({
fullName = "Apache License 2.0";
};

asl20-llvm = {
spdxId = "Apache-2.0 WITH LLVM-exception";
fullName = "Apache License 2.0 with LLVM Exceptions";
};

bitstreamVera = {
spdxId = "Bitstream-Vera";
fullName = "Bitstream Vera Font License";

@@ -657,11 +662,6 @@ in mkLicense lset) ({
url = "https://opensource.franz.com/preamble.html";
};

llvm-exception = {
spdxId = "LLVM-exception";
fullName = "LLVM Exception"; # LLVM exceptions to the Apache 2.0 License
};

lppl12 = {
spdxId = "LPPL-1.2";
fullName = "LaTeX Project Public License v1.2";
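
Editor's note, for illustration only (not in the diff): how the new combined `asl20-llvm` identifier would typically appear in a package's `meta`, replacing the removed standalone `llvm-exception` entry. The surrounding attribute set is hypothetical.

```nix
# Sketch: a hypothetical package fragment using the new combined SPDX identifier.
{
  meta = with lib; {
    description = "example package";   # placeholder
    license = licenses.asl20-llvm;     # Apache-2.0 WITH LLVM-exception
    platforms = platforms.unix;
  };
}
```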

third_party/nixpkgs/lib/options.nix (vendored, 63 lines changed)

@@ -36,6 +36,9 @@ let
inherit (lib.types)
mkOptionType
;
inherit (lib.lists)
last
;
prioritySuggestion = ''
Use `lib.mkForce value` or `lib.mkDefault value` to change the priority on any of these definitions.
'';

@@ -107,17 +110,28 @@ rec {
/* Creates an Option attribute set for an option that specifies the
package a module should use for some purpose.

The package is specified as a list of strings representing its attribute path in nixpkgs.
Type: mkPackageOption :: pkgs -> (string|[string]) ->
{ default? :: [string], example? :: null|string|[string], extraDescription? :: string } ->
option

Because of this, you need to pass nixpkgs itself as the first argument.
The package is specified in the third argument under `default` as a list of strings
representing its attribute path in nixpkgs (or another package set).
Because of this, you need to pass nixpkgs itself (or a subset) as the first argument.

The second argument is the name of the option, used in the description "The <name> package to use.".
The second argument may be either a string or a list of strings.
It provides the display name of the package in the description of the generated option
(using only the last element if the passed value is a list)
and serves as the fallback value for the `default` argument.

You can also pass an example value, either a literal string or a package's attribute path.
To include extra information in the description, pass `extraDescription` to
append arbitrary text to the generated description.
You can also pass an `example` value, either a literal string or an attribute path.

You can omit the default path if the name of the option is also attribute path in nixpkgs.
The default argument can be omitted if the provided name is
an attribute of pkgs (if name is a string) or a
valid attribute path in pkgs (if name is a list).

Type: mkPackageOption :: pkgs -> string -> { default :: [string]; example :: null | string | [string]; } -> option
If you wish to explicitly provide no default, pass `null` as `default`.

Example:
mkPackageOption pkgs "hello" { }

@@ -129,27 +143,46 @@ rec {
example = "pkgs.haskell.packages.ghc92.ghc.withPackages (hkgs: [ hkgs.primes ])";
}
=> { _type = "option"; default = «derivation /nix/store/jxx55cxsjrf8kyh3fp2ya17q99w7541r-ghc-8.10.7.drv»; defaultText = { ... }; description = "The GHC package to use."; example = { ... }; type = { ... }; }

Example:
mkPackageOption pkgs [ "python39Packages" "pytorch" ] {
extraDescription = "This is an example and doesn't actually do anything.";
}
=> { _type = "option"; default = «derivation /nix/store/gvqgsnc4fif9whvwd9ppa568yxbkmvk8-python3.9-pytorch-1.10.2.drv»; defaultText = { ... }; description = "The pytorch package to use. This is an example and doesn't actually do anything."; type = { ... }; }

*/
mkPackageOption =
# Package set (a specific version of nixpkgs)
# Package set (a specific version of nixpkgs or a subset)
pkgs:
# Name for the package, shown in option description
name:
{ default ? [ name ], example ? null }:
let default' = if !isList default then [ default ] else default;
{
# The attribute path where the default package is located
default ? name,
# A string or an attribute path to use as an example
example ? null,
# Additional text to include in the option description
extraDescription ? "",
}:
let
name' = if isList name then last name else name;
default' = if isList default then default else [ default ];
defaultPath = concatStringsSep "." default';
defaultValue = attrByPath default'
(throw "${defaultPath} cannot be found in pkgs") pkgs;
in mkOption {
defaultText = literalExpression ("pkgs." + defaultPath);
type = lib.types.package;
description = "The ${name} package to use.";
default = attrByPath default'
(throw "${concatStringsSep "." default'} cannot be found in pkgs") pkgs;
defaultText = literalExpression ("pkgs." + concatStringsSep "." default');
description = "The ${name'} package to use."
+ (if extraDescription == "" then "" else " ") + extraDescription;
${if default != null then "default" else null} = defaultValue;
${if example != null then "example" else null} = literalExpression
(if isList example then "pkgs." + concatStringsSep "." example else example);
};

/* Like mkPackageOption, but emit an mdDoc description instead of DocBook. */
mkPackageOptionMD = args: name: extra:
let option = mkPackageOption args name extra;
mkPackageOptionMD = pkgs: name: extra:
let option = mkPackageOption pkgs name extra;
in option // { description = lib.mdDoc option.description; };

/* This option accepts anything, but it does not produce any result.
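
Editor's note, not part of the diff: a hedged usage sketch of the reworked `mkPackageOption` signature shown above. The option path `services.myservice.package` is hypothetical; `pkgs "hello"` and the `example`/`extraDescription` arguments follow the new interface documented in the hunk.

```nix
# Sketch of a NixOS module using the updated mkPackageOption
# (hypothetical option path services.myservice.package).
{ lib, pkgs, ... }:
{
  options.services.myservice.package = lib.mkPackageOption pkgs "hello" {
    example = [ "hello" ];                                  # attribute-path form of example
    extraDescription = "Only used when the service is enabled.";
  };
}
```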

third_party/nixpkgs/lib/strings.nix (vendored, 21 lines changed)

@@ -34,6 +34,8 @@ rec {
unsafeDiscardStringContext
;

asciiTable = import ./ascii-table.nix;

/* Concatenate a list of strings.

Type: concatStrings :: [string] -> string

@@ -327,9 +329,7 @@ rec {
=> 40

*/
charToInt = let
table = import ./ascii-table.nix;
in c: builtins.getAttr c table;
charToInt = c: builtins.getAttr c asciiTable;

/* Escape occurrence of the elements of `list` in `string` by
prefixing it with a backslash.

@@ -355,6 +355,21 @@ rec {
*/
escapeC = list: replaceStrings list (map (c: "\\x${ toLower (lib.toHexString (charToInt c))}") list);

/* Escape the string so it can be safely placed inside a URL
query.

Type: escapeURL :: string -> string

Example:
escapeURL "foo/bar baz"
=> "foo%2Fbar%20baz"
*/
escapeURL = let
unreserved = [ "A" "B" "C" "D" "E" "F" "G" "H" "I" "J" "K" "L" "M" "N" "O" "P" "Q" "R" "S" "T" "U" "V" "W" "X" "Y" "Z" "a" "b" "c" "d" "e" "f" "g" "h" "i" "j" "k" "l" "m" "n" "o" "p" "q" "r" "s" "t" "u" "v" "w" "x" "y" "z" "0" "1" "2" "3" "4" "5" "6" "7" "8" "9" "-" "_" "." "~" ];
toEscape = builtins.removeAttrs asciiTable unreserved;
in
replaceStrings (builtins.attrNames toEscape) (lib.mapAttrsToList (_: c: "%${fixedWidthString 2 "0" (lib.toHexString c)}") toEscape);

/* Quote string to be used safely within the Bourne shell.

Type: escapeShellArg :: string -> string
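
Editor's note, not in the diff: a small sketch showing the newly added `lib.strings.escapeURL` composed into a query string; the attribute names are arbitrary examples.

```nix
# Sketch: building a URL query string with the new escapeURL
# (attribute names page/q are arbitrary examples).
lib.concatStringsSep "&"
  (lib.mapAttrsToList (k: v: "${k}=${lib.strings.escapeURL v}")
    { page = "1"; q = "hello world"; })
# => "page=1&q=hello%20world"
```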

third_party/nixpkgs/lib/tests/misc.nix (vendored, 9 lines changed)

@@ -347,6 +347,15 @@ runTests {
expected = "Hello\\x20World";
};

testEscapeURL = testAllTrue [
("" == strings.escapeURL "")
("Hello" == strings.escapeURL "Hello")
("Hello%20World" == strings.escapeURL "Hello World")
("Hello%2FWorld" == strings.escapeURL "Hello/World")
("42%25" == strings.escapeURL "42%")
("%20%3F%26%3D%23%2B%25%21%3C%3E%23%22%7B%7D%7C%5C%5E%5B%5D%60%09%3A%2F%40%24%27%28%29%2A%2C%3B" == strings.escapeURL " ?&=#+%!<>#\"{}|\\^[]`\t:/@$'()*,;")
];

testToInt = testAllTrue [
# Naive
(123 == toInt "123")

third_party/nixpkgs/maintainers/maintainer-list.nix (vendored, 110 lines changed)

@@ -2442,6 +2442,12 @@
githubId = 5394722;
name = "Spencer Baugh";
};
cathalmullan = {
email = "contact@cathal.dev";
github = "CathalMullan";
githubId = 37139470;
name = "Cathal Mullan";
};
catouc = {
email = "catouc@philipp.boeschen.me";
github = "catouc";

@@ -2872,6 +2878,13 @@
githubId = 718298;
name = "Michael Livshin";
};
CobaltCause = {
name = "Charles Hall";
email = "charles@computer.surgery";
github = "CobaltCause";
githubId = 7003738;
matrix = "@charles:computer.surgery";
};
cobbal = {
email = "andrew.cobb@gmail.com";
github = "cobbal";

@@ -3588,6 +3601,13 @@
githubId = 62989;
name = "Demyan Rogozhin";
};
dennajort = {
email = "gosselinjb@gmail.com";
matrix = "@dennajort:matrix.org";
github = "dennajort";
githubId = 1536838;
name = "Jean-Baptiste Gosselin";
};
derchris = {
email = "derchris@me.com";
github = "derchrisuk";

@@ -3650,6 +3670,12 @@
github = "Dettorer";
githubId = 2761682;
};
developer-guy = {
name = "Batuhan Apaydın";
email = "developerguyn@gmail.com";
github = "developer-guy";
githubId = 16693043;
};
devhell = {
email = ''"^"@regexmail.net'';
github = "devhell";

@@ -4010,6 +4036,11 @@
githubId = 1931963;
name = "David Sferruzza";
};
dsymbol = {
name = "dsymbol";
github = "dsymbol";
githubId = 88138099;
};
dtzWill = {
email = "w@wdtz.org";
github = "dtzWill";

@@ -6890,6 +6921,12 @@
githubId = 10786794;
name = "Markus Hihn";
};
jessemoore = {
email = "jesse@jessemoore.dev";
github = "jesseDMoore1994";
githubId = 30251156;
name = "Jesse Moore";
};
jethro = {
email = "jethrokuan95@gmail.com";
github = "jethrokuan";

@@ -7175,6 +7212,12 @@
github = "joepie91";
githubId = 1663259;
};
joerdav = {
email = "joe.davidson.21111@gmail.com";
github = "joerdav";
name = "Joe Davidson";
githubId = 19927761;
};
joesalisbury = {
email = "salisbury.joseph@gmail.com";
github = "JosephSalisbury";

@@ -8913,6 +8956,9 @@
github = "Ma27";
githubId = 6025220;
name = "Maximilian Bosch";
keys = [{
fingerprint = "62B9 9C26 F046 721E 26B0 04F6 D006 A998 C6AB FDF1";
}];
};
ma9e = {
email = "sean@lfo.team";

@@ -8990,6 +9036,12 @@
githubId = 1238350;
name = "Matthias Herrmann";
};
mahmoudk1000 = {
email = "mahmoudk1000@gmail.com";
github = "mahmoudk1000";
githubId = 24735185;
name = "Mahmoud Ayman";
};
majesticmullet = {
email = "hoccthomas@gmail.com.au";
github = "MajesticMullet";

@@ -9644,6 +9696,12 @@
github = "michaelBelsanti";
githubId = 62124625;
};
michaelgrahamevans = {
email = "michaelgrahamevans@gmail.com";
name = "Michael Evans";
github = "michaelgrahamevans";
githubId = 5932424;
};
michaelpj = {
email = "michaelpj@gmail.com";
github = "michaelpj";

@@ -10095,6 +10153,12 @@
githubId = 3073833;
name = "Massimo Redaelli";
};
mrfreezeex = {
email = "arthur@cri.epita.fr";
github = "MrFreezeex";
name = "Arthur Outhenin-Chalandre";
githubId = 3845213;
};
mrityunjaygr8 = {
email = "mrityunjaysaxena1996@gmail.com";
github = "mrityunjaygr8";

@@ -11623,6 +11687,12 @@
fingerprint = "A3A3 65AE 16ED A7A0 C29C 88F1 9712 452E 8BE3 372E";
}];
};
pinkcreeper100 = {
email = "benmoreosm@gmail.com";
github = "pinkcreeper100";
githubId = 35699052;
name = "Oliver Samuel Morris";
};
pinpox = {
email = "mail@pablo.tools";
github = "pinpox";

@@ -11878,6 +11948,12 @@
githubId = 146413;
name = "Tobias Poschwatta";
};
PowerUser64 = {
email = "blakelysnorth@gmail.com";
github = "PowerUser64";
githubId = 24578572;
name = "Blake North";
};
ppenguin = {
name = "Jeroen Versteeg";
email = "hieronymusv@gmail.com";

@@ -12168,6 +12244,7 @@
github = "alyssais";
githubId = 2768870;
name = "Alyssa Ross";
matrix = "@qyliss:fairydust.space";
keys = [{
fingerprint = "7573 56D7 79BB B888 773E 415E 736C CDF9 EF51 BD97";
}];

@@ -12822,6 +12899,7 @@
email = "rrbutani+nix@gmail.com";
github = "rrbutani";
githubId = 7833358;
matrix = "@rbutani:matrix.org";
keys = [{
fingerprint = "7DCA 5615 8AB2 621F 2F32 9FF4 1C7C E491 479F A273";
}];

@@ -12857,6 +12935,12 @@
githubId = 61306;
name = "Rene Treffer";
};
rubyowo = {
name = "Rei Star";
email = "perhaps-you-know@what-is.ml";
github = "rubyowo";
githubId = 105302757;
};
rumpelsepp = {
name = "Stefan Tatschner";
email = "stefan@rumpelsepp.org";

@@ -14802,6 +14886,12 @@
githubId = 1634990;
name = "Tom McLaughlin";
};
thornycrackers = {
email = "codyfh@gmail.com";
github = "thornycrackers";
githubId = 4313010;
name = "Cody Hiar";
};
thoughtpolice = {
email = "aseipp@pobox.com";
github = "thoughtpolice";

@@ -15312,6 +15402,11 @@
github = "unrooted";
githubId = 30440603;
};
unsolvedcypher = {
name = "Matthew M";
github = "UnsolvedCypher";
githubId = 3170853;
};
uralbash = {
email = "root@uralbash.ru";
github = "uralbash";

@@ -15892,6 +15987,15 @@
fingerprint = "DA03 D6C6 3F58 E796 AD26 E99B 366A 2940 479A 06FC";
}];
};
williamvds = {
email = "nixpkgs@williamvds.me";
github = "williamvds";
githubId = 26379999;
name = "William Vigolo";
keys = [{
fingerprint = "9848 B216 BCBE 29BB 1C6A E0D5 7A4D F5A8 CDBD 49C7";
}];
};
willibutz = {
email = "willibutz@posteo.de";
github = "WilliButz";

@@ -16016,6 +16120,12 @@
github = "wr0belj";
githubId = 40501814;
};
wraithm = {
name = "Matthew Wraith";
email = "wraithm@gmail.com";
github = "wraithm";
githubId = 1512913;
};
wrmilling = {
name = "Winston R. Milling";
email = "Winston@Milli.ng";

@@ -26,6 +26,7 @@ Because step 1) is quite expensive and takes roughly ~5 minutes the result is ca
{-# LANGUAGE TupleSections #-}
{-# LANGUAGE ViewPatterns #-}
{-# OPTIONS_GHC -Wall #-}
{-# LANGUAGE DataKinds #-}

import Control.Monad (forM_, (<=<))
import Control.Monad.Trans (MonadIO (liftIO))

@@ -55,7 +56,12 @@ import Data.Time.Clock (UTCTime)
import GHC.Generics (Generic)
import Network.HTTP.Req (
GET (GET),
HttpResponse (HttpResponseBody),
NoReqBody (NoReqBody),
Option,
Req,
Scheme (Https),
bsResponse,
defaultHttpConfig,
header,
https,

@@ -76,6 +82,10 @@ import Control.Exception (evaluate)
import qualified Data.IntMap.Strict as IntMap
import qualified Data.IntSet as IntSet
import Data.Bifunctor (second)
import Data.Data (Proxy)
import Data.ByteString (ByteString)
import qualified Data.ByteString.Char8 as ByteString
import Distribution.Simple.Utils (safeLast, fromUTF8BS)

newtype JobsetEvals = JobsetEvals
{ evals :: Seq Eval

@@ -123,17 +133,31 @@ showT = Text.pack . show

getBuildReports :: IO ()
getBuildReports = runReq defaultHttpConfig do
evalMay <- Seq.lookup 0 . evals <$> myReq (https "hydra.nixos.org" /: "jobset" /: "nixpkgs" /: "haskell-updates" /: "evals") mempty
evalMay <- Seq.lookup 0 . evals <$> hydraJSONQuery mempty ["jobset", "nixpkgs", "haskell-updates", "evals"]
eval@Eval{id} <- maybe (liftIO $ fail "No Evalution found") pure evalMay
liftIO . putStrLn $ "Fetching evaluation " <> show id <> " from Hydra. This might take a few minutes..."
buildReports :: Seq Build <- myReq (https "hydra.nixos.org" /: "eval" /: showT id /: "builds") (responseTimeout 600000000)
buildReports :: Seq Build <- hydraJSONQuery (responseTimeout 600000000) ["eval", showT id, "builds"]
liftIO do
fileName <- reportFileName
putStrLn $ "Finished fetching all builds from Hydra, saving report as " <> fileName
now <- getCurrentTime
encodeFile fileName (eval, now, buildReports)
where
myReq query option = responseBody <$> req GET query NoReqBody jsonResponse (header "User-Agent" "hydra-report.hs/v1 (nixpkgs;maintainers/scripts/haskell)" <> option)

hydraQuery :: HttpResponse a => Proxy a -> Option 'Https -> [Text] -> Req (HttpResponseBody a)
hydraQuery responseType option query =
responseBody
<$> req
GET
(foldl' (/:) (https "hydra.nixos.org") query)
NoReqBody
responseType
(header "User-Agent" "hydra-report.hs/v1 (nixpkgs;maintainers/scripts/haskell)" <> option)

hydraJSONQuery :: FromJSON a => Option 'Https -> [Text] -> Req a
hydraJSONQuery = hydraQuery jsonResponse

hydraPlainQuery :: [Text] -> Req ByteString
hydraPlainQuery = hydraQuery bsResponse mempty

hydraEvalCommand :: FilePath
hydraEvalCommand = "hydra-eval-jobs"

@@ -326,14 +350,8 @@ instance Functor (Table row col) where
instance Foldable (Table row col) where
foldMap f (Table a) = foldMap f a

buildSummary :: MaintainerMap -> ReverseDependencyMap -> Seq Build -> StatusSummary
buildSummary maintainerMap reverseDependencyMap = foldl (Map.unionWith unionSummary) Map.empty . fmap toSummary
where
unionSummary (SummaryEntry (Table lb) lm lr lu) (SummaryEntry (Table rb) rm rr ru) = SummaryEntry (Table $ Map.union lb rb) (lm <> rm) (max lr rr) (max lu ru)
toSummary Build{finished, buildstatus, job, id, system} = Map.singleton name (SummaryEntry (Table (Map.singleton (set, Platform system) (BuildResult state id))) maintainers reverseDeps unbrokenReverseDeps)
where
state :: BuildState
state = case (finished, buildstatus) of
getBuildState :: Build -> BuildState
getBuildState Build{finished, buildstatus} = case (finished, buildstatus) of
(0, _) -> Unfinished
(_, Just 0) -> Success
(_, Just 1) -> Failed

@@ -343,6 +361,13 @@ buildSummary maintainerMap reverseDependencyMap = foldl (Map.unionWith unionSumm
(_, Just 7) -> TimedOut
(_, Just 11) -> OutputLimitExceeded
(_, i) -> Unknown i

buildSummary :: MaintainerMap -> ReverseDependencyMap -> Seq Build -> StatusSummary
buildSummary maintainerMap reverseDependencyMap = foldl (Map.unionWith unionSummary) Map.empty . fmap toSummary
where
unionSummary (SummaryEntry (Table lb) lm lr lu) (SummaryEntry (Table rb) rm rr ru) = SummaryEntry (Table $ Map.union lb rb) (lm <> rm) (max lr rr) (max lu ru)
toSummary build@Build{job, id, system} = Map.singleton name (SummaryEntry (Table (Map.singleton (set, Platform system) (BuildResult (getBuildState build) id))) maintainers reverseDeps unbrokenReverseDeps)
where
packageName = fromMaybe job (Text.stripSuffix ("." <> system) job)
splitted = nonEmpty $ Text.splitOn "." packageName
name = maybe packageName NonEmpty.last splitted

@@ -486,8 +511,23 @@ printMaintainerPing = do

printMarkBrokenList :: IO ()
printMarkBrokenList = do
(_, _, buildReport) <- readBuildReports
forM_ buildReport \Build{buildstatus, job} ->
case (buildstatus, Text.splitOn "." job) of
(Just 1, ["haskellPackages", name, "x86_64-linux"]) -> putStrLn $ " - " <> Text.unpack name
(_, fetchTime, buildReport) <- readBuildReports
runReq defaultHttpConfig $ forM_ buildReport \build@Build{job, id} ->
case (getBuildState build, Text.splitOn "." job) of
(Failed, ["haskellPackages", name, "x86_64-linux"]) -> do
-- Fetch build log from hydra to figure out the cause of the error.
build_log <- ByteString.lines <$> hydraPlainQuery ["build", showT id, "nixlog", "1", "raw"]
-- We use the last probable error cause found in the build log file.
let error_message = fromMaybe " failure " $ safeLast $ mapMaybe probableErrorCause build_log
liftIO $ putStrLn $ " - " <> Text.unpack name <> " # " <> error_message <> " in job https://hydra.nixos.org/build/" <> show id <> " at " <> formatTime defaultTimeLocale "%Y-%m-%d" fetchTime
_ -> pure ()

{- | This function receives a line from a Nix Haskell builder build log and returns a possible error cause.
| We might need to add other causes in the future if errors happen in unusual parts of the builder.
-}
probableErrorCause :: ByteString -> Maybe String
probableErrorCause "Setup: Encountered missing or private dependencies:" = Just "dependency missing"
probableErrorCause "running tests" = Just "test failure"
probableErrorCause build_line | ByteString.isPrefixOf "Building" build_line = Just ("failure building " <> fromUTF8BS (fst $ ByteString.breakSubstring " for" $ ByteString.drop 9 build_line))
probableErrorCause build_line | ByteString.isSuffixOf "Phase" build_line = Just ("failure in " <> fromUTF8BS build_line)
probableErrorCause _ = Nothing

@@ -11,6 +11,9 @@
# Related scripts are update-hackage.sh, for updating the snapshot of the
# Hackage database used by hackage2nix, and update-cabal2nix-unstable.sh,
# for updating the version of hackage2nix used to perform this task.
#
# Note that this script doesn't gcroot anything, so it may be broken by an
# unfortunately timed nix-store --gc.

set -euo pipefail

@@ -20,15 +23,21 @@ HACKAGE2NIX="${HACKAGE2NIX:-hackage2nix}"
# See: https://github.com/NixOS/nixpkgs/pull/122023
export LC_ALL=C.UTF-8

config_dir=pkgs/development/haskell-modules/configuration-hackage2nix

echo "Obtaining Hackage data"
extraction_derivation='with import ./. {}; runCommandLocal "unpacked-cabal-hashes" { } "tar xf ${all-cabal-hashes} --strip-components=1 --one-top-level=$out"'
unpacked_hackage="$(nix-build -E "$extraction_derivation" --no-out-link)"
config_dir=pkgs/development/haskell-modules/configuration-hackage2nix

echo "Generating compiler configuration"
compiler_config="$(nix-build -A haskellPackages.cabal2nix-unstable.compilerConfig --no-out-link)"

echo "Starting hackage2nix to regenerate pkgs/development/haskell-modules/hackage-packages.nix ..."
"$HACKAGE2NIX" \
--hackage "$unpacked_hackage" \
--preferred-versions <(for n in "$unpacked_hackage"/*/preferred-versions; do cat "$n"; echo; done) \
--nixpkgs "$PWD" \
--config "$compiler_config" \
--config "$config_dir/main.yaml" \
--config "$config_dir/stackage.yaml" \
--config "$config_dir/broken.yaml" \

@@ -32,7 +32,7 @@ lmpfrlib,,,,,5.3,alexshpilkin
loadkit,,,,,,alerque
lpeg,,,,,,vyp
lpeg_patterns,,,,,,
lpeglabel,,,,,,
lpeglabel,,,,1.6.0,,
lpty,,,,,,
lrexlib-gnu,,,,,,
lrexlib-pcre,,,,,,vyp

@@ -1,5 +1,3 @@
#!/bin/sh
build=`nix-build -E "with import (fetchTarball "channel:nixpkgs-unstable") {}; python3.withPackages(ps: with ps; [ packaging requests toolz ])"`
python=${build}/bin/python
exec ${python} pkgs/development/interpreters/python/update-python-libraries/update-python-libraries.py $@

#!/usr/bin/env nix-shell
#!nix-shell -I nixpkgs=channel:nixpkgs-unstable -i bash -p "python3.withPackages (ps: with ps; [ packaging requests ])" -p nix-prefetch-git
exec python3 pkgs/development/interpreters/python/update-python-libraries/update-python-libraries.py $@

@@ -1,3 +1,10 @@
/*
To run:

nix-shell maintainers/scripts/update.nix

See https://nixos.org/manual/nixpkgs/unstable/#var-passthru-updateScript
*/
{ package ? null
, maintainer ? null
, predicate ? null

@@ -8,8 +15,6 @@
, commit ? null
}:

# TODO: add assert statements

let
pkgs = import ./../../default.nix (
if include-overlays == false then

@@ -81,6 +81,7 @@ with lib.maintainers; {
# Verify additions to this team with at least one already existing member of the team.
members = [
cdepillabout
wraithm
];
scope = "Group registration for packages maintained by Bitnomial.";
shortName = "Bitnomial employees";

third_party/nixpkgs/nixos/doc/manual/default.nix (vendored, 52 lines changed)

@@ -135,12 +135,7 @@ let
}
'';

manual-combined = runCommand "nixos-manual-combined"
{ inputs = lib.sourceFilesBySuffices ./. [ ".xml" ".md" ];
nativeBuildInputs = [ pkgs.nixos-render-docs pkgs.libxml2.bin pkgs.libxslt.bin ];
meta.description = "The NixOS manual as plain docbook XML";
}
''
prepareManualFromMD = ''
cp -r --no-preserve=all $inputs/* .

substituteInPlace ./manual.md \

@@ -157,6 +152,15 @@ let
--replace \
'@NIXOS_TEST_OPTIONS_JSON@' \
${testOptionsDoc.optionsJSON}/share/doc/nixos/options.json
'';

manual-combined = runCommand "nixos-manual-combined"
{ inputs = lib.sourceFilesBySuffices ./. [ ".xml" ".md" ];
nativeBuildInputs = [ pkgs.nixos-render-docs pkgs.libxml2.bin pkgs.libxslt.bin ];
meta.description = "The NixOS manual as plain docbook XML";
}
''
${prepareManualFromMD}

nixos-render-docs -j $NIX_BUILD_CORES manual docbook \
--manpage-urls ${manpageUrls} \

@@ -193,7 +197,14 @@ in rec {

# Generate the NixOS manual.
manualHTML = runCommand "nixos-manual-html"
{ nativeBuildInputs = [ buildPackages.libxml2.bin buildPackages.libxslt.bin ];
{ nativeBuildInputs =
if allowDocBook then [
buildPackages.libxml2.bin
buildPackages.libxslt.bin
] else [
buildPackages.nixos-render-docs
];
inputs = lib.optionals (! allowDocBook) (lib.sourceFilesBySuffices ./. [ ".md" ]);
meta.description = "The NixOS manual in HTML format";
allowedReferences = ["out"];
}

@@ -201,6 +212,12 @@ in rec {
# Generate the HTML manual.
dst=$out/share/doc/nixos
mkdir -p $dst

cp ${../../../doc/style.css} $dst/style.css
cp ${../../../doc/overrides.css} $dst/overrides.css
cp -r ${pkgs.documentation-highlighter} $dst/highlightjs

${if allowDocBook then ''
xsltproc \
${manualXsltprocOptions} \
--stringparam id.warnings "1" \

@@ -213,10 +230,25 @@ in rec {

mkdir -p $dst/images/callouts
cp ${docbook_xsl_ns}/xml/xsl/docbook/images/callouts/*.svg $dst/images/callouts/
'' else ''
${prepareManualFromMD}

cp ${../../../doc/style.css} $dst/style.css
cp ${../../../doc/overrides.css} $dst/overrides.css
cp -r ${pkgs.documentation-highlighter} $dst/highlightjs
# TODO generator is set like this because the docbook/md manual compare workflow will
# trigger if it's different
nixos-render-docs -j $NIX_BUILD_CORES manual html \
--manpage-urls ${manpageUrls} \
--revision ${lib.escapeShellArg revision} \
--generator "DocBook XSL Stylesheets V${docbook_xsl_ns.version}" \
--stylesheet style.css \
--stylesheet overrides.css \
--stylesheet highlightjs/mono-blue.css \
--script ./highlightjs/highlight.pack.js \
--script ./highlightjs/loader.js \
--toc-depth 1 \
--chunk-toc-depth 1 \
./manual.md \
$dst/index.html
''}

mkdir -p $out/nix-support
echo "nix-build out $out" >> $out/nix-support/hydra-build-products

@@ -101,11 +101,24 @@ Creates an Option attribute set for an option that specifies the package a modul

**Note**: You shouldn’t necessarily make package options for all of your modules. You can always overwrite a specific package throughout nixpkgs by using [nixpkgs overlays](https://nixos.org/manual/nixpkgs/stable/#chap-overlays).

The default package is specified as a list of strings representing its attribute path in nixpkgs. Because of this, you need to pass nixpkgs itself as the first argument.
The package is specified in the third argument under `default` as a list of strings
representing its attribute path in nixpkgs (or another package set).
Because of this, you need to pass nixpkgs itself (or a subset) as the first argument.

The second argument is the name of the option, used in the description "The \<name\> package to use.". You can also pass an example value, either a literal string or a package's attribute path.
The second argument may be either a string or a list of strings.
It provides the display name of the package in the description of the generated option
(using only the last element if the passed value is a list)
and serves as the fallback value for the `default` argument.

You can omit the default path if the name of the option is also attribute path in nixpkgs.
To include extra information in the description, pass `extraDescription` to
append arbitrary text to the generated description.
You can also pass an `example` value, either a literal string or an attribute path.

The default argument can be omitted if the provided name is
an attribute of pkgs (if name is a string) or a
valid attribute path in pkgs (if name is a list).

If you wish to explicitly provide no default, pass `null` as `default`.

During the transition to CommonMark documentation `mkPackageOption` creates an option with a DocBook description attribute, once the transition is completed it will create a CommonMark description instead. `mkPackageOptionMD` always creates an option with a CommonMark description attribute and will be removed some time after the transition is completed.

@@ -142,6 +155,21 @@ lib.mkOption {
```
:::

::: {#ex-options-declarations-util-mkPackageOption-extraDescription .example}
```nix
mkPackageOption pkgs [ "python39Packages" "pytorch" ] {
extraDescription = "This is an example and doesn't actually do anything.";
}
# is like
lib.mkOption {
type = lib.types.package;
default = pkgs.python39Packages.pytorch;
defaultText = lib.literalExpression "pkgs.python39Packages.pytorch";
description = "The pytorch package to use. This is an example and doesn't actually do anything.";
}
```
:::

## Extensible Option Types {#sec-option-declarations-eot}

Extensible option types is a feature that allow to extend certain types

@@ -428,7 +428,7 @@ Use the following commands:

UEFI systems

: You must select a boot-loader, either system-boot or GRUB. The recommended
: You must select a boot-loader, either systemd-boot or GRUB. The recommended
option is systemd-boot: set the option [](#opt-boot.loader.systemd-boot.enable)
to `true`. `nixos-generate-config` should do this automatically
for new configurations when booted in UEFI mode.

@@ -441,10 +441,10 @@ Use the following commands:
If you want to use GRUB, set [](#opt-boot.loader.grub.device) to `nodev` and
[](#opt-boot.loader.grub.efiSupport) to `true`.

With system-boot, you should not need any special configuration to detect
With systemd-boot, you should not need any special configuration to detect
other installed systems. With GRUB, set [](#opt-boot.loader.grub.useOSProber)
to `true`, but this will only detect windows partitions, not other linux
distributions. If you dual boot another linux distribution, use system-boot
to `true`, but this will only detect windows partitions, not other Linux
distributions. If you dual boot another Linux distribution, use systemd-boot
instead.

If you need to configure networking for your machine the

@@ -1,10 +1,6 @@
.Dd January 1, 1980
.\" nixpkgs groff will use Nixpkgs as the OS in the title by default, taking it from
.\" doc-default-operating-system. mandoc doesn't have this register set by default,
.\" so we can use it as a groff/mandoc switch.
.ie ddoc-default-operating-system .Dt nixos-build-vms \&8 "NixOS System Manager's Manual"
.el .Dt nixos-build-vms 8
.Os NixOS
.Dt nixos-build-vms 8
.Os
.Sh NAME
.Nm nixos-build-vms
.Nd build a network of virtual machines from a network of NixOS configurations

@@ -1,10 +1,6 @@
.Dd January 1, 1980
.\" nixpkgs groff will use Nixpkgs as the OS in the title by default, taking it from
.\" doc-default-operating-system. mandoc doesn't have this register set by default,
.\" so we can use it as a groff/mandoc switch.
.ie ddoc-default-operating-system .Dt nixos-enter \&8 "NixOS System Manager's Manual"
.el .Dt nixos-enter 8
.Os NixOS
.Dt nixos-enter 8
.Os
.Sh NAME
.Nm nixos-enter
.Nd run a command in a NixOS chroot environment

@@ -1,10 +1,6 @@
.Dd January 1, 1980
.\" nixpkgs groff will use Nixpkgs as the OS in the title by default, taking it from
.\" doc-default-operating-system. mandoc doesn't have this register set by default,
.\" so we can use it as a groff/mandoc switch.
.ie ddoc-default-operating-system .Dt nixos-generate-config \&8 "NixOS System Manager's Manual"
.el .Dt nixos-generate-config 8
.Os NixOS
.Dt nixos-generate-config 8
.Os
.Sh NAME
.Nm nixos-generate-config
.Nd generate NixOS configuration modules

@@ -1,10 +1,6 @@
.Dd January 1, 1980
.\" nixpkgs groff will use Nixpkgs as the OS in the title by default, taking it from
.\" doc-default-operating-system. mandoc doesn't have this register set by default,
.\" so we can use it as a groff/mandoc switch.
.ie ddoc-default-operating-system .Dt nixos-install \&8 "NixOS System Manager's Manual"
.el .Dt nixos-install 8
.Os NixOS
.Dt nixos-install 8
.Os
.Sh NAME
.Nm nixos-install
.Nd install bootloader and NixOS

@@ -1,10 +1,6 @@
.Dd January 1, 1980
.\" nixpkgs groff will use Nixpkgs as the OS in the title by default, taking it from
.\" doc-default-operating-system. mandoc doesn't have this register set by default,
.\" so we can use it as a groff/mandoc switch.
.ie ddoc-default-operating-system .Dt nixos-option \&8 "NixOS System Manager's Manual"
.el .Dt nixos-option 8
.Os NixOS
.Dt nixos-option 8
.Os
.Sh NAME
.Nm nixos-option
.Nd inspect a NixOS configuration

@@ -1,10 +1,6 @@
.Dd January 1, 1980
.\" nixpkgs groff will use Nixpkgs the OS in the title by default, taking it from
.\" doc-default-operating-system. mandoc doesn't have this register set by default,
.\" so we can use it as a groff/mandoc switch.
.ie ddoc-default-operating-system .Dt nixos-rebuild \&8 "NixOS System Manager's Manual"
.el .Dt nixos-rebuild 8
.Os NixOS
.Dt nixos-rebuild 8
.Os
.Sh NAME
.Nm nixos-rebuild
.Nd reconfigure a NixOS machine

@@ -1,10 +1,6 @@
.Dd January 1, 1980
.\" nixpkgs groff will use Nixpkgs as the OS in the title by default, taking it from
.\" doc-default-operating-system. mandoc doesn't have this register set by default,
.\" so we can use it as a groff/mandoc switch.
.ie ddoc-default-operating-system .Dt nixos-version \&8 "NixOS System Manager's Manual"
.el .Dt nixos-version 8
.Os NixOS
.Dt nixos-version 8
.Os
.Sh NAME
.Nm nixos-version
.Nd show the NixOS version

@@ -47,7 +47,10 @@ development/development.md
contributing-to-this-manual.chapter.md
```

```{=include=} appendix
```{=include=} appendix html:into-file=//options.html
nixos-options.md
```

```{=include=} appendix html:into-file=//release-notes.html
release-notes/release-notes.md
```
@ -8,6 +8,10 @@ In addition to numerous new and upgraded packages, this release has the followin
|
|||
|
||||
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
|
||||
|
||||
- Core version changes:
|
||||
|
||||
- default linux: 5.15 -\> 6.1, all supported kernels available
|
||||
|
||||
- Cinnamon has been updated to 5.6, see [the pull request](https://github.com/NixOS/nixpkgs/pull/201328#issue-1449910204) for what is changed.
|
||||
|
||||
- KDE Plasma has been updated to v5.27, see [the release notes](https://kde.org/announcements/plasma/5/5.27.0/) for what is changed.
|
||||
|
@ -78,6 +82,8 @@ In addition to numerous new and upgraded packages, this release has the followin
|
|||
|
||||
- `git-bug` has been updated to at least version 0.8.0, which includes backwards incompatible changes. The `git-bug-migration` package can be used to upgrade existing repositories.
|
||||
|
||||
- `keepassx` and `keepassx2` have been removed, due to upstream [stopping development](https://www.keepassx.org/index.html%3Fp=636.html). Consider [KeePassXC](https://keepassxc.org) as a maintained alternative.
|
||||
|
||||
- The `services.kubo.settings` option is now no longer stateful. If you changed any of the options in `services.kubo.settings` in the past and then removed them from your NixOS configuration again, those changes are still in your Kubo configuration file but will now be reset to the default. If you're unsure, you may want to make a backup of your configuration file (probably /var/lib/ipfs/config) and compare after the update.
|
||||
|
||||
- The EC2 image module no longer fetches instance metadata in stage-1. This results in a significantly smaller initramfs, since network drivers no longer need to be included, and faster boots, since metadata fetching can happen in parallel with startup of other services.
|
||||
|
@ -103,7 +109,7 @@ In addition to numerous new and upgraded packages, this release has the followin
|
|||
|
||||
- The EC2 image module previously detected and automatically mounted ext3-formatted instance store devices and partitions in stage-1 (initramfs), storing `/tmp` on the first discovered device. This behaviour, which only catered to very specific use cases and could not be disabled, has been removed. Users relying on this should provide their own implementation, and probably use ext4 and perform the mount in stage-2.
|
||||
|
||||
- `teleport` has been upgraded to major version 11. Please see upstream [upgrade instructions](https://goteleport.com/docs/setup/operations/upgrading/) and [release notes](https://goteleport.com/docs/changelog/#1100).
|
||||
- `teleport` has been upgraded from major version 10 to major version 12. Please see upstream [upgrade instructions](https://goteleport.com/docs/setup/operations/upgrading/) and release notes for versions [11](https://goteleport.com/docs/changelog/#1100) and [12](https://goteleport.com/docs/changelog/#1201). Note that Teleport does not officially support upgrades across more than one major version at a time. If you're running Teleport server components, it is recommended to first upgrade to an intermediate 11.x version by setting `services.teleport.package = pkgs.teleport_11`. Afterwards, this option can be removed to upgrade to the default version (12).
|
||||
|
||||
- The EC2 image module previously detected and activated swap-formatted instance store devices and partitions in stage-1 (initramfs). This behaviour has been removed. Users relying on this should provide their own implementation.
|
||||
|
||||
|
@ -115,6 +121,8 @@ In addition to numerous new and upgraded packages, this release has the followin
|
|||
|
||||
- The [services.wordpress.sites.<name>.plugins](#opt-services.wordpress.sites._name_.plugins) and [services.wordpress.sites.<name>.themes](#opt-services.wordpress.sites._name_.themes) options have been converted from sets to attribute sets to allow for consumers to specify explicit install paths via attribute name.
|
||||
|
||||
- `protonmail-bridge` package has been updated to v3.0 and the CLI executable is now named bridge instead of protonmail-bridge to be more in line with upstream.
|
||||
|
||||
- Nebula now runs as a system user and group created for each nebula network, using the `CAP_NET_ADMIN` ambient capability on launch rather than starting as root. Ensure that any files each Nebula instance needs to access are owned by the correct user and group, by default `nebula-${networkName}`.
|
||||
|
||||
- In `mastodon` it is now necessary to specify location of file with `PostgreSQL` database password. In `services.mastodon.database.passwordFile` parameter default value `/var/lib/mastodon/secrets/db-password` has been changed to `null`.
|
||||
|
@ -132,12 +140,18 @@ In addition to numerous new and upgraded packages, this release has the followin
[upstream's release notes](https://github.com/iputils/iputils/releases/tag/20221126)
for more details and available replacements.
- [services.xserver.videoDrivers](options.html#opt-services.xserver.videoDrivers) now defaults to the `modesetting` driver over device-specific ones. The `radeon`, `amdgpu` and `nouveau` drivers are still available, but effectively unmaintained and not recommended for use.
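If you want to keep one of the device-specific drivers anyway, you can still set the option explicitly; a sketch, picking the driver that matches your hardware:

```nix
{
  services.xserver.videoDrivers = [ "amdgpu" ];
}
```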
- conntrack helper autodetection has been removed from kernels 6.0 and up upstream, and an assertion was added to ensure things don't silently stop working. Migrate your configuration to assign helpers explicitly or use an older LTS kernel branch as a temporary workaround.
## Other Notable Changes {#sec-release-23.05-notable-changes}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
- `vim_configurable` has been renamed to `vim-full` to avoid confusion: `vim-full`'s build-time features are configurable, but both `vim` and `vim-full` are _customizable_ (in the sense of user configuration, like vimrc).
- Pantheon now defaults to Mutter 42 and GNOME settings daemon 42; all Pantheon packages now track elementary OS 7 updates.
- The module for the application firewall `opensnitch` gained the ability to configure rules, available as [services.opensnitch.rules](#opt-services.opensnitch.rules).
- The module `usbmuxd` now has the ability to change the package used by the daemon. In case you're experiencing issues with `usbmuxd`, you can try an alternative program like `usbmuxd2`. Available as [services.usbmuxd.package](#opt-services.usbmuxd.package).
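A hedged sketch of switching the daemon implementation, assuming the alternative is exposed as `pkgs.usbmuxd2`:

```nix
{
  services.usbmuxd.enable = true;
  services.usbmuxd.package = pkgs.usbmuxd2;
}
```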
@ -162,6 +176,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- NixOS now defaults to using nsncd (a non-caching reimplementation in Rust) as NSS lookup dispatcher, instead of the buggy and deprecated glibc-provided nscd. If you need to switch back, set `services.nscd.enableNsncd = false`, but please open an issue in nixpkgs so your issue can be fixed.
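Switching back to the old dispatcher is a one-liner (please still report the problem so it can be fixed):

```nix
{
  services.nscd.enableNsncd = false;
}
```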
- `services.borgmatic` now allows for multiple configurations, placed in `/etc/borgmatic.d/`; you can define them with `services.borgmatic.configurations`.
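A minimal sketch of one named configuration, following the option layout of the module as changed further down in this commit; the name `personal` and the paths are only examples:

```nix
{
  services.borgmatic.configurations.personal = {
    location = {
      source_directories = [ "/home" ];
      repositories = [ "/var/local/backups/local.borg" ];
    };
  };
}
```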
- The `dnsmasq` service now takes configuration via the
`services.dnsmasq.settings` attribute set. The option
`services.dnsmasq.extraConfig` will be deprecated when NixOS 22.11 reaches
@ -255,8 +271,12 @@ In addition to numerous new and upgraded packages, this release has the followin
- The `unifi-poller` package and corresponding NixOS module have been renamed to `unpoller` to match upstream.
- `protonmail-bridge` package has been updated to v3.0 and the CLI executable is now named bridge instead of protonmail-bridge to be more in line with upstream.
- The new option `services.tailscale.useRoutingFeatures` controls various settings for using Tailscale features like exit nodes and subnet routers. If you wish to use your machine as an exit node, set this to `server`; if you wish to use an exit node, set it to `client`. The strict RPF warning has been removed, as the RPF will be loosened automatically based on the value of this setting.
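For example (a sketch), on a machine that should advertise itself as an exit node or subnet router:

```nix
{
  services.tailscale.enable = true;
  services.tailscale.useRoutingFeatures = "server";
  # Use "client" instead on machines that only consume an exit node.
}
```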
- `openjdk` from version 11 and above is no longer built with `openjfx` (i.e.: JavaFX) support by default. You can re-enable it by overriding, e.g.: `openjdk11.override { enableJavaFX = true; };`.
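If you need JavaFX system-wide, one way is an overlay around the override shown above (a sketch; adjust the JDK attribute to the version you actually use):

```nix
{
  nixpkgs.overlays = [
    (final: prev: {
      openjdk11 = prev.openjdk11.override { enableJavaFX = true; };
    })
  ];
}
```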
- [Xastir](https://xastir.org/index.php/Main_Page) can now access AX.25 interfaces via the `libax25` package.
- `tvbrowser-bin` was removed, and now `tvbrowser` is built from source.
@ -266,3 +286,5 @@ In addition to numerous new and upgraded packages, this release has the followin
- The option `services.nomad.extraSettingsPlugins` has been fixed to allow more than one plugin in the path.
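For example (a sketch), two plugins can now be listed together; the plugin packages named here are assumptions, substitute the ones you actually use:

```nix
{
  services.nomad.extraSettingsPlugins = [
    pkgs.nomad-driver-podman
    pkgs.nomad-autoscaler
  ];
}
```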
- The option `services.prometheus.exporters.pihole.interval` does not exist anymore and has been removed.
- `k3s` can now be configured with an EnvironmentFile for its systemd service, allowing secrets to be provided without ending up in the Nix Store.
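A hedged sketch using the `services.k3s.environmentFile` option added further down in this change; the path is hypothetical:

```nix
{
  services.k3s.enable = true;
  # e.g. contains K3S_TOKEN=...; the file itself never enters the Nix store.
  services.k3s.environmentFile = "/run/secrets/k3s.env";
}
```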
@ -154,6 +154,9 @@ To solve this, you can run `fdisk -l $image` and generate `dd if=$image of=$imag
|
|||
, # Shell code executed after the VM has finished.
|
||||
postVM ? ""
|
||||
|
||||
, # Guest memory size
|
||||
memSize ? 1024
|
||||
|
||||
, # Copy the contents of the Nix store to the root of the image and
|
||||
# skip further setup. Incompatible with `contents`,
|
||||
# `installBootLoader` and `configFile`.
|
||||
|
@ -525,7 +528,7 @@ let format' = format; in let
|
|||
"-drive if=pflash,format=raw,unit=1,file=$efiVars"
|
||||
]
|
||||
);
|
||||
memSize = 1024;
|
||||
inherit memSize;
|
||||
} ''
|
||||
export PATH=${binPath}:$PATH
|
||||
|
||||
|
|
|
@ -73,6 +73,9 @@
|
|||
, # Shell code executed after the VM has finished.
|
||||
postVM ? ""
|
||||
|
||||
, # Guest memory size
|
||||
memSize ? 1024
|
||||
|
||||
, name ? "nixos-disk-image"
|
||||
|
||||
, # Disk image format, one of qcow2, qcow2-compressed, vdi, vpc, raw.
|
||||
|
@ -242,6 +245,7 @@ let
|
|||
{
|
||||
QEMU_OPTS = "-drive file=$bootDiskImage,if=virtio,cache=unsafe,werror=report"
|
||||
+ " -drive file=$rootDiskImage,if=virtio,cache=unsafe,werror=report";
|
||||
inherit memSize;
|
||||
preVM = ''
|
||||
PATH=$PATH:${pkgs.qemu_kvm}/bin
|
||||
mkdir $out
|
||||
|
|
|
@ -215,12 +215,14 @@ foreach my $u (@{$spec->{users}}) {
|
|||
} else {
|
||||
$u->{uid} = allocUid($name, $u->{isSystemUser}) if !defined $u->{uid};
|
||||
|
||||
if (!defined $u->{hashedPassword}) {
|
||||
if (defined $u->{initialPassword}) {
|
||||
$u->{hashedPassword} = hashPassword($u->{initialPassword});
|
||||
} elsif (defined $u->{initialHashedPassword}) {
|
||||
$u->{hashedPassword} = $u->{initialHashedPassword};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Ensure home directory incl. ownership and permissions.
|
||||
if ($u->{createHome} and !$is_dry) {
|
||||
|
|
|
@ -273,6 +273,9 @@ let
|
|||
{command}`passwd` command. Otherwise, it's
|
||||
equivalent to setting the {option}`hashedPassword` option.
|
||||
|
||||
Note that the {option}`hashedPassword` option will override
|
||||
this option if both are set.
|
||||
|
||||
${hashedPasswordDescription}
|
||||
'';
|
||||
};
|
||||
|
@ -291,6 +294,9 @@ let
|
|||
is world-readable in the Nix store, so it should only be
|
||||
used for guest accounts or passwords that will be changed
|
||||
promptly.
|
||||
|
||||
Note that the {option}`password` option will override this
|
||||
option if both are set.
|
||||
'';
|
||||
};
|
||||
|
||||
|
|
16
third_party/nixpkgs/nixos/modules/hardware/keyboard/qmk.nix
vendored
Normal file
|
@ -0,0 +1,16 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
cfg = config.hardware.keyboard.qmk;
|
||||
inherit (lib) mdDoc mkEnableOption mkIf;
|
||||
|
||||
in
|
||||
{
|
||||
options.hardware.keyboard.qmk = {
|
||||
enable = mkEnableOption (mdDoc "non-root access to the firmware of QMK keyboards");
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
services.udev.packages = [ pkgs.qmk-udev-rules ];
|
||||
};
|
||||
}
|
|
@ -1,16 +1,16 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.hardware.keyboard.teck;
|
||||
inherit (lib) mdDoc mkEnableOption mkIf;
|
||||
|
||||
in
|
||||
{
|
||||
options.hardware.keyboard.teck = {
|
||||
enable = mkEnableOption (lib.mdDoc "non-root access to the firmware of TECK keyboards");
|
||||
enable = mkEnableOption (mdDoc "non-root access to the firmware of TECK keyboards");
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
services.udev.packages = [ pkgs.teck-udev-rules ];
|
||||
};
|
||||
}
|
||||
|
||||
|
|
|
@ -1,12 +1,13 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
cfg = config.hardware.keyboard.uhk;
|
||||
inherit (lib) mdDoc mkEnableOption mkIf;
|
||||
|
||||
in
|
||||
{
|
||||
options.hardware.keyboard.uhk = {
|
||||
enable = mkEnableOption (lib.mdDoc ''
|
||||
enable = mkEnableOption (mdDoc ''
|
||||
non-root access to the firmware of UHK keyboards.
|
||||
You need it when you want to flash a new firmware on the keyboard.
|
||||
Access to the keyboard is granted to users in the "input" group.
|
||||
|
|
|
@ -1,21 +1,18 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
inherit (lib) mkOption mkIf types;
|
||||
cfg = config.hardware.keyboard.zsa;
|
||||
inherit (lib) mkEnableOption mkIf mdDoc;
|
||||
|
||||
in
|
||||
{
|
||||
options.hardware.keyboard.zsa = {
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = lib.mdDoc ''
|
||||
Enables udev rules for keyboards from ZSA like the ErgoDox EZ, Planck EZ and Moonlander Mark I.
|
||||
enable = mkEnableOption (mdDoc ''
|
||||
udev rules for keyboards from ZSA like the ErgoDox EZ, Planck EZ and Moonlander Mark I.
|
||||
You need it when you want to flash a new configuration on the keyboard
|
||||
or use their live training in the browser.
|
||||
You may want to install the wally-cli package.
|
||||
'';
|
||||
};
|
||||
'');
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
|
|
@ -461,7 +461,9 @@ in
|
|||
# If requested enable modesetting via kernel parameter.
|
||||
boot.kernelParams = optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
|
||||
++ optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
|
||||
++ optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1";
|
||||
++ optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
|
||||
# proprietary driver is not compiled with support for X86_KERNEL_IBT
|
||||
++ optional (!cfg.open && config.boot.kernelPackages.kernel.kernelAtLeast "6.2") "ibt=off";
|
||||
|
||||
services.udev.extraRules =
|
||||
''
|
||||
|
|
49
third_party/nixpkgs/nixos/modules/installer/sd-card/sd-image-powerpc64le.nix
vendored
Normal file
|
@ -0,0 +1,49 @@
|
|||
# To build, use:
|
||||
# nix-build nixos -I nixos-config=nixos/modules/installer/sd-card/sd-image-powerpc64le.nix -A config.system.build.sdImage
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
{
|
||||
imports = [
|
||||
../../profiles/base.nix
|
||||
../../profiles/installation-device.nix
|
||||
./sd-image.nix
|
||||
];
|
||||
|
||||
boot.loader = {
|
||||
# powerpc64le-linux typically uses petitboot
|
||||
grub.enable = false;
|
||||
generic-extlinux-compatible = {
|
||||
# petitboot does not support all of the extlinux extensions to
|
||||
# syslinux, but its parser is very forgiving; it essentially ignores
|
||||
# whatever it doesn't understand. See below for a filename adjustment.
|
||||
enable = true;
|
||||
};
|
||||
};
|
||||
|
||||
boot.consoleLogLevel = lib.mkDefault 7;
|
||||
boot.kernelParams = [ "console=hvc0" ];
|
||||
|
||||
sdImage = {
|
||||
populateFirmwareCommands = "";
|
||||
populateRootCommands = ''
|
||||
mkdir -p ./files/boot
|
||||
${config.boot.loader.generic-extlinux-compatible.populateCmd} \
|
||||
-c ${config.system.build.toplevel} \
|
||||
-d ./files/boot
|
||||
''
|
||||
# https://github.com/open-power/petitboot/blob/master/discover/syslinux-parser.c
|
||||
# petitboot will look in these paths (plus all-caps versions of them):
|
||||
# /boot/syslinux/syslinux.cfg
|
||||
# /syslinux/syslinux.cfg
|
||||
# /syslinux.cfg
|
||||
+ ''
|
||||
mv ./files/boot/extlinux ./files/boot/syslinux
|
||||
mv ./files/boot/syslinux/extlinux.conf ./files/boot/syslinux/syslinux.cfg
|
||||
''
|
||||
# petitboot does not support relative paths for LINUX or INITRD; it prepends
|
||||
# a `/` when parsing these fields
|
||||
+ ''
|
||||
sed -i 's_^\(\W\W*\(INITRD\|initrd\|LINUX\|linux\)\W\)\.\./_\1/boot/_' ./files/boot/syslinux/syslinux.cfg
|
||||
'';
|
||||
};
|
||||
}
|
|
@ -1,7 +1,7 @@
|
|||
{
|
||||
x86_64-linux = "/nix/store/lsr79q5xqd9dv97wn87x12kzax8s8i1s-nix-2.13.2";
|
||||
i686-linux = "/nix/store/wky9xjwiwzpifgk0s3f2nrg8nr67bi7x-nix-2.13.2";
|
||||
aarch64-linux = "/nix/store/v8drr3x1ia6bdr8y4vl79mlz61xynrpm-nix-2.13.2";
|
||||
x86_64-darwin = "/nix/store/1l14si31p4aw7c1gwgjy0nq55k38j9nj-nix-2.13.2";
|
||||
aarch64-darwin = "/nix/store/6x7nr1r780fgn254zhkwhih3f3i8cr45-nix-2.13.2";
|
||||
x86_64-linux = "/nix/store/mc43d38fibi94pp5crfwacl5gbslccd0-nix-2.13.3";
|
||||
i686-linux = "/nix/store/09m966pj26cgd4ihlg8ihl1106j3vih8-nix-2.13.3";
|
||||
aarch64-linux = "/nix/store/7f191d125akld27gc6jl0r13l8pl7x0h-nix-2.13.3";
|
||||
x86_64-darwin = "/nix/store/1wn9jkvi2zqfjnjgg7lnp30r2q2y8whd-nix-2.13.3";
|
||||
aarch64-darwin = "/nix/store/8w0v2mffa10chrf1h66cbvbpw86qmh85-nix-2.13.3";
|
||||
}
|
||||
|
|
|
@ -392,7 +392,7 @@ in
|
|||
tape = 25;
|
||||
video = 26;
|
||||
dialout = 27;
|
||||
polkituser = 28;
|
||||
#polkituser = 28; # currently unused, polkitd doesn't need a group
|
||||
utmp = 29;
|
||||
# ddclient = 30; # converted to DynamicUser = true
|
||||
davfs2 = 31;
|
||||
|
@ -510,7 +510,6 @@ in
|
|||
#seeks = 148; # removed 2020-06-21
|
||||
prosody = 149;
|
||||
i2pd = 150;
|
||||
systemd-coredump = 151;
|
||||
systemd-network = 152;
|
||||
systemd-resolve = 153;
|
||||
systemd-timesync = 154;
|
||||
|
|
|
@ -59,6 +59,7 @@
|
|||
./hardware/gpgsmartcards.nix
|
||||
./hardware/hackrf.nix
|
||||
./hardware/i2c.nix
|
||||
./hardware/keyboard/qmk.nix
|
||||
./hardware/keyboard/teck.nix
|
||||
./hardware/keyboard/uhk.nix
|
||||
./hardware/keyboard/zsa.nix
|
||||
|
@ -1052,6 +1053,7 @@
|
|||
./services/search/kibana.nix
|
||||
./services/search/meilisearch.nix
|
||||
./services/search/opensearch.nix
|
||||
./services/search/qdrant.nix
|
||||
./services/search/solr.nix
|
||||
./services/security/aesmd.nix
|
||||
./services/security/certmgr.nix
|
||||
|
@ -1167,6 +1169,7 @@
|
|||
./services/web-apps/moodle.nix
|
||||
./services/web-apps/netbox.nix
|
||||
./services/web-apps/nextcloud.nix
|
||||
./services/web-apps/nextcloud-notify_push.nix
|
||||
./services/web-apps/nexus.nix
|
||||
./services/web-apps/nifi.nix
|
||||
./services/web-apps/node-red.nix
|
||||
|
|
|
@ -113,7 +113,7 @@ in
|
|||
group = "polkituser";
|
||||
};
|
||||
|
||||
users.groups.polkituser.gid = config.ids.gids.polkituser;
|
||||
users.groups.polkituser = {};
|
||||
};
|
||||
|
||||
}
|
||||
|
|
|
@ -5,29 +5,21 @@ with lib;
|
|||
let
|
||||
cfg = config.services.borgmatic;
|
||||
settingsFormat = pkgs.formats.yaml { };
|
||||
cfgfile = settingsFormat.generate "config.yaml" cfg.settings;
|
||||
in {
|
||||
options.services.borgmatic = {
|
||||
enable = mkEnableOption (lib.mdDoc "borgmatic");
|
||||
|
||||
settings = mkOption {
|
||||
description = lib.mdDoc ''
|
||||
See https://torsion.org/borgmatic/docs/reference/configuration/
|
||||
'';
|
||||
type = types.submodule {
|
||||
cfgType = with types; submodule {
|
||||
freeformType = settingsFormat.type;
|
||||
options.location = {
|
||||
source_directories = mkOption {
|
||||
type = types.listOf types.str;
|
||||
description = lib.mdDoc ''
|
||||
type = listOf str;
|
||||
description = mdDoc ''
|
||||
List of source directories to backup (required). Globs and
|
||||
tildes are expanded.
|
||||
'';
|
||||
example = [ "/home" "/etc" "/var/log/syslog*" ];
|
||||
};
|
||||
repositories = mkOption {
|
||||
type = types.listOf types.str;
|
||||
description = lib.mdDoc ''
|
||||
type = listOf str;
|
||||
description = mdDoc ''
|
||||
Paths to local or remote repositories (required). Tildes are
|
||||
expanded. Multiple repositories are backed up to in
|
||||
sequence. Borg placeholders can be used. See the output of
|
||||
|
@ -37,12 +29,34 @@ in {
|
|||
service file to the ReadWritePaths list.
|
||||
'';
|
||||
example = [
|
||||
"user@backupserver:sourcehostname.borg"
|
||||
"user@backupserver:{fqdn}"
|
||||
"ssh://user@backupserver/./sourcehostname.borg"
|
||||
"ssh://user@backupserver/./{fqdn}"
|
||||
"/var/local/backups/local.borg"
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
cfgfile = settingsFormat.generate "config.yaml" cfg.settings;
|
||||
in
|
||||
{
|
||||
options.services.borgmatic = {
|
||||
enable = mkEnableOption (mdDoc "borgmatic");
|
||||
|
||||
settings = mkOption {
|
||||
description = mdDoc ''
|
||||
See https://torsion.org/borgmatic/docs/reference/configuration/
|
||||
'';
|
||||
default = null;
|
||||
type = types.nullOr cfgType;
|
||||
};
|
||||
|
||||
configurations = mkOption {
|
||||
description = mdDoc ''
|
||||
Set of borgmatic configurations, see https://torsion.org/borgmatic/docs/reference/configuration/
|
||||
'';
|
||||
default = { };
|
||||
type = types.attrsOf cfgType;
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -50,9 +64,13 @@ in {
|
|||
|
||||
environment.systemPackages = [ pkgs.borgmatic ];
|
||||
|
||||
environment.etc."borgmatic/config.yaml".source = cfgfile;
|
||||
environment.etc = (optionalAttrs (cfg.settings != null) { "borgmatic/config.yaml".source = cfgfile; }) //
|
||||
mapAttrs'
|
||||
(name: value: nameValuePair
|
||||
"borgmatic.d/${name}.yaml"
|
||||
{ source = settingsFormat.generate "${name}.yaml" value; })
|
||||
cfg.configurations;
|
||||
|
||||
systemd.packages = [ pkgs.borgmatic ];
|
||||
|
||||
};
|
||||
}
|
||||
|
|
|
@ -106,6 +106,14 @@ in
|
|||
description = lib.mdDoc "Only run the server. This option only makes sense for a server.";
|
||||
};
|
||||
|
||||
environmentFile = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
description = lib.mdDoc ''
|
||||
File path containing environment variables for configuring the k3s service in the format of an EnvironmentFile. See systemd.exec(5).
|
||||
'';
|
||||
default = null;
|
||||
};
|
||||
|
||||
configPath = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
default = null;
|
||||
|
@ -154,6 +162,7 @@ in
|
|||
LimitNPROC = "infinity";
|
||||
LimitCORE = "infinity";
|
||||
TasksMax = "infinity";
|
||||
EnvironmentFile = cfg.environmentFile;
|
||||
ExecStart = concatStringsSep " \\\n " (
|
||||
[
|
||||
"${cfg.package}/bin/k3s ${cfg.role}"
|
||||
|
|
|
@ -41,17 +41,42 @@ with lib;
|
|||
tokenFile = mkOption {
|
||||
type = types.path;
|
||||
description = lib.mdDoc ''
|
||||
The full path to a file which contains either a runner registration token or a
|
||||
(fine-grained) personal access token (PAT).
|
||||
The file should contain exactly one line with the token without any newline.
|
||||
If a registration token is given, it can be used to re-register a runner of the same
|
||||
name but is time-limited. If the file contains a PAT, the service creates a new
|
||||
registration token on startup as needed. Make sure the PAT has a scope of
|
||||
`admin:org` for organization-wide registrations or a scope of
|
||||
`repo` for a single repository. Fine-grained PATs need read and write permission
|
||||
to the "Administration" resources.
|
||||
The full path to a file which contains either
|
||||
|
||||
Changing this option or the file's content triggers a new runner registration.
|
||||
* a fine-grained personal access token (PAT),
|
||||
* a classic PAT
|
||||
* or a runner registration token
|
||||
|
||||
Changing this option or the `tokenFile`’s content triggers a new runner registration.
|
||||
|
||||
We suggest using the fine-grained PATs. A runner registration token is valid
|
||||
only for 1 hour after creation, so the next time the runner configuration changes
|
||||
this will give you hard-to-debug HTTP 404 errors in the configure step.
|
||||
|
||||
The file should contain exactly one line with the token without any newline.
|
||||
(Use `echo -n '…token…' > …token file…` to make sure no newlines sneak in.)
|
||||
|
||||
If the file contains a PAT, the service creates a new registration token
|
||||
on startup as needed.
|
||||
If a registration token is given, it can be used to re-register a runner of the same
|
||||
name but is time-limited as noted above.
|
||||
|
||||
For fine-grained PATs:
|
||||
|
||||
Give it "Read and Write access to organization/repository self hosted runners",
|
||||
depending on whether it is organization wide or per-repository. You might have to
|
||||
experiment a little, fine-grained PATs are a `beta` Github feature and still subject
|
||||
to change; nonetheless they are the best option at the moment.
|
||||
|
||||
For classic PATs:
|
||||
|
||||
Make sure the PAT has a scope of `admin:org` for organization-wide registrations
|
||||
or a scope of `repo` for a single repository.
|
||||
|
||||
For runner registration tokens:
|
||||
|
||||
Nothing special needs to be done, but updating will break after one hour,
|
||||
so these are not recommended.
|
||||
'';
|
||||
example = "/run/secrets/github-runner/nixos.token";
|
||||
};
|
||||
|
|
|
@ -124,6 +124,8 @@ in
|
|||
# The state directory is entirely empty which indicates a first start
|
||||
copy_tokens
|
||||
fi
|
||||
# Always clean workDir
|
||||
find -H "$WORK_DIRECTORY" -mindepth 1 -delete
|
||||
'';
|
||||
configureRunner = writeScript "configure" ''
|
||||
if [[ -e "${newConfigTokenPath}" ]]; then
|
||||
|
@ -159,9 +161,6 @@ in
|
|||
fi
|
||||
'';
|
||||
setupWorkDir = writeScript "setup-work-dirs" ''
|
||||
# Cleanup previous service
|
||||
${pkgs.findutils}/bin/find -H "$WORK_DIRECTORY" -mindepth 1 -delete
|
||||
|
||||
# Link _diag dir
|
||||
ln -s "$LOGS_DIRECTORY" "$WORK_DIRECTORY/_diag"
|
||||
|
||||
|
|
|
@ -577,7 +577,7 @@ in {
|
|||
};
|
||||
};
|
||||
# Enable periodic clear-docker-cache script
|
||||
systemd.services.gitlab-runner-clear-docker-cache = {
|
||||
systemd.services.gitlab-runner-clear-docker-cache = mkIf (cfg.clear-docker-cache.enable && (any (s: s.executor == "docker") (attrValues cfg.services))) {
|
||||
description = "Prune gitlab-runner docker resources";
|
||||
restartIfChanged = false;
|
||||
unitConfig.X-StopOnRemoval = false;
|
||||
|
@ -590,7 +590,7 @@ in {
|
|||
${pkgs.gitlab-runner}/bin/clear-docker-cache ${toString cfg.clear-docker-cache.flags}
|
||||
'';
|
||||
|
||||
startAt = optional cfg.clear-docker-cache.enable cfg.clear-docker-cache.dates;
|
||||
startAt = cfg.clear-docker-cache.dates;
|
||||
};
|
||||
# Enable docker if `docker` executor is used in any service
|
||||
virtualisation.docker.enable = mkIf (
|
||||
|
|
38
third_party/nixpkgs/nixos/modules/services/desktops/pipewire/daemon/pipewire-aes67.conf.json
vendored
Normal file
|
@ -0,0 +1,38 @@
|
|||
{
|
||||
"context.properties": {},
|
||||
"context.modules": [
|
||||
{
|
||||
"name": "libpipewire-module-rt",
|
||||
"args": {
|
||||
"nice.level": -11
|
||||
},
|
||||
"flags": [
|
||||
"ifexists",
|
||||
"nofail"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-protocol-native"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-client-node"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-adapter"
|
||||
},
|
||||
{
|
||||
"name": "libpipewire-module-rtp-source",
|
||||
"args": {
|
||||
"sap.ip": "239.255.255.255",
|
||||
"sap.port": 9875,
|
||||
"sess.latency.msec": 10,
|
||||
"local.ifname": "eth0",
|
||||
"stream.props": {
|
||||
"media.class": "Audio/Source",
|
||||
"node.virtual": false,
|
||||
"device.api": "aes67"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
|
@ -3,10 +3,10 @@
|
|||
"link.max-buffers": 16,
|
||||
"core.daemon": true,
|
||||
"core.name": "pipewire-0",
|
||||
"default.clock.min-quantum": 16,
|
||||
"vm.overrides": {
|
||||
"default.clock.min-quantum": 1024
|
||||
}
|
||||
},
|
||||
"module.x11.bell": true
|
||||
},
|
||||
"context.spa-libs": {
|
||||
"audio.convert.*": "audioconvert/libspa-audioconvert",
|
||||
|
@ -77,6 +77,11 @@
|
|||
"flags": [
|
||||
"ifexists",
|
||||
"nofail"
|
||||
],
|
||||
"condition": [
|
||||
{
|
||||
"module.x11.bell": true
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
|
|
|
@ -809,7 +809,7 @@ in
|
|||
// optionalAttrs (cfg.relayHost != "") { relayhost = if cfg.lookupMX
|
||||
then "${cfg.relayHost}:${toString cfg.relayPort}"
|
||||
else "[${cfg.relayHost}]:${toString cfg.relayPort}"; }
|
||||
// optionalAttrs config.networking.enableIPv6 { inet_protocols = mkDefault "all"; }
|
||||
// optionalAttrs (!config.networking.enableIPv6) { inet_protocols = mkDefault "ipv4"; }
|
||||
// optionalAttrs (cfg.networks != null) { mynetworks = cfg.networks; }
|
||||
// optionalAttrs (cfg.networksStyle != "") { mynetworks_style = cfg.networksStyle; }
|
||||
// optionalAttrs (cfg.hostname != "") { myhostname = cfg.hostname; }
|
||||
|
|
|
@ -288,11 +288,11 @@ in
|
|||
LimitNOFILE = 65535;
|
||||
EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
|
||||
LoadCredential = cfg.loadCredential;
|
||||
ExecStartPre = ''
|
||||
ExecStartPre = [''
|
||||
${pkgs.envsubst}/bin/envsubst \
|
||||
-i ${configurationYaml} \
|
||||
-o /run/dendrite/dendrite.yaml
|
||||
'';
|
||||
''];
|
||||
ExecStart = lib.strings.concatStringsSep " " ([
|
||||
"${pkgs.dendrite}/bin/dendrite-monolith-server"
|
||||
"--config /run/dendrite/dendrite.yaml"
|
||||
|
|
|
@ -31,7 +31,7 @@ let
|
|||
"m.homeserver".base_url = "https://${fqdn}";
|
||||
"m.identity_server" = {};
|
||||
};
|
||||
serverConfig."m.server" = "${config.services.matrix-synapse.settings.server_name}:443";
|
||||
serverConfig."m.server" = "${fqdn}:443";
|
||||
mkWellKnown = data: ''
|
||||
add_header Content-Type application/json;
|
||||
add_header Access-Control-Allow-Origin *;
|
||||
|
|
|
@ -5,7 +5,7 @@ with lib;
|
|||
let
|
||||
cfg = config.services.gitea;
|
||||
opt = options.services.gitea;
|
||||
gitea = cfg.package;
|
||||
exe = lib.getExe cfg.package;
|
||||
pg = config.services.postgresql;
|
||||
useMysql = cfg.database.type == "mysql";
|
||||
usePostgresql = cfg.database.type == "postgres";
|
||||
|
@ -248,7 +248,7 @@ in
|
|||
|
||||
staticRootPath = mkOption {
|
||||
type = types.either types.str types.path;
|
||||
default = gitea.data;
|
||||
default = cfg.package.data;
|
||||
defaultText = literalExpression "package.data";
|
||||
example = "/var/lib/gitea/data";
|
||||
description = lib.mdDoc "Upper level of template and static files path.";
|
||||
|
@ -481,14 +481,14 @@ in
|
|||
|
||||
# If we have a folder or symlink with gitea locales, remove it
|
||||
# And symlink the current gitea locales in place
|
||||
"L+ '${cfg.stateDir}/conf/locale' - - - - ${gitea.out}/locale"
|
||||
"L+ '${cfg.stateDir}/conf/locale' - - - - ${cfg.package.out}/locale"
|
||||
];
|
||||
|
||||
systemd.services.gitea = {
|
||||
description = "gitea";
|
||||
after = [ "network.target" ] ++ lib.optional usePostgresql "postgresql.service" ++ lib.optional useMysql "mysql.service";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
path = [ gitea pkgs.git pkgs.gnupg ];
|
||||
path = [ cfg.package pkgs.git pkgs.gnupg ];
|
||||
|
||||
# In older versions the secret naming for JWT was kind of confusing.
|
||||
# The file jwt_secret hold the value for LFS_JWT_SECRET and JWT_SECRET
|
||||
|
@ -512,7 +512,7 @@ in
|
|||
cp -f ${configFile} ${runConfig}
|
||||
|
||||
if [ ! -s ${secretKey} ]; then
|
||||
${gitea}/bin/gitea generate secret SECRET_KEY > ${secretKey}
|
||||
${exe} generate secret SECRET_KEY > ${secretKey}
|
||||
fi
|
||||
|
||||
# Migrate LFS_JWT_SECRET filename
|
||||
|
@ -521,15 +521,15 @@ in
|
|||
fi
|
||||
|
||||
if [ ! -s ${oauth2JwtSecret} ]; then
|
||||
${gitea}/bin/gitea generate secret JWT_SECRET > ${oauth2JwtSecret}
|
||||
${exe} generate secret JWT_SECRET > ${oauth2JwtSecret}
|
||||
fi
|
||||
|
||||
if [ ! -s ${lfsJwtSecret} ]; then
|
||||
${gitea}/bin/gitea generate secret LFS_JWT_SECRET > ${lfsJwtSecret}
|
||||
${exe} generate secret LFS_JWT_SECRET > ${lfsJwtSecret}
|
||||
fi
|
||||
|
||||
if [ ! -s ${internalToken} ]; then
|
||||
${gitea}/bin/gitea generate secret INTERNAL_TOKEN > ${internalToken}
|
||||
${exe} generate secret INTERNAL_TOKEN > ${internalToken}
|
||||
fi
|
||||
|
||||
chmod u+w '${runConfig}'
|
||||
|
@ -548,15 +548,15 @@ in
|
|||
''}
|
||||
|
||||
# run migrations/init the database
|
||||
${gitea}/bin/gitea migrate
|
||||
${exe} migrate
|
||||
|
||||
# update all hooks' binary paths
|
||||
${gitea}/bin/gitea admin regenerate hooks
|
||||
${exe} admin regenerate hooks
|
||||
|
||||
# update command option in authorized_keys
|
||||
if [ -r ${cfg.stateDir}/.ssh/authorized_keys ]
|
||||
then
|
||||
${gitea}/bin/gitea admin regenerate keys
|
||||
${exe} admin regenerate keys
|
||||
fi
|
||||
'';
|
||||
|
||||
|
@ -565,7 +565,7 @@ in
|
|||
User = cfg.user;
|
||||
Group = "gitea";
|
||||
WorkingDirectory = cfg.stateDir;
|
||||
ExecStart = "${gitea}/bin/gitea web --pid /run/gitea/gitea.pid";
|
||||
ExecStart = "${exe} web --pid /run/gitea/gitea.pid";
|
||||
Restart = "always";
|
||||
# Runtime directory and mode
|
||||
RuntimeDirectory = "gitea";
|
||||
|
@ -597,7 +597,7 @@ in
|
|||
PrivateMounts = true;
|
||||
# System Call Filtering
|
||||
SystemCallArchitectures = "native";
|
||||
SystemCallFilter = "~@clock @cpu-emulation @debug @keyring @memlock @module @mount @obsolete @raw-io @reboot @setuid @swap";
|
||||
SystemCallFilter = "~@clock @cpu-emulation @debug @keyring @module @mount @obsolete @raw-io @reboot @setuid @swap";
|
||||
};
|
||||
|
||||
environment = {
|
||||
|
@ -635,7 +635,7 @@ in
|
|||
systemd.services.gitea-dump = mkIf cfg.dump.enable {
|
||||
description = "gitea dump";
|
||||
after = [ "gitea.service" ];
|
||||
path = [ gitea ];
|
||||
path = [ cfg.package ];
|
||||
|
||||
environment = {
|
||||
USER = cfg.user;
|
||||
|
@ -646,7 +646,7 @@ in
|
|||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
User = cfg.user;
|
||||
ExecStart = "${gitea}/bin/gitea dump --type ${cfg.dump.type}" + optionalString (cfg.dump.file != null) " --file ${cfg.dump.file}";
|
||||
ExecStart = "${exe} dump --type ${cfg.dump.type}" + optionalString (cfg.dump.file != null) " --file ${cfg.dump.file}";
|
||||
WorkingDirectory = cfg.dump.backupDir;
|
||||
};
|
||||
};
|
||||
|
@ -658,5 +658,5 @@ in
|
|||
timerConfig.OnCalendar = cfg.dump.interval;
|
||||
};
|
||||
};
|
||||
meta.maintainers = with lib.maintainers; [ srhb ma27 ];
|
||||
meta.maintainers = with lib.maintainers; [ srhb ma27 thehedgeh0g ];
|
||||
}
|
||||
|
|
|
@ -135,7 +135,7 @@ in
|
|||
}
|
||||
{
|
||||
assertion = (cfg.configFile != null) != (cfg.settings != null);
|
||||
message = "You need to either specify services.klipper.settings or services.klipper.defaultConfig.";
|
||||
message = "You need to either specify services.klipper.settings or services.klipper.configFile.";
|
||||
}
|
||||
];
|
||||
|
||||
|
|
|
@ -6,6 +6,7 @@ let
|
|||
pkg = cfg.package;
|
||||
|
||||
defaultUser = "paperless";
|
||||
nltkDir = "/var/cache/paperless/nltk";
|
||||
|
||||
# Don't start a redis instance if the user sets a custom redis connection
|
||||
enableRedis = !hasAttr "PAPERLESS_REDIS" cfg.extraConfig;
|
||||
|
@ -15,6 +16,7 @@ let
|
|||
PAPERLESS_DATA_DIR = cfg.dataDir;
|
||||
PAPERLESS_MEDIA_ROOT = cfg.mediaDir;
|
||||
PAPERLESS_CONSUMPTION_DIR = cfg.consumptionDir;
|
||||
PAPERLESS_NLTK_DIR = nltkDir;
|
||||
GUNICORN_CMD_ARGS = "--bind=${cfg.address}:${toString cfg.port}";
|
||||
} // optionalAttrs (config.time.timeZone != null) {
|
||||
PAPERLESS_TIME_ZONE = config.time.timeZone;
|
||||
|
@ -24,9 +26,11 @@ let
|
|||
lib.mapAttrs (_: toString) cfg.extraConfig
|
||||
);
|
||||
|
||||
manage = let
|
||||
manage =
|
||||
let
|
||||
setupEnv = lib.concatStringsSep "\n" (mapAttrsToList (name: val: "export ${name}=\"${val}\"") env);
|
||||
in pkgs.writeShellScript "manage" ''
|
||||
in
|
||||
pkgs.writeShellScript "manage" ''
|
||||
${setupEnv}
|
||||
exec ${pkg}/bin/paperless-ngx "$@"
|
||||
'';
|
||||
|
@ -47,6 +51,7 @@ let
|
|||
cfg.dataDir
|
||||
cfg.mediaDir
|
||||
];
|
||||
CacheDirectory = "paperless";
|
||||
CapabilityBoundingSet = "";
|
||||
# ProtectClock adds DeviceAllow=char-rtc r
|
||||
DeviceAllow = "";
|
||||
|
@ -170,7 +175,7 @@ in
|
|||
|
||||
extraConfig = mkOption {
|
||||
type = types.attrs;
|
||||
default = {};
|
||||
default = { };
|
||||
description = lib.mdDoc ''
|
||||
Extra paperless config options.
|
||||
|
||||
|
@ -291,6 +296,33 @@ in
|
|||
};
|
||||
};
|
||||
|
||||
# Download NLTK corpus data
|
||||
systemd.services.paperless-download-nltk-data = {
|
||||
wantedBy = [ "paperless-scheduler.service" ];
|
||||
before = [ "paperless-scheduler.service" ];
|
||||
after = [ "network-online.target" ];
|
||||
serviceConfig = defaultServiceConfig // {
|
||||
User = cfg.user;
|
||||
Type = "oneshot";
|
||||
# Enable internet access
|
||||
PrivateNetwork = false;
|
||||
# Restrict write access
|
||||
BindPaths = [];
|
||||
BindReadOnlyPaths = [
|
||||
"/nix/store"
|
||||
"-/etc/resolv.conf"
|
||||
"-/etc/nsswitch.conf"
|
||||
"-/etc/ssl/certs"
|
||||
"-/etc/static/ssl/certs"
|
||||
"-/etc/hosts"
|
||||
"-/etc/localtime"
|
||||
];
|
||||
ExecStart = let pythonWithNltk = pkg.python.withPackages (ps: [ ps.nltk ]); in ''
|
||||
${pythonWithNltk}/bin/python -m nltk.downloader -d '${nltkDir}' punkt snowball_data stopwords
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.paperless-consumer = {
|
||||
description = "Paperless document consumer";
|
||||
# Bind to `paperless-scheduler` so that the consumer never runs
|
||||
|
|
|
@ -269,6 +269,10 @@ in
|
|||
assertion = cfg.filterForward -> config.networking.nftables.enable;
|
||||
message = "filterForward only works with the nftables based firewall";
|
||||
}
|
||||
{
|
||||
assertion = cfg.autoLoadConntrackHelpers -> lib.versionOlder config.boot.kernelPackages.kernel.version "6";
|
||||
message = "conntrack helper autoloading has been removed from kernel 6.0 and newer";
|
||||
}
|
||||
];
|
||||
|
||||
networking.firewall.trustedInterfaces = [ "lo" ];
|
||||
|
|
|
@ -299,17 +299,51 @@ in {
|
|||
'';
|
||||
};
|
||||
|
||||
domain_map = mkOption {
|
||||
type = types.attrsOf types.str;
|
||||
default = {};
|
||||
scope = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = ["openid" "profile" "email"];
|
||||
description = lib.mdDoc ''
|
||||
Domain map is used to map incomming users (by their email) to
|
||||
a namespace. The key can be a string, or regex.
|
||||
Scopes used in the OIDC flow.
|
||||
'';
|
||||
};
|
||||
|
||||
extra_params = mkOption {
|
||||
type = types.attrsOf types.str;
|
||||
default = { };
|
||||
description = lib.mdDoc ''
|
||||
Custom query parameters to send with the Authorize Endpoint request.
|
||||
'';
|
||||
example = {
|
||||
".*" = "default-namespace";
|
||||
domain_hint = "example.com";
|
||||
};
|
||||
};
|
||||
|
||||
allowed_domains = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [ ];
|
||||
description = lib.mdDoc ''
|
||||
Allowed principal domains. If an authenticated user's domain
|
||||
is not in this list, the authentication request will be rejected.
|
||||
'';
|
||||
example = [ "example.com" ];
|
||||
};
|
||||
|
||||
allowed_users = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [ ];
|
||||
description = lib.mdDoc ''
|
||||
Users allowed to authenticate even if not in allowedDomains.
|
||||
'';
|
||||
example = [ "alice@example.com" ];
|
||||
};
|
||||
|
||||
strip_email_domain = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = lib.mdDoc ''
|
||||
Whether the domain part of the email address should be removed when generating namespaces.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
tls_letsencrypt_hostname = mkOption {
|
||||
|
@ -392,13 +426,16 @@ in {
|
|||
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "issuer"] ["services" "headscale" "settings" "oidc" "issuer"])
|
||||
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientId"] ["services" "headscale" "settings" "oidc" "client_id"])
|
||||
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientSecretFile"] ["services" "headscale" "settings" "oidc" "client_secret_file"])
|
||||
(mkRenamedOptionModule ["services" "headscale" "openIdConnect" "domainMap"] ["services" "headscale" "settings" "oidc" "domain_map"])
|
||||
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "hostname"] ["services" "headscale" "settings" "tls_letsencrypt_hostname"])
|
||||
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "challengeType"] ["services" "headscale" "settings" "tls_letsencrypt_challenge_type"])
|
||||
(mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "httpListen"] ["services" "headscale" "settings" "tls_letsencrypt_listen"])
|
||||
(mkRenamedOptionModule ["services" "headscale" "tls" "certFile"] ["services" "headscale" "settings" "tls_cert_path"])
|
||||
(mkRenamedOptionModule ["services" "headscale" "tls" "keyFile"] ["services" "headscale" "settings" "tls_key_path"])
|
||||
(mkRenamedOptionModule ["services" "headscale" "aclPolicyFile"] ["services" "headscale" "settings" "acl_policy_path"])
|
||||
|
||||
(mkRemovedOptionModule ["services" "headscale" "openIdConnect" "domainMap"] ''
|
||||
Headscale no longer uses domain_map. If you're using an old version of headscale you can still set this option via services.headscale.settings.oidc.domain_map.
|
||||
'')
|
||||
];
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
|
|
@ -10,9 +10,12 @@ in {
|
|||
|
||||
address = mkOption {
|
||||
type = types.str;
|
||||
default = "";
|
||||
description = mdDoc "Bind address. Corresponds to the `-a` flag.";
|
||||
example = "localhost";
|
||||
default = "localhost";
|
||||
description = mdDoc ''
|
||||
Bind address. Corresponds to the `-a` flag.
|
||||
Set to `""` to bind to all addresses.
|
||||
'';
|
||||
example = "[::1]";
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
|
|
|
@ -28,6 +28,32 @@ in
|
|||
<https://wiki.nftables.org/wiki-nftables/index.php/Troubleshooting#Question_4._How_do_nftables_and_iptables_interact_when_used_on_the_same_system.3F>.
|
||||
'';
|
||||
};
|
||||
|
||||
networking.nftables.checkRuleset = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = lib.mdDoc ''
|
||||
Run `nft check` on the ruleset to spot syntax errors during build.
|
||||
Because this is executed in a sandbox, the check might fail if it requires
|
||||
access to any environmental factors or paths outside the Nix store.
|
||||
To circumvent this, the ruleset file can be edited using the preCheckRuleset
|
||||
option to work in the sandbox environment.
|
||||
'';
|
||||
};
|
||||
|
||||
networking.nftables.preCheckRuleset = mkOption {
|
||||
type = types.lines;
|
||||
default = "";
|
||||
example = lib.literalExpression ''
|
||||
sed 's/skgid meadow/skgid nogroup/g' -i ruleset.conf
|
||||
'';
|
||||
description = lib.mdDoc ''
|
||||
This script gets run before the ruleset is checked. It can be used to
|
||||
create additional files needed for the ruleset check to work, or modify
|
||||
the ruleset for cases the build environment cannot cover.
|
||||
'';
|
||||
};
|
||||
|
||||
networking.nftables.ruleset = mkOption {
|
||||
type = types.lines;
|
||||
default = "";
|
||||
|
@ -105,13 +131,24 @@ in
|
|||
wantedBy = [ "multi-user.target" ];
|
||||
reloadIfChanged = true;
|
||||
serviceConfig = let
|
||||
rulesScript = pkgs.writeScript "nftables-rules" ''
|
||||
rulesScript = pkgs.writeTextFile {
|
||||
name = "nftables-rules";
|
||||
executable = true;
|
||||
text = ''
|
||||
#! ${pkgs.nftables}/bin/nft -f
|
||||
flush ruleset
|
||||
${if cfg.rulesetFile != null then ''
|
||||
include "${cfg.rulesetFile}"
|
||||
'' else cfg.ruleset}
|
||||
'';
|
||||
checkPhase = lib.optionalString cfg.checkRuleset ''
|
||||
cp $out ruleset.conf
|
||||
${cfg.preCheckRuleset}
|
||||
export NIX_REDIRECTS=/etc/protocols=${pkgs.buildPackages.iana-etc}/etc/protocols:/etc/services=${pkgs.buildPackages.iana-etc}/etc/services
|
||||
LD_PRELOAD="${pkgs.buildPackages.libredirect}/lib/libredirect.so ${pkgs.buildPackages.lklWithFirewall.lib}/lib/liblkl-hijack.so" \
|
||||
${pkgs.buildPackages.nftables}/bin/nft --check --file ruleset.conf
|
||||
'';
|
||||
};
|
||||
in {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
|
|
|
@ -14,7 +14,6 @@ let
|
|||
path = makeBinPath (getAttr "openvpn-${name}" config.systemd.services).path;
|
||||
|
||||
upScript = ''
|
||||
#! /bin/sh
|
||||
export PATH=${path}
|
||||
|
||||
# For convenience in client scripts, extract the remote domain
|
||||
|
@ -34,7 +33,6 @@ let
|
|||
'';
|
||||
|
||||
downScript = ''
|
||||
#! /bin/sh
|
||||
export PATH=${path}
|
||||
${optionalString cfg.updateResolvConf
|
||||
"${pkgs.update-resolv-conf}/libexec/openvpn/update-resolv-conf"}
|
||||
|
@ -47,9 +45,9 @@ let
|
|||
${optionalString (cfg.up != "" || cfg.down != "" || cfg.updateResolvConf) "script-security 2"}
|
||||
${cfg.config}
|
||||
${optionalString (cfg.up != "" || cfg.updateResolvConf)
|
||||
"up ${pkgs.writeScript "openvpn-${name}-up" upScript}"}
|
||||
"up ${pkgs.writeShellScript "openvpn-${name}-up" upScript}"}
|
||||
${optionalString (cfg.down != "" || cfg.updateResolvConf)
|
||||
"down ${pkgs.writeScript "openvpn-${name}-down" downScript}"}
|
||||
"down ${pkgs.writeShellScript "openvpn-${name}-down" downScript}"}
|
||||
${optionalString (cfg.authUserPass != null)
|
||||
"auth-user-pass ${pkgs.writeText "openvpn-credentials-${name}" ''
|
||||
${cfg.authUserPass.username}
|
||||
|
|
|
@ -11,6 +11,14 @@ in
|
|||
services.teleport = with lib.types; {
|
||||
enable = mkEnableOption (lib.mdDoc "the Teleport service");
|
||||
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
default = pkgs.teleport;
|
||||
defaultText = lib.literalMD "pkgs.teleport";
|
||||
example = lib.literalMD "pkgs.teleport_11";
|
||||
description = lib.mdDoc "The teleport package to use";
|
||||
};
|
||||
|
||||
settings = mkOption {
|
||||
type = settingsYaml.type;
|
||||
default = { };
|
||||
|
@ -74,14 +82,14 @@ in
|
|||
};
|
||||
|
||||
config = mkIf config.services.teleport.enable {
|
||||
environment.systemPackages = [ pkgs.teleport ];
|
||||
environment.systemPackages = [ cfg.package ];
|
||||
|
||||
systemd.services.teleport = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = ''
|
||||
${pkgs.teleport}/bin/teleport start \
|
||||
${cfg.package}/bin/teleport start \
|
||||
${optionalString cfg.insecure.enable "--insecure"} \
|
||||
${optionalString cfg.diag.enable "--diag-addr=${cfg.diag.addr}:${toString cfg.diag.port}"} \
|
||||
${optionalString (cfg.settings != { }) "--config=${settingsYaml.generate "teleport.yaml" cfg.settings}"}
|
||||
|
|
|
@ -286,6 +286,8 @@ in {
|
|||
LockPersonality = true;
|
||||
RestrictSUIDSGID = true;
|
||||
|
||||
ReadWritePaths = [ cfg.stateDir ];
|
||||
|
||||
Restart = "on-failure";
|
||||
RestartSec = "5s";
|
||||
};
|
||||
|
|
|
@ -199,12 +199,16 @@ in
|
|||
# java.security.AccessControlException:
|
||||
# access denied ("java.io.FilePermission" "/var/lib/opensearch/config/opensearch.yml" "read")
|
||||
|
||||
rm -f ${configDir}/opensearch.yml
|
||||
cp ${opensearchYml} ${configDir}/opensearch.yml
|
||||
|
||||
# Make sure the logging configuration for old OpenSearch versions is removed:
|
||||
rm -f "${configDir}/logging.yml"
|
||||
rm -f ${configDir}/${loggingConfigFilename}
|
||||
cp ${loggingConfigFile} ${configDir}/${loggingConfigFilename}
|
||||
mkdir -p ${configDir}/scripts
|
||||
|
||||
rm -f ${configDir}/jvm.options
|
||||
cp ${cfg.package}/config/jvm.options ${configDir}/jvm.options
|
||||
|
||||
# redirect jvm logs to the data directory
|
||||
|
|
128
third_party/nixpkgs/nixos/modules/services/search/qdrant.nix
vendored
Normal file
|
@ -0,0 +1,128 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
let
|
||||
|
||||
cfg = config.services.qdrant;
|
||||
|
||||
settingsFormat = pkgs.formats.yaml { };
|
||||
configFile = settingsFormat.generate "config.yaml" cfg.settings;
|
||||
in {
|
||||
|
||||
options = {
|
||||
services.qdrant = {
|
||||
enable = mkEnableOption (lib.mdDoc "Vector Search Engine for the next generation of AI applications");
|
||||
|
||||
settings = mkOption {
|
||||
description = lib.mdDoc ''
|
||||
Configuration for Qdrant
|
||||
Refer to <https://github.com/qdrant/qdrant/blob/master/config/config.yaml> for details on supported values.
|
||||
'';
|
||||
|
||||
type = settingsFormat.type;
|
||||
|
||||
example = {
|
||||
storage = {
|
||||
storage_path = "/var/lib/qdrant/storage";
|
||||
snapshots_path = "/var/lib/qdrant/snapshots";
|
||||
};
|
||||
hnsw_index = {
|
||||
on_disk = true;
|
||||
};
|
||||
service = {
|
||||
host = "127.0.0.1";
|
||||
http_port = 6333;
|
||||
grpc_port = 6334;
|
||||
};
|
||||
telemetry_disabled = true;
|
||||
};
|
||||
|
||||
defaultText = literalExpression ''
|
||||
{
|
||||
storage = {
|
||||
storage_path = "/var/lib/qdrant/storage";
|
||||
snapshots_path = "/var/lib/qdrant/snapshots";
|
||||
};
|
||||
hnsw_index = {
|
||||
on_disk = true;
|
||||
};
|
||||
service = {
|
||||
host = "127.0.0.1";
|
||||
http_port = 6333;
|
||||
grpc_port = 6334;
|
||||
};
|
||||
telemetry_disabled = true;
|
||||
}
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
services.qdrant.settings = {
|
||||
storage.storage_path = mkDefault "/var/lib/qdrant/storage";
|
||||
storage.snapshots_path = mkDefault "/var/lib/qdrant/snapshots";
|
||||
# The following default values are the same as in the default config,
|
||||
# they are just written here for convenience.
|
||||
storage.on_disk_payload = mkDefault true;
|
||||
storage.wal.wal_capacity_mb = mkDefault 32;
|
||||
storage.wal.wal_segments_ahead = mkDefault 0;
|
||||
storage.performance.max_search_threads = mkDefault 0;
|
||||
storage.performance.max_optimization_threads = mkDefault 1;
|
||||
storage.optimizers.deleted_threshold = mkDefault 0.2;
|
||||
storage.optimizers.vacuum_min_vector_number = mkDefault 1000;
|
||||
storage.optimizers.default_segment_number = mkDefault 0;
|
||||
storage.optimizers.max_segment_size_kb = mkDefault null;
|
||||
storage.optimizers.memmap_threshold_kb = mkDefault null;
|
||||
storage.optimizers.indexing_threshold_kb = mkDefault 20000;
|
||||
storage.optimizers.flush_interval_sec = mkDefault 5;
|
||||
storage.optimizers.max_optimization_threads = mkDefault 1;
|
||||
storage.hnsw_index.m = mkDefault 16;
|
||||
storage.hnsw_index.ef_construct = mkDefault 100;
|
||||
storage.hnsw_index.full_scan_threshold_kb = mkDefault 10000;
|
||||
storage.hnsw_index.max_indexing_threads = mkDefault 0;
|
||||
storage.hnsw_index.on_disk = mkDefault false;
|
||||
storage.hnsw_index.payload_m = mkDefault null;
|
||||
service.max_request_size_mb = mkDefault 32;
|
||||
service.max_workers = mkDefault 0;
|
||||
service.http_port = mkDefault 6333;
|
||||
service.grpc_port = mkDefault 6334;
|
||||
service.enable_cors = mkDefault true;
|
||||
cluster.enabled = mkDefault false;
|
||||
# the following have been altered for security
|
||||
service.host = mkDefault "127.0.0.1";
|
||||
telemetry_disabled = mkDefault true;
|
||||
};
|
||||
|
||||
systemd.services.qdrant = {
|
||||
description = "Vector Search Engine for the next generation of AI applications";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.qdrant}/bin/qdrant --config-path ${configFile}";
|
||||
DynamicUser = true;
|
||||
Restart = "on-failure";
|
||||
StateDirectory = "qdrant";
|
||||
CapabilityBoundingSet = "";
|
||||
NoNewPrivileges = true;
|
||||
PrivateTmp = true;
|
||||
ProtectHome = true;
|
||||
ProtectClock = true;
|
||||
ProtectProc = "noaccess";
|
||||
ProcSubset = "pid";
|
||||
ProtectKernelLogs = true;
|
||||
ProtectKernelModules = true;
|
||||
ProtectKernelTunables = true;
|
||||
ProtectControlGroups = true;
|
||||
ProtectHostname = true;
|
||||
RestrictSUIDSGID = true;
|
||||
RestrictRealtime = true;
|
||||
RestrictNamespaces = true;
|
||||
LockPersonality = true;
|
||||
RemoveIPC = true;
|
||||
SystemCallFilter = [ "@system-service" "~@privileged" ];
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
|
@ -318,8 +318,8 @@ to make packages available in the chroot.
|
|||
{option}`services.systemd.akkoma.serviceConfig.BindPaths` and
|
||||
{option}`services.systemd.akkoma.serviceConfig.BindReadOnlyPaths` permit access to outside paths
|
||||
through bind mounts. Refer to
|
||||
[{manpage}`systemd.exec(5)`](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#BindPaths=)
|
||||
for details.
|
||||
[`BindPaths=`](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#BindPaths=)
|
||||
of {manpage}`systemd.exec(5)` for details.
|
||||
|
||||
### Distributed deployment {#modules-services-akkoma-distributed-deployment}
|
||||
|
||||
|
|
|
@ -84,7 +84,7 @@ in {
|
|||
"-addr" "${cfg.bindIP}:${toString cfg.port}"
|
||||
"-theme" "${cfg.theme}"
|
||||
"imaps://${cfg.imaps.host}:${toString cfg.imaps.port}"
|
||||
"smpts://${cfg.smtps.host}:${toString cfg.smtps.port}"
|
||||
"smtps://${cfg.smtps.host}:${toString cfg.smtps.port}"
|
||||
];
|
||||
};
|
||||
};
|
||||
|
|
96
third_party/nixpkgs/nixos/modules/services/web-apps/nextcloud-notify_push.nix
vendored
Normal file
|
@ -0,0 +1,96 @@
|
|||
{ config, options, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.nextcloud.notify_push;
|
||||
in
|
||||
{
|
||||
options.services.nextcloud.notify_push = {
|
||||
enable = lib.mkEnableOption (lib.mdDoc "Notify push");
|
||||
|
||||
package = lib.mkOption {
|
||||
type = lib.types.package;
|
||||
default = pkgs.nextcloud-notify_push;
|
||||
defaultText = lib.literalMD "pkgs.nextcloud-notify_push";
|
||||
description = lib.mdDoc "Which package to use for notify_push";
|
||||
};
|
||||
|
||||
socketPath = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "/run/nextcloud-notify_push/sock";
|
||||
description = lib.mdDoc "Socket path to use for notify_push";
|
||||
};
|
||||
|
||||
logLevel = lib.mkOption {
|
||||
type = lib.types.enum [ "error" "warn" "info" "debug" "trace" ];
|
||||
default = "error";
|
||||
description = lib.mdDoc "Log level";
|
||||
};
|
||||
} // (
|
||||
lib.genAttrs [
|
||||
"dbtype"
|
||||
"dbname"
|
||||
"dbuser"
|
||||
"dbpassFile"
|
||||
"dbhost"
|
||||
"dbport"
|
||||
"dbtableprefix"
|
||||
] (
|
||||
opt: options.services.nextcloud.config.${opt} // {
|
||||
default = config.services.nextcloud.config.${opt};
|
||||
defaultText = "config.services.nextcloud.config.${opt}";
|
||||
}
|
||||
)
|
||||
);
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
systemd.services.nextcloud-notify_push = let
|
||||
nextcloudUrl = "http${lib.optionalString config.services.nextcloud.https "s"}://${config.services.nextcloud.hostName}";
|
||||
in {
|
||||
description = "Push daemon for Nextcloud clients";
|
||||
documentation = [ "https://github.com/nextcloud/notify_push" ];
|
||||
after = [ "phpfpm-nextcloud.service" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
environment = {
|
||||
NEXTCLOUD_URL = nextcloudUrl;
|
||||
SOCKET_PATH = cfg.socketPath;
|
||||
DATABASE_PREFIX = cfg.dbtableprefix;
|
||||
LOG = cfg.logLevel;
|
||||
};
|
||||
postStart = ''
|
||||
${config.services.nextcloud.occ}/bin/nextcloud-occ notify_push:setup ${nextcloudUrl}/push
|
||||
'';
|
||||
script = let
|
||||
dbType = if cfg.dbtype == "pgsql" then "postgresql" else cfg.dbtype;
|
||||
dbUser = lib.optionalString (cfg.dbuser != null) cfg.dbuser;
|
||||
dbPass = lib.optionalString (cfg.dbpassFile != null) ":$DATABASE_PASSWORD";
|
||||
isSocket = lib.hasPrefix "/" (toString cfg.dbhost);
|
||||
dbHost = lib.optionalString (cfg.dbhost != null) (if
|
||||
isSocket then
|
||||
if dbType == "postgresql" then "?host=${cfg.dbhost}" else
|
||||
if dbType == "mysql" then "?socket=${cfg.dbhost}" else throw "unsupported dbtype"
|
||||
else
|
||||
"@${cfg.dbhost}");
|
||||
dbName = lib.optionalString (cfg.dbname != null) "/${cfg.dbname}";
|
||||
dbUrl = "${dbType}://${dbUser}${dbPass}${lib.optionalString (!isSocket) dbHost}${dbName}${lib.optionalString isSocket dbHost}";
|
||||
in lib.optionalString (dbPass != "") ''
|
||||
export DATABASE_PASSWORD="$(<"${cfg.dbpassFile}")"
|
||||
'' + ''
|
||||
export DATABASE_URL="${dbUrl}"
|
||||
${cfg.package}/bin/notify_push --glob-config '${config.services.nextcloud.datadir}/config/config.php'
|
||||
'';
|
||||
serviceConfig = {
|
||||
User = "nextcloud";
|
||||
Group = "nextcloud";
|
||||
RuntimeDirectory = [ "nextcloud-notify_push" ];
|
||||
Restart = "on-failure";
|
||||
RestartSec = "5s";
|
||||
};
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts.${config.services.nextcloud.hostName}.locations."^~ /push/" = {
|
||||
proxyPass = "http://unix:${cfg.socketPath}";
|
||||
proxyWebsockets = true;
|
||||
recommendedProxySettings = true;
|
||||
};
|
||||
};
|
||||
}
|
|
@ -169,6 +169,9 @@ in
|
|||
};
|
||||
services.udev.packages = [
|
||||
pkgs.pantheon.gnome-settings-daemon
|
||||
# Force enable KMS modifiers for devices that require them.
|
||||
# https://gitlab.gnome.org/GNOME/mutter/-/merge_requests/1443
|
||||
pkgs.pantheon.mutter
|
||||
];
|
||||
systemd.packages = [
|
||||
pkgs.pantheon.gnome-settings-daemon
|
||||
|
|
|
@ -379,12 +379,7 @@ in
|
|||
|
||||
security.pam.services.kde = { allowNullPassword = true; };
|
||||
|
||||
# Doing these one by one seems silly, but we currently lack a better
|
||||
# construct for handling common pam configs.
|
||||
security.pam.services.gdm.enableKwallet = true;
|
||||
security.pam.services.kdm.enableKwallet = true;
|
||||
security.pam.services.lightdm.enableKwallet = true;
|
||||
security.pam.services.sddm.enableKwallet = true;
|
||||
security.pam.services.login.enableKwallet = true;
|
||||
|
||||
systemd.user.services = {
|
||||
plasma-early-setup = mkIf cfg.runUsingSystemd {
|
||||
|
|
|
@ -215,10 +215,12 @@ in
|
|||
};
|
||||
|
||||
security.pam.services = {
|
||||
sddm = {
|
||||
allowNullPassword = true;
|
||||
startSession = true;
|
||||
};
|
||||
sddm.text = ''
|
||||
auth substack login
|
||||
account include login
|
||||
password substack login
|
||||
session include login
|
||||
'';
|
||||
|
||||
sddm-greeter.text = ''
|
||||
auth required pam_succeed_if.so audit quiet_success user = sddm
|
||||
|
|
|
@ -256,7 +256,7 @@ in
|
|||
|
||||
videoDrivers = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [ "amdgpu" "radeon" "nouveau" "modesetting" "fbdev" ];
|
||||
default = [ "modesetting" "fbdev" ];
|
||||
example = [
|
||||
"nvidia" "nvidiaLegacy390" "nvidiaLegacy340" "nvidiaLegacy304"
|
||||
"amdgpu-pro"
|
||||
|
|
|
@ -1948,7 +1948,7 @@ in
|
|||
Extra command-line arguments to pass to systemd-networkd-wait-online.
|
||||
These also affect per-interface `systemd-network-wait-online@` services.
|
||||
|
||||
See [{manpage}`systemd-networkd-wait-online.service(8)`](https://www.freedesktop.org/software/systemd/man/systemd-networkd-wait-online.service.html) for all available options.
|
||||
See {manpage}`systemd-networkd-wait-online.service(8)` for all available options.
|
||||
'';
|
||||
type = with types; listOf str;
|
||||
default = [];
|
||||
|
|
|
@ -66,9 +66,7 @@ in {
|
|||
uid = config.ids.uids.systemd-coredump;
|
||||
group = "systemd-coredump";
|
||||
};
|
||||
users.groups.systemd-coredump = {
|
||||
gid = config.ids.gids.systemd-coredump;
|
||||
};
|
||||
users.groups.systemd-coredump = {};
|
||||
})
|
||||
|
||||
(mkIf (!cfg.enable) {
|
||||
|
|
|
@ -118,7 +118,7 @@ let
|
|||
name = "initrd-bin-env";
|
||||
paths = map getBin cfg.initrdBin;
|
||||
pathsToLink = ["/bin" "/sbin"];
|
||||
postBuild = concatStringsSep "\n" (mapAttrsToList (n: v: "ln -s '${v}' $out/bin/'${n}'") cfg.extraBin);
|
||||
postBuild = concatStringsSep "\n" (mapAttrsToList (n: v: "ln -sf '${v}' $out/bin/'${n}'") cfg.extraBin);
|
||||
};
|
||||
|
||||
initialRamdisk = pkgs.makeInitrdNG {
|
||||
|
|
|
@ -100,7 +100,7 @@ in
|
|||
|
||||
logDriver =
|
||||
mkOption {
|
||||
type = types.enum ["none" "json-file" "syslog" "journald" "gelf" "fluentd" "awslogs" "splunk" "etwlogs" "gcplogs"];
|
||||
type = types.enum ["none" "json-file" "syslog" "journald" "gelf" "fluentd" "awslogs" "splunk" "etwlogs" "gcplogs" "local"];
|
||||
default = "journald";
|
||||
description =
|
||||
lib.mdDoc ''
|
||||
|
|
|
@@ -44,7 +44,7 @@ in

    serviceConfig = {
      ExecStart = "${cfg.package}/bin/multipassd --logger platform --verbosity ${cfg.logLevel}";
-     SyslogIdentifer = "multipassd";
+     SyslogIdentifier = "multipassd";
      Restart = "on-failure";
      TimeoutStopSec = 300;
      Type = "simple";
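For orientation only, a sketch of enabling the service whose unit is configured above; the option path is an assumption, not taken from this diff:

{ ... }: {
  virtualisation.multipass.enable = true;  # assumed option path for the multipassd unit
}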
@@ -183,10 +183,6 @@ in

    systemd.packages = [ cfg.package ];

-   systemd.services.podman.serviceConfig = {
-     ExecStart = [ "" "${cfg.package}/bin/podman $LOGGING system service" ];
-   };
-
    systemd.services.podman-prune = {
      description = "Prune podman resources";

@@ -207,10 +203,6 @@ in
    systemd.sockets.podman.wantedBy = [ "sockets.target" ];
    systemd.sockets.podman.socketConfig.SocketGroup = "podman";

-   systemd.user.services.podman.serviceConfig = {
-     ExecStart = [ "" "${cfg.package}/bin/podman $LOGGING system service" ];
-   };
-
    systemd.user.sockets.podman.wantedBy = [ "sockets.target" ];

    systemd.tmpfiles.packages = [
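A minimal usage sketch, assuming the two hunks above belong to the virtualisation.podman module; the podman.socket unit shown above is then wanted by sockets.target:

{ ... }: {
  virtualisation.podman.enable = true;
}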
@@ -108,9 +108,9 @@ let

      set -e

-     NIX_DISK_IMAGE=$(readlink -f "''${NIX_DISK_IMAGE:-${config.virtualisation.diskImage}}")
+     NIX_DISK_IMAGE=$(readlink -f "''${NIX_DISK_IMAGE:-${toString config.virtualisation.diskImage}}") || test -z "$NIX_DISK_IMAGE"

-     if ! test -e "$NIX_DISK_IMAGE"; then
+     if test -n "$NIX_DISK_IMAGE" && ! test -e "$NIX_DISK_IMAGE"; then
        ${qemu}/bin/qemu-img create -f qcow2 "$NIX_DISK_IMAGE" \
          ${toString config.virtualisation.diskSize}M
      fi
@@ -346,7 +346,7 @@ in

    virtualisation.diskImage =
      mkOption {
-       type = types.str;
+       type = types.nullOr types.str;
        default = "./${config.system.name}.qcow2";
        defaultText = literalExpression ''"./''${config.system.name}.qcow2"'';
        description =
@@ -354,6 +354,9 @@ in
          Path to the disk image containing the root filesystem.
          The image will be created on startup if it does not
          exist.
+
+         If null, a tmpfs will be used as the root filesystem and
+         the VM's state will not be persistent.
        '';
      };

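Following the new description, a sketch of opting into the non-persistent mode in a VM configuration:

{ ... }: {
  # no qcow2 image is created; the root filesystem lives on tmpfs and state is discarded
  virtualisation.diskImage = null;
}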
@@ -990,12 +993,12 @@ in
    ];

    virtualisation.qemu.drives = mkMerge [
-     [{
+     (mkIf (cfg.diskImage != null) [{
        name = "root";
        file = ''"$NIX_DISK_IMAGE"'';
        driveExtraOpts.cache = "writeback";
        driveExtraOpts.werror = "report";
-     }]
+     }])
      (mkIf cfg.useNixStoreImage [{
        name = "nix-store";
        file = ''"$TMPDIR"/store.img'';
@@ -1018,14 +1021,15 @@ in
      }) cfg.emptyDiskImages)
    ];

+   fileSystems = mkVMOverride cfg.fileSystems;
+
    # Mount the host filesystem via 9P, and bind-mount the Nix store
    # of the host into our own filesystem. We use mkVMOverride to
    # allow this module to be applied to "normal" NixOS system
    # configuration, where the regular value for the `fileSystems'
    # attribute should be disregarded for the purpose of building a VM
    # test image (since those filesystems don't exist in the VM).
-   fileSystems =
-     let
+   virtualisation.fileSystems = let
      mkSharedDir = tag: share:
        {
          name =
@@ -1039,44 +1043,42 @@ in
          [ "trans=virtio" "version=9p2000.L" "msize=${toString cfg.msize}" ]
          ++ lib.optional (tag == "nix-store") "cache=loose";
        };
-     in
-       mkVMOverride (cfg.fileSystems //
-       optionalAttrs cfg.useDefaultFilesystems {
-         "/".device = cfg.bootDevice;
-         "/".fsType = "ext4";
-         "/".autoFormat = true;
-       } //
-       optionalAttrs config.boot.tmpOnTmpfs {
-         "/tmp" = {
+     in lib.mkMerge [
+       (lib.mapAttrs' mkSharedDir cfg.sharedDirectories)
+       {
+         "/" = lib.mkIf cfg.useDefaultFilesystems (if cfg.diskImage == null then {
+           device = "tmpfs";
+           fsType = "tmpfs";
+         } else {
+           device = cfg.bootDevice;
+           fsType = "ext4";
+           autoFormat = true;
+         });
+         "/tmp" = lib.mkIf config.boot.tmpOnTmpfs {
            device = "tmpfs";
            fsType = "tmpfs";
            neededForBoot = true;
            # Sync with systemd's tmp.mount;
            options = [ "mode=1777" "strictatime" "nosuid" "nodev" "size=${toString config.boot.tmpOnTmpfsSize}" ];
          };
-       } //
-       optionalAttrs cfg.useNixStoreImage {
-         "/nix/${if cfg.writableStore then ".ro-store" else "store"}" = {
+         "/nix/${if cfg.writableStore then ".ro-store" else "store"}" = lib.mkIf cfg.useNixStoreImage {
            device = "${lookupDriveDeviceName "nix-store" cfg.qemu.drives}";
            neededForBoot = true;
            options = [ "ro" ];
          };
-       } //
-       optionalAttrs (cfg.writableStore && cfg.writableStoreUseTmpfs) {
-         "/nix/.rw-store" = {
+         "/nix/.rw-store" = lib.mkIf (cfg.writableStore && cfg.writableStoreUseTmpfs) {
            fsType = "tmpfs";
            options = [ "mode=0755" ];
            neededForBoot = true;
          };
-       } //
-       optionalAttrs cfg.useBootLoader {
          # see note [Disk layout with `useBootLoader`]
-         "/boot" = {
+         "/boot" = lib.mkIf cfg.useBootLoader {
            device = "${lookupDriveDeviceName "boot" cfg.qemu.drives}2"; # 2 for e.g. `vdb2`, as created in `bootDisk`
            fsType = "vfat";
            noCheck = true; # fsck fails on a r/o filesystem
          };
-       } // lib.mapAttrs' mkSharedDir cfg.sharedDirectories);
+       }
+     ];

      boot.initrd.systemd = lib.mkIf (config.boot.initrd.systemd.enable && cfg.writableStore) {
        mounts = [{
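For context, a hedged sketch of the sharedDirectories value that mkSharedDir consumes above; the source/target attribute names are assumptions:

{ ... }: {
  virtualisation.sharedDirectories = {
    host-data = {                     # hypothetical share name, used as the 9p mount tag
      source = "/home/alice/data";
      target = "/mnt/data";
    };
  };
}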
@@ -81,7 +81,7 @@ in {
    extraDisk = mkOption {
      description = lib.mdDoc ''
        Optional extra disk/hdd configuration.
-       The disk will be an 'ext4' partition on a separate VMDK file.
+       The disk will be an 'ext4' partition on a separate file.
      '';
      default = null;
      example = {
@@ -183,8 +183,8 @@ in {
      export HOME=$PWD
      export PATH=${pkgs.virtualbox}/bin:$PATH

-     echo "creating VirtualBox pass-through disk wrapper (no copying involved)..."
-     VBoxManage internalcommands createrawvmdk -filename disk.vmdk -rawdisk $diskImage
+     echo "converting image to VirtualBox format..."
+     VBoxManage convertfromraw $diskImage disk.vdi

      ${optionalString (cfg.extraDisk != null) ''
        echo "creating extra disk: data-disk.raw"
@@ -196,8 +196,8 @@ in {
          mkpart primary ext4 1MiB -1
        eval $(partx $dataDiskImage -o START,SECTORS --nr 1 --pairs)
        mkfs.ext4 -F -L ${cfg.extraDisk.label} $dataDiskImage -E offset=$(sectorsToBytes $START) $(sectorsToKilobytes $SECTORS)K
-       echo "creating extra disk: data-disk.vmdk"
-       VBoxManage internalcommands createrawvmdk -filename data-disk.vmdk -rawdisk $dataDiskImage
+       echo "creating extra disk: data-disk.vdi"
+       VBoxManage convertfromraw $dataDiskImage data-disk.vdi
      ''}

      echo "creating VirtualBox VM..."
@@ -209,10 +209,10 @@ in {
        ${lib.cli.toGNUCommandLineShell { } cfg.params}
      VBoxManage storagectl "$vmName" ${lib.cli.toGNUCommandLineShell { } cfg.storageController}
      VBoxManage storageattach "$vmName" --storagectl ${cfg.storageController.name} --port 0 --device 0 --type hdd \
-       --medium disk.vmdk
+       --medium disk.vdi
      ${optionalString (cfg.extraDisk != null) ''
        VBoxManage storageattach "$vmName" --storagectl ${cfg.storageController.name} --port 1 --device 0 --type hdd \
-       --medium data-disk.vmdk
+       --medium data-disk.vdi
      ''}

      echo "exporting VirtualBox VM..."
@@ -100,7 +100,6 @@ in rec {
        (onFullSupported "nixos.tests.login")
        (onFullSupported "nixos.tests.misc")
        (onFullSupported "nixos.tests.mutableUsers")
-       (onFullSupported "nixos.tests.nat.firewall-conntrack")
        (onFullSupported "nixos.tests.nat.firewall")
        (onFullSupported "nixos.tests.nat.standalone")
        (onFullSupported "nixos.tests.networking.scripted.bond")
@@ -131,8 +130,7 @@ in rec {
        (onFullSupported "nixos.tests.networking.networkd.virtual")
        (onFullSupported "nixos.tests.networking.networkd.vlan")
        (onFullSupported "nixos.tests.systemd-networkd-ipv6-prefix-delegation")
-       # fails with kernel >= 5.15 https://github.com/NixOS/nixpkgs/pull/152505#issuecomment-1005049314
-       #(onFullSupported "nixos.tests.nfs3.simple")
+       (onFullSupported "nixos.tests.nfs3.simple")
        (onFullSupported "nixos.tests.nfs4.simple")
        (onSystems ["x86_64-linux"] "nixos.tests.oci-containers.podman")
        (onFullSupported "nixos.tests.openssh")
third_party/nixpkgs/nixos/release-small.nix
@@ -39,8 +39,7 @@ in rec {
        login
        misc
        nat
-       # fails with kernel >= 5.15 https://github.com/NixOS/nixpkgs/pull/152505#issuecomment-1005049314
-       #nfs3
+       nfs3
        openssh
        php
        predictable-interface-names
@@ -119,11 +118,9 @@ in rec {
        "nixos.tests.ipv6"
        "nixos.tests.login"
        "nixos.tests.misc"
-       "nixos.tests.nat.firewall-conntrack"
        "nixos.tests.nat.firewall"
        "nixos.tests.nat.standalone"
-       # fails with kernel >= 5.15 https://github.com/NixOS/nixpkgs/pull/152505#issuecomment-1005049314
-       #"nixos.tests.nfs3.simple"
+       "nixos.tests.nfs3.simple"
        "nixos.tests.openssh"
        "nixos.tests.php.fpm"
        "nixos.tests.php.pcre"
@@ -228,6 +228,7 @@ in {
  fluentd = handleTest ./fluentd.nix {};
  fluidd = handleTest ./fluidd.nix {};
  fontconfig-default-fonts = handleTest ./fontconfig-default-fonts.nix {};
+ forgejo = handleTest ./gitea.nix { giteaPackage = pkgs.forgejo; };
  freenet = handleTest ./freenet.nix {};
  freeswitch = handleTest ./freeswitch.nix {};
  freshrss-sqlite = handleTest ./freshrss-sqlite.nix {};
@@ -241,7 +242,7 @@ in {
  geth = handleTest ./geth.nix {};
  ghostunnel = handleTest ./ghostunnel.nix {};
  gitdaemon = handleTest ./gitdaemon.nix {};
- gitea = handleTest ./gitea.nix {};
+ gitea = handleTest ./gitea.nix { giteaPackage = pkgs.gitea; };
  gitlab = handleTest ./gitlab.nix {};
  gitolite = handleTest ./gitolite.nix {};
  gitolite-fcgiwrap = handleTest ./gitolite-fcgiwrap.nix {};
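The test file is now parametrised over the package, so other entries can reuse it; a hypothetical sketch in the style of the forgejo line added above (my-gitea-fork is not a real package):

my-fork = handleTest ./gitea.nix { giteaPackage = pkgs.my-gitea-fork; };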
@@ -432,10 +433,8 @@ in {
  nagios = handleTest ./nagios.nix {};
  nar-serve = handleTest ./nar-serve.nix {};
  nat.firewall = handleTest ./nat.nix { withFirewall = true; };
- nat.firewall-conntrack = handleTest ./nat.nix { withFirewall = true; withConntrackHelpers = true; };
  nat.standalone = handleTest ./nat.nix { withFirewall = false; };
  nat.nftables.firewall = handleTest ./nat.nix { withFirewall = true; nftables = true; };
- nat.nftables.firewall-conntrack = handleTest ./nat.nix { withFirewall = true; withConntrackHelpers = true; nftables = true; };
  nat.nftables.standalone = handleTest ./nat.nix { withFirewall = false; nftables = true; };
  nats = handleTest ./nats.nix {};
  navidrome = handleTest ./navidrome.nix {};
third_party/nixpkgs/nixos/tests/atuin.nix
@@ -54,7 +54,7 @@ with lib;
      client.execute("echo 'sync_address = \"http://server:${toString testPort}\"' > ~/.config/atuin/config.toml")

      # log in to atuin server on client node
-     client.succeed(f"${atuin}/bin/atuin login -u ${testUser} -p ${testPass} -k {key}")
+     client.succeed(f"${atuin}/bin/atuin login -u ${testUser} -p ${testPass} -k \"{key}\"")

      # pull records from atuin server
      client.succeed("${atuin}/bin/atuin sync -f")
@@ -1,6 +1,6 @@
import ./make-test-python.nix ({ pkgs, ... }: {
  name = "clickhouse";
- meta.maintainers = with pkgs.lib.maintainers; [ ma27 ];
+ meta.maintainers = with pkgs.lib.maintainers; [ ];

  nodes.machine = {
    services.clickhouse.enable = true;
third_party/nixpkgs/nixos/tests/gitea.nix
@@ -1,5 +1,6 @@
{ system ? builtins.currentSystem,
  config ? {},
+ giteaPackage ? pkgs.gitea,
  pkgs ? import ../.. { inherit system config; }
}:

@@ -7,10 +8,25 @@ with import ../lib/testing-python.nix { inherit system pkgs; };
with pkgs.lib;

let
+ ## gpg --faked-system-time='20230301T010000!' --quick-generate-key snakeoil ed25519 sign
+ signingPrivateKey = ''
+   -----BEGIN PGP PRIVATE KEY BLOCK-----
+
+   lFgEY/6jkBYJKwYBBAHaRw8BAQdADXiZRV8RJUyC9g0LH04wLMaJL9WTc+szbMi7
+   5fw4yP8AAQCl8EwGfzSLm/P6fCBfA3I9znFb3MEHGCCJhJ6VtKYyRw7ktAhzbmFr
+   ZW9pbIiUBBMWCgA8FiEE+wUM6VW/NLtAdSixTWQt6LZ4x50FAmP+o5ACGwMFCQPC
+   ZwAECwkIBwQVCgkIBRYCAwEAAh4FAheAAAoJEE1kLei2eMedFTgBAKQs1oGFZrCI
+   TZP42hmBTKxGAI1wg7VSdDEWTZxut/2JAQDGgo2sa4VHMfj0aqYGxrIwfP2B7JHO
+   GCqGCRf9O/hzBA==
+   =9Uy3
+   -----END PGP PRIVATE KEY BLOCK-----
+ '';
+ signingPrivateKeyId = "4D642DE8B678C79D";
+
  supportedDbTypes = [ "mysql" "postgres" "sqlite3" ];
  makeGiteaTest = type: nameValuePair type (makeTest {
-   name = "gitea-${type}";
-   meta.maintainers = with maintainers; [ aanderse kolaente ma27 ];
+   name = "${giteaPackage.pname}-${type}";
+   meta.maintainers = with maintainers; [ aanderse indeednotjames kolaente ma27 ];

    nodes = {
      server = { config, pkgs, ... }: {
@@ -18,9 +34,11 @@ let
        services.gitea = {
          enable = true;
          database = { inherit type; };
+         package = giteaPackage;
          settings.service.DISABLE_REGISTRATION = true;
+         settings."repository.signing".SIGNING_KEY = signingPrivateKeyId;
        };
-       environment.systemPackages = [ pkgs.gitea pkgs.jq ];
+       environment.systemPackages = [ giteaPackage pkgs.gnupg pkgs.jq ];
        services.openssh.enable = true;
      };
      client1 = { config, pkgs, ... }: {
@@ -56,6 +74,13 @@ let
      server.wait_for_open_port(3000)
      server.succeed("curl --fail http://localhost:3000/")

+     server.succeed(
+         "su -l gitea -c 'gpg --homedir /var/lib/gitea/data/home/.gnupg "
+         + "--import ${toString (pkgs.writeText "gitea.key" signingPrivateKey)}'"
+     )
+
+     assert "BEGIN PGP PUBLIC KEY BLOCK" in server.succeed("curl http://localhost:3000/api/v1/signing-key.gpg")
+
      server.succeed(
          "curl --fail http://localhost:3000/user/sign_up | grep 'Registration is disabled. "
          + "Please contact your site administrator.'"
third_party/nixpkgs/nixos/tests/haproxy.nix
@@ -2,7 +2,6 @@ import ./make-test-python.nix ({ pkgs, ...}: {
  name = "haproxy";
  nodes = {
    machine = { ... }: {
-     imports = [ ../modules/profiles/minimal.nix ];
      services.haproxy = {
        enable = true;
        config = ''