Project import generated by Copybara.

GitOrigin-RevId: 9f918d616c5321ad374ae6cb5ea89c9e04bf3e58
Default email 2024-07-31 10:19:44 +00:00
parent 17c115a2eb
commit 5ca88bfbb9
4121 changed files with 71962 additions and 48575 deletions


@ -44,6 +44,10 @@ indent_size = 4
indent_size = 2
indent_style = space
# Match package.json files, which are generally pulled from upstream and accepted as they are
[package.json]
indent_style = unset
# Disable file types or individual files
# some of these files may be auto-generated and/or require significant changes


@ -153,3 +153,6 @@ bdfde18037f8d9f9b641a4016c8ada4dc4cbf856
# nixos/ollama: format with nixfmt-rfc-style (#329561)
246d1ee533810ac1946d863bbd9de9b525818d56
# nixos/nvidia: apply nixfmt-rfc-style (#313440)
fbdcdde04a7caa007e825a8b822c75fab9adb2d6


@ -15,7 +15,7 @@ permissions:
jobs:
nixos:
runs-on: ubuntu-latest
if: "github.repository_owner == 'NixOS' && !contains(github.event.pull_request.title, '[skip treewide]')"
if: "!contains(github.event.pull_request.title, '[skip treewide]')"
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
@ -76,7 +76,7 @@ jobs:
if [[ -n "$source" ]] && ! nixfmt --check ${{ env.base }}/"$source" 2>/dev/null; then
echo "Ignoring file $file because it's not formatted in the base commit"
elif ! nixfmt --check "$dest"; then
unformattedFiles+=("$file")
unformattedFiles+=("$dest")
fi
done < <(git diff -z --name-status ${{ env.baseRev }} -- '*.nix')


@ -0,0 +1,128 @@
name: Check changed Nix files with nixf-tidy (experimental)
on:
pull_request_target:
types: [opened, synchronize, reopened, edited]
permissions:
contents: read
jobs:
nixos:
runs-on: ubuntu-latest
if: "!contains(github.event.pull_request.title, '[skip treewide]')"
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
# pull_request_target checks out the base branch by default
ref: refs/pull/${{ github.event.pull_request.number }}/merge
# Fetches the merge commit and its parents
fetch-depth: 2
- name: Checking out base branch
run: |
base=$(mktemp -d)
baseRev=$(git rev-parse HEAD^1)
git worktree add "$base" "$baseRev"
echo "baseRev=$baseRev" >> "$GITHUB_ENV"
echo "base=$base" >> "$GITHUB_ENV"
- name: Get Nixpkgs revision for nixf
run: |
# pin to a commit from nixpkgs-unstable to avoid e.g. building nixf
# from staging
# This should not be a URL, because it would allow PRs to run arbitrary code in CI!
rev=$(jq -r .rev ci/pinned-nixpkgs.json)
echo "url=https://github.com/NixOS/nixpkgs/archive/$rev.tar.gz" >> "$GITHUB_ENV"
- uses: cachix/install-nix-action@ba0dd844c9180cbf77aa72a116d6fbc515d0e87b # v27
with:
# explicitly enable sandbox
extra_nix_config: sandbox = true
nix_path: nixpkgs=${{ env.url }}
- name: Install nixf and jq
# provided jq is incompatible with our expression
run: "nix-env -f '<nixpkgs>' -iAP nixf jq"
- name: Check that Nix files pass nixf-tidy
run: |
# Filtering error messages we don't like
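# nixf-tidy emits a JSON array of diagnostics; drop entries whose sname is in the ignore list below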
nixf_wrapper(){
nixf-tidy --variable-lookup < "$1" | jq -r '
[
"sema-escaping-with"
]
as $ignored_errors|[.[]|select(.sname as $s|$ignored_errors|index($s)|not)]
'
}
failedFiles=()
# Don't report errors to file overview
# to avoid duplicates when editing title and description
if [[ "${{ github.event.action }}" == 'edited' ]] && [[ -z "${{ github.event.edited.changes.base }}" ]]; then
DONT_REPORT_ERROR=1
else
DONT_REPORT_ERROR=
fi
# TODO: Make this more parallel
# Loop through all Nix files touched by the PR
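# `git diff -z --name-status` emits NUL-separated records: a status letter
# (A/M/C/R...), followed by one path, or by two paths for copies and renames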
while readarray -d '' -n 2 entry && (( ${#entry[@]} != 0 )); do
type=${entry[0]}
file=${entry[1]}
case $type in
A*)
source=""
dest=$file
;;
M*)
source=$file
dest=$file
;;
C*|R*)
source=$file
read -r -d '' dest
;;
*)
echo "Ignoring file $file with type $type"
continue
esac
if [[ -n "$source" ]] && [[ "$(nixf_wrapper ${{ env.base }}/"$source")" != '[]' ]] 2>/dev/null; then
echo "Ignoring file $file because it doesn't pass nixf-tidy in the base commit"
echo # insert blank line
else
nixf_report="$(nixf_wrapper "$dest")"
if [[ "$nixf_report" != '[]' ]]; then
echo "$dest doesn't pass nixf-tidy. Reported by nixf-tidy:"
errors=$(echo "$nixf_report" | jq -r --arg dest "$dest" '
def getLCur: "line=" + (.line+1|tostring) + ",col=" + (.column|tostring);
def getRCur: "endLine=" + (.line+1|tostring) + ",endColumn=" + (.column|tostring);
def getRange: "file=\($dest)," + (.lCur|getLCur) + "," + (.rCur|getRCur);
def getBody: . as $top|(.range|getRange) + ",title="+ .sname + "::" +
(.message|sub("{}" ; ($top.args.[]|tostring)));
def getNote: "\n::notice " + (.|getBody);
def getMessage: "::error " + (.|getBody) + (if (.notes|length)>0 then
([.notes.[]|getNote]|add) else "" end);
.[]|getMessage
')
if [[ -z "$DONT_REPORT_ERROR" ]]; then
echo "$errors"
else
# just print in plain text
echo "$errors" | sed 's/^:://'
echo # add one empty line
fi
failedFiles+=("$dest")
fi
fi
done < <(git diff -z --name-status ${{ env.baseRev }} -- '*.nix')
if [[ -n "$DONT_REPORT_ERROR" ]]; then
echo "Edited the PR but didn't change the base branch, only the description/title."
echo "Not reporting errors again to avoid duplication."
echo # add one empty line
fi
if (( "${#failedFiles[@]}" > 0 )); then
echo "Some new/changed Nix files don't pass nixf-tidy."
echo "See ${{ github.event.pull_request.html_url }}/files for reported errors."
echo "If you believe this is a false positive, ping @Aleksanaa and @inclyc in this PR."
exit 1
fi


@ -27,3 +27,49 @@ devShellTools.valueToString (builtins.toFile "foo" "bar")
devShellTools.valueToString false
=> ""
```
:::
## `devShellTools.unstructuredDerivationInputEnv` {#sec-devShellTools-unstructuredDerivationInputEnv}
Convert a set of derivation attributes (as would be passed to [`derivation`]) to a set of environment variables that can be used in a shell script.
This function does not support `__structuredAttrs`, but does support `passAsFile`.
:::{.example}
## `unstructuredDerivationInputEnv` usage example
```nix
devShellTools.unstructuredDerivationInputEnv {
drvAttrs = {
name = "foo";
buildInputs = [ hello figlet ];
builder = bash;
args = [ "-c" "${./builder.sh}" ];
};
}
=> {
name = "foo";
buildInputs = "/nix/store/...-hello /nix/store/...-figlet";
builder = "/nix/store/...-bash";
}
```
Note that `args` is not included, because Nix does not add it to the builder process environment.
:::
## `devShellTools.derivationOutputEnv` {#sec-devShellTools-derivationOutputEnv}
Takes the relevant parts of a derivation and returns a set of environment variables that would be present in the derivation.
:::{.example}
## `derivationOutputEnv` usage example
```nix
let
pkg = hello;
in
devShellTools.derivationOutputEnv { outputList = pkg.outputs; outputMap = pkg; }
```
:::


@ -116,6 +116,55 @@ It has two modes:
: The `lychee` package to use.
## `shellcheck` {#tester-shellcheck}
Runs files through `shellcheck`, a static analysis tool for shell scripts.
:::{.example #ex-shellcheck}
# Run `testers.shellcheck`
A single script
```nix
testers.shellcheck {
name = "shellcheck";
src = ./script.sh;
}
```
Multiple files
```nix
let
inherit (lib) fileset;
in
testers.shellcheck {
name = "shellcheck";
src = fileset.toSource {
root = ./.;
fileset = fileset.unions [
./lib.sh
./nixbsd-activate
];
};
}
```
:::
### Inputs {#tester-shellcheck-inputs}
[`src` (path or string)]{#tester-shellcheck-param-src}
: The path to the shell script(s) to check.
This can be a single file or a directory containing shell files.
All files in `src` will be checked, so you may want to provide a `fileset`-based source instead of a whole directory.
### Return value {#tester-shellcheck-return}
A derivation that runs `shellcheck` on the given script(s).
The build will fail if `shellcheck` finds any issues.
## `testVersion` {#tester-testVersion}
Checks that the output from running a command contains the specified version string as a whole word.
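A minimal sketch of a call, using an illustrative package and version:
```nix
testers.testVersion {
  package = pkgs.hello;
  command = "hello --version"; # the command whose output is checked
  version = "2.12.1"; # must appear in the output as a whole word
}
```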


@ -60,7 +60,7 @@ stdenvNoCC.mkDerivation (
nixos-render-docs manual html \
--manpage-urls ./manpage-urls.json \
--revision ${lib.trivial.revisionWithDefault (nixpkgs.rev or "master")} \
--revision ${nixpkgs.rev or "master"} \
--stylesheet style.css \
--stylesheet highlightjs/mono-blue.css \
--script ./highlightjs/highlight.pack.js \


@ -0,0 +1,5 @@
# Interoperability Standards {#part-interoperability}
```{=include=} chapters
interoperability/cyclonedx.md
```


@ -0,0 +1,79 @@
# CycloneDX {#chap-interop-cyclonedx}
[OWASP](https://owasp.org/) [CycloneDX](https://cyclonedx.org/) is a Software [Bill of Materials](https://en.wikipedia.org/wiki/Bill_of_materials) (SBOM) standard.
The standards described here are for including Nix specific information within SBOMs in a way that is interoperable with external SBOM tooling.
## `nix` Namespace Property Taxonomy {#sec-interop.cylonedx-nix}
The following tables describe namespaces for [properties](https://cyclonedx.org/docs/1.6/json/#components_items_properties) that may be attached to components within SBOMs.
Component properties are lists of name-value pairs where values must be strings.
Properties with the same name may appear more than once.
Names and values are case-sensitive.
| Property | Description |
|------------------|-------------|
| `nix:store_path` | A Nix store path for the given component. This property should be contextualized by additional properties that describe the production of the store path, such as those from the `nix:narinfo:` and `nix:fod` namespaces. |
| Namespace | Description |
|---------------|-------------|
| [`nix:narinfo`](#sec-interop.cylonedx-narinfo) | Namespace for properties that are specific to how a component is stored as a [Nix archive](https://nixos.org/manual/nix/stable/glossary#gloss-nar) (NAR) in a [binary cache](https://nixos.org/manual/nix/stable/glossary#gloss-binary-cache). |
| [`nix:fod`](#sec-interop.cylonedx-fod) | Namespace for properties that describe a [fixed-output derivation](https://nixos.org/manual/nix/stable/glossary#gloss-fixed-output-derivation). |
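For illustration, a property list combining these namespaces could look like the following when written as a Nix value; every value here is a placeholder:
```nix
[
  { name = "nix:store_path"; value = "/nix/store/...-hello-2.12.1"; }
  { name = "nix:narinfo:compression"; value = "xz"; }
  { name = "nix:fod:method"; value = "fetchzip"; }
  { name = "nix:fod:url"; value = "https://example.org/hello-2.12.1.tar.gz"; }
  { name = "nix:fod:sha256"; value = "sha256-..."; }
]
```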
### `nix:narinfo` {#sec-interop.cylonedx-narinfo}
Narinfo properties describe component archives that may be available from binary caches.
The `nix:narinfo` properties should be accompanied by a `nix:store_path` property within the same property list.
| Property | Description |
|---------------------------|-------------|
| `nix:narinfo:store_path` | Store path for the given store component. |
| `nix:narinfo:url` | URL path component. |
| `nix:narinfo:nar_hash` | Hash of the file system object part of the component when serialized as a Nix Archive. |
| `nix:narinfo:nar_size` | Size of the component when serialized as a Nix Archive. |
| `nix:narinfo:compression` | The compression format that the component archive is in. |
| `nix:narinfo:file_hash` | A digest for the compressed component archive itself, as opposed to the data contained within. |
| `nix:narinfo:file_size` | The size of the compressed component archive itself. |
| `nix:narinfo:deriver` | The path to the derivation from which this component is produced. |
| `nix:narinfo:system` | The hardware and software platform on which this component is produced. |
| `nix:narinfo:sig` | Signatures claiming that this component is what it claims to be. |
| `nix:narinfo:ca` | Content address of this store object's file system object, used to compute its store path. |
| `nix:narinfo:references`  | A whitespace-separated list of store paths that this component references. |
### `nix:fod` {#sec-interop.cylonedx-fod}
FOD properties describe a [fixed-output derivation](https://nixos.org/manual/nix/stable/glossary#gloss-fixed-output-derivation).
The `nix:fod:method` property is required and must be accompanied by a `nix:store_path` property within the same property list.
All other properties in this namespace are method-specific.
To reproduce the build of a component, the `nix:fod:method` value is resolved to an [appropriate function](#chap-pkgs-fetchers) within Nixpkgs whose arguments intersect with the given properties.
When generating `nix:fod` properties, the method selected should be a stable function with a minimal number of arguments.
For example, `fetchFromGitHub` is commonly used within Nixpkgs but should be reduced to a call to the function by which it is implemented, `fetchzip`.
| Property | Description |
|------------------|-------------|
| `nix:fod:method` | Nixpkgs function that produces this FOD. Required. Examples: `"fetchzip"`, `"fetchgit"` |
| `nix:fod:name` | Derivation name, present when method is `"fetchzip"` |
| `nix:fod:ref` | [Git ref](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefrefaref), present when method is `"fetchgit"` |
| `nix:fod:rev` | [Git rev](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefrevisionarevision), present when method is `"fetchgit"` |
| `nix:fod:sha256` | FOD hash |
| `nix:fod:url` | URL to fetch |
`nix:fod` properties may be extracted and evaluated to a derivation using code similar to the following, assuming a fictitious function `filterPropertiesToAttrs`:
```nix
{ pkgs, filterPropertiesToAttrs, properties }:
let
fodProps = filterPropertiesToAttrs "nix:fod:" properties;
methods = {
fetchzip =
{ name, url, sha256, ... }:
pkgs.fetchzip {
inherit name url sha256;
};
};
in methods.${fodProps.method} fodProps
```
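`filterPropertiesToAttrs` is fictitious, as noted above. A minimal sketch of it, assuming `properties` is a list of `{ name, value }` pairs as in the example earlier:
```nix
{ lib }:
# Keep the properties whose name starts with `prefix`, strip that prefix
# from the name, and collect the result into an attribute set.
prefix: properties:
lib.pipe properties [
  (builtins.filter (p: lib.hasPrefix prefix p.name))
  (map (p: lib.nameValuePair (lib.removePrefix prefix p.name) p.value))
  lib.listToAttrs
]
```
Applied with the prefix `"nix:fod:"`, this yields an attribute set such as `{ method = "fetchzip"; url = ...; sha256 = ...; }`, which the `methods.${fodProps.method}` dispatch above then consumes.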


@ -162,7 +162,8 @@ following are specific to `buildPythonPackage`:
* `dontWrapPythonPrograms ? false`: Skip wrapping of Python programs.
* `permitUserSite ? false`: Skip setting the `PYTHONNOUSERSITE` environment
variable in wrapped programs.
* `pyproject`: Whether the pyproject format should be used. When set to `true`,
* `pyproject`: Whether the pyproject format should be used. As all other formats
are deprecated, it is recommended to set this to `true`. When you do so,
`pypaBuildHook` will be used, and you can add the required build dependencies
from `build-system.requires` to `build-system`. Note that the pyproject
format falls back to using `setuptools`, so you can use `pyproject = true`


@ -41,16 +41,21 @@ rustPlatform.buildRustPackage rec {
description = "Fast line-oriented regex search tool, similar to ag and ack";
homepage = "https://github.com/BurntSushi/ripgrep";
license = lib.licenses.unlicense;
maintainers = [];
maintainers = [ ];
};
}
```
`buildRustPackage` requires either a `cargoHash` (preferred) or a
`cargoSha256` attribute, computed over all crate sources of this package.
`cargoHash` supports [SRI](https://www.w3.org/TR/SRI/) hashes and should be
preferred over `cargoSha256` which was used for traditional Nix SHA-256 hashes.
For example:
`buildRustPackage` requires a `cargoHash` attribute, computed over all crate sources of this package.
::: {.warning}
`cargoSha256` is deprecated and subject to removal in favor of
`cargoHash`, which supports [SRI](https://www.w3.org/TR/SRI/) hashes.
If you are still using `cargoSha256`, you can simply replace it with
`cargoHash` and recompute the hash, or convert the original sha256 to an SRI
hash using `nix-hash --to-sri --type sha256 "<original sha256>"`.
:::
```nix
{
@ -58,7 +63,7 @@ For example:
}
```
Exception: If the application has cargo `git` dependencies, the `cargoHash`/`cargoSha256`
Exception: If the application has cargo `git` dependencies, the `cargoHash`
approach will not work, and you will need to copy the `Cargo.lock` file of the application
to nixpkgs and continue with the next section for specifying the options of the `cargoLock`
section.
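A sketch of that `cargoLock` setup; the crate name and hash below are placeholders:
```nix
rustPlatform.buildRustPackage {
  # ...
  cargoLock = {
    lockFile = ./Cargo.lock;
    # git dependencies need explicit hashes, keyed as "<crate>-<version>"
    outputHashes = {
      "some-crate-0.1.0" = lib.fakeHash; # placeholder
    };
  };
}
```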
@ -76,14 +81,6 @@ then be taken from the failed build. A fake hash can be used for
}
```
For `cargoSha256` you can use:
```nix
{
cargoSha256 = lib.fakeSha256;
}
```
Per the instructions in the [Cargo Book](https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html)
best practices guide, Rust applications should always commit the `Cargo.lock`
file in git to ensure a reproducible build. However, a few packages do not, and
@ -98,7 +95,7 @@ directory into a tar.gz archive.
The tarball with vendored dependencies contains a directory with the
package's `name`, which is normally composed of `pname` and
`version`. This means that the vendored dependencies hash
(`cargoHash`/`cargoSha256`) is dependent on the package name and
(`cargoHash`) is dependent on the package name and
version. The `cargoDepsName` attribute can be used to use another name
for the directory of vendored dependencies. For example, the hash can
be made invariant to the version by setting `cargoDepsName` to
@ -123,7 +120,7 @@ rustPlatform.buildRustPackage rec {
### Importing a `Cargo.lock` file {#importing-a-cargo.lock-file}
Using a vendored hash (`cargoHash`/`cargoSha256`) is tedious when using
Using a vendored hash (`cargoHash`) is tedious when using
`buildRustPackage` within a project, since it requires that the hash
is updated after every change to `Cargo.lock`. Therefore,
`buildRustPackage` also supports vendoring dependencies directly from
@ -645,6 +642,7 @@ builds the `retworkx` Python package. `fetchCargoTarball` and
buildPythonPackage rec {
pname = "retworkx";
version = "0.6.0";
pyproject = true;
src = fetchFromGitHub {
owner = "Qiskit";
@ -659,8 +657,6 @@ buildPythonPackage rec {
hash = "sha256-heOBK8qi2nuc/Ib+I/vLzZ1fUUD/G/KTw9d7M4Hz5O0=";
};
format = "pyproject";
nativeBuildInputs = with rustPlatform; [ cargoSetupHook maturinBuildHook ];
# ...


@ -12,4 +12,5 @@ stdenv.md
build-helpers.md
development.md
contributing.md
interoperability.md
```

File diff suppressed because it is too large.


@ -822,6 +822,12 @@
githubId = 20405311;
name = "Aksh Gupta";
};
akssri = {
email = "akssri@vakra.xyz";
github = "akssri";
githubId = 108771991;
name = "Akaya Śrīnivāsan";
};
aktaboot = {
email = "akhtaboo@protonmail.com";
github = "aktaboot";
@ -2369,6 +2375,13 @@
githubId = 164148;
name = "Ben Darwin";
};
bchmnn = {
email = "jacob.bachmann@posteo.de";
matrix = "@trilloyd:matrix.tu-berlin.de";
github = "bchmnn";
githubId = 34620799;
name = "Jacob Bachmann";
};
bdd = {
email = "bdd@mindcast.org";
github = "bdd";
@ -4680,6 +4693,11 @@
githubId = 3179832;
name = "D. Bohdan";
};
d-brasher = {
github = "d-brasher";
githubId = 175485311;
name = "D. Brasher";
};
dbrgn = {
email = "nix@dbrgn.ch";
github = "dbrgn";
@ -4762,6 +4780,12 @@
githubId = 41747605;
keys = [ { fingerprint = "6130 3BBA D7D1 BF74 EFA4 4E3B E7FE 2087 E438 0E64"; } ];
};
definfo = {
name = "Adrien SUN";
email = "hjsdbb1@gmail.com";
github = "definfo";
githubId = 66514911;
};
deifactor = {
name = "Ash Zahlen";
email = "ext0l@riseup.net";
@ -7353,7 +7377,7 @@
};
getpsyched = {
name = "Priyanshu Tripathi";
email = "priyanshu@getpsyched.dev";
email = "nixos@getpsyched.dev";
matrix = "@getpsyched:matrix.org";
github = "getpsyched";
githubId = 43472218;
@ -14907,6 +14931,12 @@
githubId = 16027994;
name = "Nathan Viets";
};
nw = {
email = "nixpkgs@nwhirschfeld.de";
github = "nwhirschfeld";
githubId = 5047052;
name = "Niclas Hirschfeld";
};
nyadiia = {
email = "nyadiia@pm.me";
github = "nyadiia";
@ -15036,6 +15066,12 @@
githubId = 158758;
name = "Oliver Dunkl";
};
odygrd = {
email = "odysseas.georgoudis@gmail.com";
github = "odygrd";
githubId = 7397786;
name = "Odysseas Georgoudis";
};
ofek = {
email = "oss@ofek.dev";
github = "ofek";
@ -15940,6 +15976,12 @@
githubId = 34967;
name = "Julius de Bruijn";
};
pinage404 = {
email = "pinage404+nixpkgs@gmail.com";
github = "pinage404";
githubId = 6325757;
name = "pinage404";
};
pineapplehunter = {
email = "peshogo+nixpkgs@gmail.com";
github = "pineapplehunter";
@ -20101,6 +20143,12 @@
githubId = 29044;
name = "Jacek Galowicz";
};
tfkhdyt = {
email = "tfkhdyt@proton.me";
name = "Taufik Hidayat";
github = "tfkhdyt";
githubId = 47195537;
};
tfmoraes = {
name = "Thiago Franco de Moraes";
github = "tfmoraes";
@ -20241,6 +20289,13 @@
githubId = 71843723;
keys = [ { fingerprint = "EEFB CC3A C529 CFD1 943D A75C BDD5 7BE9 9D55 5965"; } ];
};
thepuzzlemaker = {
name = "ThePuzzlemaker";
email = "tpzker@thepuzzlemaker.info";
github = "ThePuzzlemaker";
githubId = 12666617;
keys = [ { fingerprint = "7095 C20A 9224 3DB6 5177 07B0 968C D9D7 1C9F BB6C"; } ];
};
therealansh = {
email = "tyagiansh23@gmail.com";
github = "therealansh";
@ -21559,6 +21614,12 @@
githubId = 70410;
name = "Rahul Gopinath";
};
vsharathchandra = {
email = "chandrasharath.v@gmail.com";
github = "vsharathchandra";
githubId = 12689380;
name = "sharath chandra";
};
vskilet = {
email = "victor@sene.ovh";
github = "Vskilet";


@ -1,5 +1,17 @@
with import ../../../. { };
{
pkgs ? import ../../.. { },
}:
let
inherit (pkgs) lib stdenv mkShell;
in
mkShell {
packages = [ rustc cargo clippy rustfmt ] ++ lib.optional stdenv.isDarwin libiconv;
packages =
with pkgs;
[
rustc
cargo
clippy
rustfmt
]
++ lib.optional stdenv.isDarwin pkgs.libiconv;
}


@ -35,6 +35,7 @@ ldoc,,,,,,
lgi,,,,,,
linenoise,https://raw.githubusercontent.com/hoelzro/lua-linenoise/master/linenoise-0.9-1.rockspec,,,,,
ljsyscall,,,,,5.1,lblasc
llscheck,,,,,,mrcjkb
lmathx,,,,,5.3,alexshpilkin
lmpfrlib,,,,,5.3,alexshpilkin
loadkit,,,,,,alerque



@ -749,7 +749,10 @@ with lib.maintainers;
};
openstack = {
members = [ SuperSandro2000 ];
members = [
SuperSandro2000
anthonyroussel
];
scope = "Maintain the ecosystem around OpenStack";
shortName = "OpenStack";
};
@ -1016,10 +1019,7 @@ with lib.maintainers;
};
zig = {
members = [
AndersonTorres
figsoda
];
members = [ figsoda ];
scope = "Maintain the Zig compiler toolchain and nixpkgs integration.";
shortName = "Zig";
enableFeatureFreezePing = true;


@ -16,6 +16,8 @@
- `hardware.display` is a new module implementing workarounds for misbehaving monitors
through setting up custom EDID files and forcing kernel/framebuffer modes.
- NixOS now has support for *automatic boot assessment* (see [here](https://systemd.io/AUTOMATIC_BOOT_ASSESSMENT/) for a detailed description of the feature) for `systemd-boot` users. Available as [boot.loader.systemd-boot.bootCounting](#opt-boot.loader.systemd-boot.bootCounting.enable).
## New Services {#sec-release-24.11-new-services}
- [FlareSolverr](https://github.com/FlareSolverr/FlareSolverr), proxy server to bypass Cloudflare protection. Available as [services.flaresolverr](#opt-services.flaresolverr.enable) service.
@ -105,7 +107,7 @@
The option `services.fcgiwrap` now takes an attribute set of the
configuration of each individual instance.
This requires migrating any previous configuration keys from
`services.fcgiwrap.*` to `services.fcgiwrap.some-instance.*`.
`services.fcgiwrap.*` to `services.fcgiwrap.instances.some-instance.*`.
The ownership and mode of the UNIX sockets created by this service are now
configurable and private by default.
Processes also now run as a dynamically allocated user by default instead of
@ -138,6 +140,8 @@
Refer to upstream [upgrade instructions](https://goteleport.com/docs/management/operations/upgrading/)
and [release notes for v16](https://goteleport.com/docs/changelog/#1600-061324).
- `tests.overriding` has its `passthru.tests` restructured as an attribute set instead of a list, making individual tests accessible by their names.
- `vaultwarden` lost the capability to bind to privileged ports. If you rely on
this behavior, override the systemd unit to allow `CAP_NET_BIND_SERVICE` in
your local configuration.
@ -227,6 +231,8 @@
Explicitly set `kubelet.hostname` to `networking.fqdnOrHostName` to get back
the old default behavior.
- Docker now defaults to 27.x, because version 24.x stopped receiving security updates and bug fixes after [February 1, 2024](https://github.com/moby/moby/pull/46772#discussion_r1686464084).
- `keycloak` was updated to version 25, which introduces new hostname related options.
See [Upgrading Guide](https://www.keycloak.org/docs/25.0.1/upgrading/#migrating-to-25-0-0) for instructions.
@ -250,6 +256,9 @@
- The `services.mxisd` module has been removed as both [mxisd](https://github.com/kamax-matrix/mxisd) and [ma1sd](https://github.com/ma1uta/ma1sd) are not maintained any longer.
Consequently the package `pkgs.ma1sd` has also been removed.
- `ffmpeg_5` has been removed. Please use the unversioned `ffmpeg`,
pin a newer version, or if necessary pin `ffmpeg_4` for compatibility.
## Other Notable Changes {#sec-release-24.11-notable-changes}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@ -258,6 +267,11 @@
- The `stackclashprotection` hardening flag has been added, though disabled by default.
- `cargoSha256` in `rustPlatform.buildRustPackage` has been deprecated in favor
of `cargoHash` which supports SRI hashes. See
[buildRustPackage: Compiling Rust applications with Cargo](https://nixos.org/manual/nixpkgs/unstable/#compiling-rust-applications-with-cargo)
for more information.
- `hareHook` has been added as the language framework for Hare. From now on, it,
not the `hare` package, should be added to `nativeBuildInputs` when building
Hare programs.
@ -285,12 +299,16 @@
- Nemo is now built with gtk-layer-shell support; note that, for now, nemo-desktop is expected to appear
as a regular entry in the Cinnamon Wayland session's window list applet.
- `restic` module now has an option for inhibiting system sleep while backups are running, defaulting to off (not inhibiting sleep), available as [`services.restic.backups.<name>.inhibitsSleep`](#opt-services.restic.backups._name_.inhibitsSleep).
- Support for *runner registration tokens* has been [deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/380872)
in `gitlab-runner` 15.6 and is expected to be removed in `gitlab-runner` 18.0. Configuration of existing runners
should be changed to using *runner authentication tokens* by configuring
{option}`services.gitlab-runner.services.<name>.authenticationTokenConfigFile` instead of the former
{option}`services.gitlab-runner.services.<name>.registrationConfigFile` option.
- `iproute2` now has libbpf support.
- `nix.channel.enable = false` no longer implies `nix.settings.nix-path = []`.
Since Nix 2.13, a `nix-path` set in `nix.conf` cannot be overridden by the `NIX_PATH` environment variable.


@ -169,6 +169,10 @@ in rec {
optional (attr ? ${name} && !isInt attr.${name})
"Systemd ${group} field `${name}' is not an integer";
assertRemoved = name: see: group: attr:
optional (attr ? ${name})
"Systemd ${group} field `${name}' has been removed. See ${see}";
checkUnitConfig = group: checks: attrs: let
# We're applied at the top-level type (attrsOf unitOption), so the actual
# unit options might contain attributes from mkOverride and mkIf that we need to


@ -45,12 +45,61 @@ let
inherit (lib.types)
attrsOf
coercedTo
enum
lines
listOf
nullOr
oneOf
package
path
singleLineStr
submodule
;
initrdStorePathModule = { config, ... }: {
options = {
enable = (mkEnableOption "copying of this file and symlinking it") // { default = true; };
target = mkOption {
type = nullOr path;
description = ''
Path of the symlink.
'';
default = null;
};
source = mkOption {
type = path;
description = "Path of the source file.";
};
dlopen = {
usePriority = mkOption {
type = enum [ "required" "recommended" "suggested" ];
default = "recommended";
description = ''
Priority of dlopen ELF notes to include. "required" is
minimal, "recommended" includes "required", and
"suggested" includes "recommended".
See: https://systemd.io/ELF_DLOPEN_METADATA/
'';
};
features = mkOption {
type = listOf singleLineStr;
default = [ ];
description = ''
Features to enable via dlopen ELF notes. These will be in
addition to anything included via 'usePriority',
regardless of their priority.
'';
};
};
};
};
in
{
@ -86,31 +135,23 @@ in
automounts = listOf (submodule [ stage2AutomountOptions unitConfig automountConfig ]);
initrdAutomounts = attrsOf (submodule [ stage1AutomountOptions unitConfig automountConfig ]);
initrdStorePath = listOf (coercedTo
(oneOf [ singleLineStr package ])
(source: { inherit source; })
(submodule initrdStorePathModule));
initrdContents = attrsOf (submodule ({ config, options, name, ... }: {
imports = [ initrdStorePathModule ];
options = {
enable = (mkEnableOption "copying of this file and symlinking it") // { default = true; };
target = mkOption {
type = path;
description = ''
Path of the symlink.
'';
default = name;
};
text = mkOption {
default = null;
type = nullOr lines;
description = "Text of the file.";
};
source = mkOption {
type = path;
description = "Path of the source file.";
};
};
config = {
target = mkDefault name;
source = mkIf (config.text != null) (
let name' = "initrd-" + baseNameOf name;
in mkDerivedConfig options.text (pkgs.writeText name')


@ -1,2 +1,4 @@
with import ../../.. {};
pkgs.callPackage ./default.nix {}
{
pkgs ? import ../../.. { },
}:
pkgs.callPackage ./default.nix { }


@ -1,13 +1,16 @@
with (import ../../../../default.nix {});
stdenv.mkDerivation {
{
pkgs ? import ../../../../default.nix { },
}:
pkgs.stdenv.mkDerivation {
name = "nixcfg-azure-devenv";
nativeBuildInputs = [
nativeBuildInputs = with pkgs; [
azure-cli
bash
cacert
azure-storage-azcopy
];
AZURE_CONFIG_DIR="/tmp/azure-cli/.azure";
AZURE_CONFIG_DIR = "/tmp/azure-cli/.azure";
}


@ -12,6 +12,7 @@ let
mkDefault
mkIf
mkOption
stringAfter
types
;
@ -97,5 +98,8 @@ in
systemd.tmpfiles.rules = lib.mkIf cfg.channel.enable [
''f /root/.nix-channels - - - - ${config.system.defaultChannel} nixos\n''
];
system.activationScripts.no-nix-channel = mkIf (!cfg.channel.enable)
(stringAfter [ "etc" "users" ] (builtins.readFile ./nix-channel/activation-check.sh));
};
}


@ -0,0 +1,21 @@
# shellcheck shell=bash
explainChannelWarning=0
if [[ -e "/root/.nix-defexpr/channels" ]]; then
warn '/root/.nix-defexpr/channels exists, but channels have been disabled.'
explainChannelWarning=1
fi
if [[ -e "/nix/var/nix/profiles/per-user/root/channels" ]]; then
warn "/nix/var/nix/profiles/per-user/root/channels exists, but channels have been disabled."
explainChannelWarning=1
fi
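# getent passwd prints colon-separated records; the sixth field is the user's home directory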
while IFS=: read -r _ _ _ _ _ home _ ; do
if [[ -n "$home" && -e "$home/.nix-defexpr/channels" ]]; then
warn "$home/.nix-defexpr/channels exists, but channels have been disabled." 1>&2
explainChannelWarning=1
fi
done < <(getent passwd)
if [[ $explainChannelWarning -eq 1 ]]; then
echo "Due to https://github.com/NixOS/nix/issues/9574, Nix may still use these channels when NIX_PATH is unset." 1>&2
echo "Delete the above directory or directories to prevent this." 1>&2
fi


@ -0,0 +1,19 @@
# Run:
# nix-build -A nixosTests.nix-channel
{ lib, testers }:
let
inherit (lib) fileset;
runShellcheck = testers.shellcheck {
src = fileset.toSource {
root = ./.;
fileset = fileset.unions [
./activation-check.sh
];
};
};
in
lib.recurseIntoAttrs {
inherit runShellcheck;
}


@ -33,7 +33,6 @@ with lib;
fastfetch = super.fastfetch.override { vulkanSupport = false; waylandSupport = false; x11Support = false; };
ffmpeg = super.ffmpeg.override { ffmpegVariant = "headless"; };
ffmpeg_4 = super.ffmpeg_4.override { ffmpegVariant = "headless"; };
ffmpeg_5 = super.ffmpeg_5.override { ffmpegVariant = "headless"; };
ffmpeg_6 = super.ffmpeg_6.override { ffmpegVariant = "headless"; };
ffmpeg_7 = super.ffmpeg_7.override { ffmpegVariant = "headless"; };
# dep of graphviz, libXpm is optional for Xpm support


@ -41,6 +41,6 @@ in
};
meta = {
maintainers = with lib.maintainers; [ ];
maintainers = [ ];
};
}


@ -40,7 +40,7 @@ let
homepage = "https://downloads.linux.hpe.com/SDR/downloads/MCP/Ubuntu/pool/non-free/";
license = licenses.unfreeRedistributable;
platforms = [ "x86_64-linux" ];
maintainers = with maintainers; [ ];
maintainers = [ ];
};
};
in {


@ -102,8 +102,8 @@ in
driver causes it to provide its own framebuffer device, which can cause
Wayland compositors to work when they otherwise wouldn't.
'' // {
default = lib.versionAtLeast nvidia_x11.version "535";
defaultText = lib.literalExpression "lib.versionAtLeast nvidia_x11.version \"535\"";
default = lib.versionAtLeast cfg.package.version "535";
defaultText = lib.literalExpression "lib.versionAtLeast cfg.package.version \"535\"";
};
prime.nvidiaBusId = lib.mkOption {
@ -256,7 +256,9 @@ in
open = lib.mkEnableOption ''
the open source NVIDIA kernel module
'';
'' // {
defaultText = lib.literalExpression ''lib.versionAtLeast config.hardware.nvidia.package.version "560"'';
};
};
};
@ -305,6 +307,8 @@ in
extraPackages32 = [ nvidia_x11.lib32 ];
};
environment.systemPackages = [ nvidia_x11.bin ];
hardware.nvidia.open = lib.mkDefault (lib.versionAtLeast nvidia_x11.version "560");
})
# X11
@ -472,7 +476,6 @@ in
hardware.graphics = {
extraPackages = [ pkgs.nvidia-vaapi-driver ];
extraPackages32 = [ pkgs.pkgsi686Linux.nvidia-vaapi-driver ];
};
environment.systemPackages =


@ -304,6 +304,7 @@
./programs/wayland/hyprlock.nix
./programs/wayland/hyprland.nix
./programs/wayland/labwc.nix
./programs/wayland/miracle-wm.nix
./programs/wayland/river.nix
./programs/wayland/sway.nix
./programs/wayland/waybar.nix
@ -356,6 +357,7 @@
./security/systemd-confinement.nix
./security/tpm2.nix
./security/wrappers/default.nix
./services/accessibility/speechd.nix
./services/admin/docuum.nix
./services/admin/meshcentral.nix
./services/admin/oxidized.nix
@ -414,6 +416,7 @@
./services/blockchain/ethereum/geth.nix
./services/blockchain/ethereum/lighthouse.nix
./services/cluster/corosync/default.nix
./services/cluster/druid/default.nix
./services/cluster/hadoop/default.nix
./services/cluster/k3s/default.nix
./services/cluster/kubernetes/addon-manager.nix
@ -1242,6 +1245,7 @@
./services/networking/websockify.nix
./services/networking/wg-access-server.nix
./services/networking/wg-netmanager.nix
./services/networking/wvdial.nix
./services/networking/webhook.nix
./services/networking/wg-quick.nix
./services/networking/wgautomesh.nix


@ -126,5 +126,15 @@ with lib;
# allow nix-copy to live system
nix.settings.trusted-users = [ "root" "nixos" ];
# Install less voices for speechd to save some space
services.speechd.package = pkgs.speechd.override {
mbrola = pkgs.mbrola.override {
mbrola-voices = pkgs.mbrola-voices.override {
# only ship with one voice per language
languages = [ "*1" ];
};
};
};
};
}


@ -3,9 +3,18 @@
config,
pkgs,
...
}: let
}:
let
cfg = config.programs.direnv;
in {
enabledOption =
x:
lib.mkEnableOption x
// {
default = true;
example = false;
};
in
{
options.programs.direnv = {
enable = lib.mkEnableOption ''
@ -14,7 +23,17 @@ in {
integration. Note that you need to logout and login for this change to apply
'';
package = lib.mkPackageOption pkgs "direnv" {};
package = lib.mkPackageOption pkgs "direnv" { };
enableBashIntegration = enabledOption ''
Bash integration
'';
enableZshIntegration = enabledOption ''
Zsh integration
'';
enableFishIntegration = enabledOption ''
Fish integration
'';
direnvrcExtra = lib.mkOption {
type = lib.types.lines;
@ -32,22 +51,14 @@ in {
the hiding of direnv logging
'';
loadInNixShell =
lib.mkEnableOption ''
loading direnv in `nix-shell` `nix shell` or `nix develop`
''
// {
default = true;
};
loadInNixShell = enabledOption ''
loading direnv in `nix-shell` `nix shell` or `nix develop`
'';
nix-direnv = {
enable =
(lib.mkEnableOption ''
a faster, persistent implementation of use_nix and use_flake, to replace the built-in one
'')
// {
default = true;
};
enable = enabledOption ''
a faster, persistent implementation of use_nix and use_flake, to replace the builtin one
'';
package = lib.mkOption {
default = pkgs.nix-direnv.override { nix = config.nix.package; };
@ -60,14 +71,10 @@ in {
};
};
imports = [
(lib.mkRemovedOptionModule ["programs" "direnv" "persistDerivations"] "persistDerivations was removed as it is no longer necessary")
];
config = lib.mkIf cfg.enable {
programs = {
zsh.interactiveShellInit = ''
zsh.interactiveShellInit = lib.mkIf cfg.enableZshIntegration ''
if ${lib.boolToString cfg.loadInNixShell} || printenv PATH | grep -vqc '/nix/store'; then
eval "$(${lib.getExe cfg.package} hook zsh)"
fi
@ -75,13 +82,13 @@ in {
#$NIX_GCROOT for "nix develop" https://github.com/NixOS/nix/blob/6db66ebfc55769edd0c6bc70fcbd76246d4d26e0/src/nix/develop.cc#L530
#$IN_NIX_SHELL for "nix-shell"
bash.interactiveShellInit = ''
bash.interactiveShellInit = lib.mkIf cfg.enableBashIntegration ''
if ${lib.boolToString cfg.loadInNixShell} || [ -z "$IN_NIX_SHELL$NIX_GCROOT$(printenv PATH | grep '/nix/store')" ] ; then
eval "$(${lib.getExe cfg.package} hook bash)"
fi
'';
fish.interactiveShellInit = ''
fish.interactiveShellInit = lib.mkIf cfg.enableFishIntegration ''
if ${lib.boolToString cfg.loadInNixShell};
or printenv PATH | grep -vqc '/nix/store';
${lib.getExe cfg.package} hook fish | source
@ -90,18 +97,17 @@ in {
};
environment = {
systemPackages =
if cfg.loadInNixShell then [cfg.package]
else [
#direnv has a fish library which sources direnv for some reason
(cfg.package.overrideAttrs (old: {
installPhase =
(old.installPhase or "")
+ ''
rm -rf $out/share/fish
'';
}))
];
systemPackages = [
# direnv has a fish library which automatically sources direnv for some reason
# I don't see any harm in doing this if we're sourcing it with fish.interactiveShellInit
(pkgs.symlinkJoin {
inherit (cfg.package) name;
paths = [ cfg.package ];
postBuild = ''
rm -rf $out/share/fish
'';
})
];
variables = {
DIRENV_CONFIG = "/etc/direnv";
@ -141,4 +147,5 @@ in {
};
};
};
meta.maintainers = with lib.maintainers; [ gerg-l ];
}


@ -4,7 +4,7 @@ let
cfg = config.programs.dmrconfig;
in {
meta.maintainers = with lib.maintainers; [ ];
meta.maintainers = [ ];
###### interface
options = {


@ -47,7 +47,7 @@ let
);
driverPaths = [
pkgs.addOpenGLRunpath.driverLink
pkgs.addDriverRunpath.driverLink
# mesa:
config.hardware.opengl.package
@ -84,7 +84,7 @@ in
{
opengl.paths = config.hardware.opengl.extraPackages ++ [
config.hardware.opengl.package
pkgs.addOpenGLRunpath.driverLink
pkgs.addDriverRunpath.driverLink
"/dev/dri"
];
}


@ -0,0 +1,43 @@
{
config,
pkgs,
lib,
...
}:
let
cfg = config.programs.wayland.miracle-wm;
in
{
options.programs.wayland.miracle-wm = {
enable = lib.mkEnableOption ''
miracle-wm, a tiling Mir-based Wayland compositor. You can manually launch miracle-wm by
executing "exec miracle-wm" on a TTY, or launch it from a display manager.
Consult the USERGUIDE.md at <https://github.com/mattkae/miracle-wm> for information on
how to use & configure it
'';
};
config = lib.mkIf cfg.enable (
lib.mkMerge [
{
environment = {
systemPackages = [ pkgs.miracle-wm ];
};
# To make the miracle-wm session available if a display manager like SDDM is enabled:
services.displayManager.sessionPackages = [ pkgs.miracle-wm ];
}
(import ./wayland-session.nix {
inherit lib pkgs;
# Hardcoded path in Mir, not really possible to disable
enableXWayland = true;
# No portal support yet: https://github.com/mattkae/miracle-wm/issues/164
enableWlrPortal = false;
})
]
);
meta.maintainers = with lib.maintainers; [ OPNA2608 ];
}


@ -1,4 +1,9 @@
{ config, lib, pkgs, ...}:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.programs.wayfire;
in
@ -12,7 +17,10 @@ in
plugins = lib.mkOption {
type = lib.types.listOf lib.types.package;
default = with pkgs.wayfirePlugins; [ wcm wf-shell ];
default = with pkgs.wayfirePlugins; [
wcm
wf-shell
];
defaultText = lib.literalExpression "with pkgs.wayfirePlugins; [ wcm wf-shell ]";
example = lib.literalExpression ''
with pkgs.wayfirePlugins; [
@ -25,26 +33,39 @@ in
Additional plugins to use with the wayfire window manager.
'';
};
};
config = let
finalPackage = pkgs.wayfire-with-plugins.override {
wayfire = cfg.package;
plugins = cfg.plugins;
};
in
lib.mkIf cfg.enable {
environment.systemPackages = [
finalPackage
];
services.displayManager.sessionPackages = [ finalPackage ];
xdg.portal = {
enable = lib.mkDefault true;
wlr.enable = lib.mkDefault true;
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1050914
config.wayfire.default = lib.mkDefault [ "wlr" "gtk" ];
xwayland.enable = lib.mkEnableOption "XWayland" // {
default = true;
};
};
config =
let
finalPackage = pkgs.wayfire-with-plugins.override {
wayfire = cfg.package;
plugins = cfg.plugins;
};
in
lib.mkIf cfg.enable (
lib.mkMerge [
{
environment.systemPackages = [ finalPackage ];
services.displayManager.sessionPackages = [ finalPackage ];
xdg.portal = {
enable = lib.mkDefault true;
wlr.enable = lib.mkDefault true;
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1050914
config.wayfire.default = lib.mkDefault [
"wlr"
"gtk"
];
};
}
(import ./wayland-session.nix {
inherit lib pkgs;
enableXWayland = cfg.xwayland.enable;
})
]
);
}


@ -99,6 +99,7 @@ let
}));
};
package = config.security.pam.package;
parentConfig = config;
pamOpts = { config, name, ... }: let cfg = config; in let config = parentConfig; in {
@ -648,16 +649,16 @@ let
# The required pam_unix.so module has to come after all the sufficient modules
# because otherwise, the account lookup will fail if the user does not exist
# locally, for example with MySQL- or LDAP-auth.
{ name = "unix"; control = "required"; modulePath = "pam_unix.so"; }
{ name = "unix"; control = "required"; modulePath = "${package}/lib/security/pam_unix.so"; }
];
auth = autoOrderRules ([
{ name = "oslogin_login"; enable = cfg.googleOsLoginAuthentication; control = "[success=done perm_denied=die default=ignore]"; modulePath = "${pkgs.google-guest-oslogin}/lib/security/pam_oslogin_login.so"; }
{ name = "rootok"; enable = cfg.rootOK; control = "sufficient"; modulePath = "pam_rootok.so"; }
{ name = "wheel"; enable = cfg.requireWheel; control = "required"; modulePath = "pam_wheel.so"; settings = {
{ name = "rootok"; enable = cfg.rootOK; control = "sufficient"; modulePath = "${package}/lib/security/pam_rootok.so"; }
{ name = "wheel"; enable = cfg.requireWheel; control = "required"; modulePath = "${package}/lib/security/pam_wheel.so"; settings = {
use_uid = true;
}; }
{ name = "faillock"; enable = cfg.logFailures; control = "required"; modulePath = "pam_faillock.so"; }
{ name = "faillock"; enable = cfg.logFailures; control = "required"; modulePath = "${package}/lib/security/pam_faillock.so"; }
{ name = "mysql"; enable = cfg.mysqlAuth; control = "sufficient"; modulePath = "${pkgs.pam_mysql}/lib/security/pam_mysql.so"; settings = {
config_file = "/etc/security/pam_mysql.conf";
}; }
@ -710,7 +711,7 @@ let
|| cfg.zfs))
[
{ name = "systemd_home-early"; enable = config.services.homed.enable; control = "optional"; modulePath = "${config.systemd.package}/lib/security/pam_systemd_home.so"; }
{ name = "unix-early"; enable = cfg.unixAuth; control = "optional"; modulePath = "pam_unix.so"; settings = {
{ name = "unix-early"; enable = cfg.unixAuth; control = "optional"; modulePath = "${package}/lib/security/pam_unix.so"; settings = {
nullok = cfg.allowNullPassword;
inherit (cfg) nodelay;
likeauth = true;
@ -731,7 +732,7 @@ let
{ name = "gnupg"; enable = cfg.gnupg.enable; control = "optional"; modulePath = "${pkgs.pam_gnupg}/lib/security/pam_gnupg.so"; settings = {
store-only = cfg.gnupg.storeOnly;
}; }
{ name = "faildelay"; enable = cfg.failDelay.enable; control = "optional"; modulePath = "${pkgs.pam}/lib/security/pam_faildelay.so"; settings = {
{ name = "faildelay"; enable = cfg.failDelay.enable; control = "optional"; modulePath = "${package}/lib/security/pam_faildelay.so"; settings = {
inherit (cfg.failDelay) delay;
}; }
{ name = "google_authenticator"; enable = cfg.googleAuthenticator.enable; control = "required"; modulePath = "${pkgs.google-authenticator}/lib/security/pam_google_authenticator.so"; settings = {
@ -740,7 +741,7 @@ let
{ name = "duo"; enable = cfg.duoSecurity.enable; control = "required"; modulePath = "${pkgs.duo-unix}/lib/security/pam_duo.so"; }
]) ++ [
{ name = "systemd_home"; enable = config.services.homed.enable; control = "sufficient"; modulePath = "${config.systemd.package}/lib/security/pam_systemd_home.so"; }
{ name = "unix"; enable = cfg.unixAuth; control = "sufficient"; modulePath = "pam_unix.so"; settings = {
{ name = "unix"; enable = cfg.unixAuth; control = "sufficient"; modulePath = "${package}/lib/security/pam_unix.so"; settings = {
nullok = cfg.allowNullPassword;
inherit (cfg) nodelay;
likeauth = true;
@ -768,12 +769,12 @@ let
action = "store";
use_first_pass = true;
}; }
{ name = "deny"; control = "required"; modulePath = "pam_deny.so"; }
{ name = "deny"; control = "required"; modulePath = "${package}/lib/security/pam_deny.so"; }
]);
password = autoOrderRules [
{ name = "systemd_home"; enable = config.services.homed.enable; control = "sufficient"; modulePath = "${config.systemd.package}/lib/security/pam_systemd_home.so"; }
{ name = "unix"; control = "sufficient"; modulePath = "pam_unix.so"; settings = {
{ name = "unix"; control = "sufficient"; modulePath = "${package}/lib/security/pam_unix.so"; settings = {
nullok = true;
yescrypt = true;
}; }
@ -798,24 +799,24 @@ let
];
session = autoOrderRules [
{ name = "env"; enable = cfg.setEnvironment; control = "required"; modulePath = "pam_env.so"; settings = {
{ name = "env"; enable = cfg.setEnvironment; control = "required"; modulePath = "${package}/lib/security/pam_env.so"; settings = {
conffile = "/etc/pam/environment";
readenv = 0;
}; }
{ name = "unix"; control = "required"; modulePath = "pam_unix.so"; }
{ name = "loginuid"; enable = cfg.setLoginUid; control = if config.boot.isContainer then "optional" else "required"; modulePath = "pam_loginuid.so"; }
{ name = "tty_audit"; enable = cfg.ttyAudit.enable; control = "required"; modulePath = "${pkgs.pam}/lib/security/pam_tty_audit.so"; settings = {
{ name = "unix"; control = "required"; modulePath = "${package}/lib/security/pam_unix.so"; }
{ name = "loginuid"; enable = cfg.setLoginUid; control = if config.boot.isContainer then "optional" else "required"; modulePath = "${package}/lib/security/pam_loginuid.so"; }
{ name = "tty_audit"; enable = cfg.ttyAudit.enable; control = "required"; modulePath = "${package}/lib/security/pam_tty_audit.so"; settings = {
open_only = cfg.ttyAudit.openOnly;
enable = cfg.ttyAudit.enablePattern;
disable = cfg.ttyAudit.disablePattern;
}; }
{ name = "systemd_home"; enable = config.services.homed.enable; control = "required"; modulePath = "${config.systemd.package}/lib/security/pam_systemd_home.so"; }
{ name = "mkhomedir"; enable = cfg.makeHomeDir; control = "required"; modulePath = "${pkgs.pam}/lib/security/pam_mkhomedir.so"; settings = {
{ name = "mkhomedir"; enable = cfg.makeHomeDir; control = "required"; modulePath = "${package}/lib/security/pam_mkhomedir.so"; settings = {
silent = true;
skel = config.security.pam.makeHomeDir.skelDirectory;
inherit (config.security.pam.makeHomeDir) umask;
}; }
{ name = "lastlog"; enable = cfg.updateWtmp; control = "required"; modulePath = "${pkgs.pam}/lib/security/pam_lastlog.so"; settings = {
{ name = "lastlog"; enable = cfg.updateWtmp; control = "required"; modulePath = "${package}/lib/security/pam_lastlog.so"; settings = {
silent = true;
}; }
{ name = "ecryptfs"; enable = config.security.pam.enableEcryptfs; control = "optional"; modulePath = "${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"; }
@ -823,11 +824,11 @@ let
# Skips the pam_fscrypt module for systemd-user sessions which do not have a password
# anyways.
# See also https://github.com/google/fscrypt/issues/95
{ name = "fscrypt-skip-systemd"; enable = config.security.pam.enableFscrypt; control = "[success=1 default=ignore]"; modulePath = "pam_succeed_if.so"; args = [
{ name = "fscrypt-skip-systemd"; enable = config.security.pam.enableFscrypt; control = "[success=1 default=ignore]"; modulePath = "${package}/lib/security/pam_succeed_if.so"; args = [
"service" "=" "systemd-user"
]; }
{ name = "fscrypt"; enable = config.security.pam.enableFscrypt; control = "optional"; modulePath = "${pkgs.fscrypt-experimental}/lib/security/pam_fscrypt.so"; }
{ name = "zfs_key-skip-systemd"; enable = cfg.zfs; control = "[success=1 default=ignore]"; modulePath = "pam_succeed_if.so"; args = [
{ name = "zfs_key-skip-systemd"; enable = cfg.zfs; control = "[success=1 default=ignore]"; modulePath = "${package}/lib/security/pam_succeed_if.so"; args = [
"service" "=" "systemd-user"
]; }
{ name = "zfs_key"; enable = cfg.zfs; control = "optional"; modulePath = "${config.boot.zfs.package}/lib/security/pam_zfs_key.so"; settings = {
@ -846,14 +847,14 @@ let
{ name = "krb5"; enable = config.security.pam.krb5.enable; control = "optional"; modulePath = "${pam_krb5}/lib/security/pam_krb5.so"; }
{ name = "otpw"; enable = cfg.otpwAuth; control = "optional"; modulePath = "${pkgs.otpw}/lib/security/pam_otpw.so"; }
{ name = "systemd"; enable = cfg.startSession; control = "optional"; modulePath = "${config.systemd.package}/lib/security/pam_systemd.so"; }
{ name = "xauth"; enable = cfg.forwardXAuth; control = "optional"; modulePath = "pam_xauth.so"; settings = {
{ name = "xauth"; enable = cfg.forwardXAuth; control = "optional"; modulePath = "${package}/lib/security/pam_xauth.so"; settings = {
xauthpath = "${pkgs.xorg.xauth}/bin/xauth";
systemuser = 99;
}; }
{ name = "limits"; enable = cfg.limits != []; control = "required"; modulePath = "${pkgs.pam}/lib/security/pam_limits.so"; settings = {
{ name = "limits"; enable = cfg.limits != []; control = "required"; modulePath = "${package}/lib/security/pam_limits.so"; settings = {
conf = "${makeLimitsConf cfg.limits}";
}; }
{ name = "motd"; enable = cfg.showMotd && (config.users.motd != null || config.users.motdFile != null); control = "optional"; modulePath = "${pkgs.pam}/lib/security/pam_motd.so"; settings = {
{ name = "motd"; enable = cfg.showMotd && (config.users.motd != null || config.users.motdFile != null); control = "optional"; modulePath = "${package}/lib/security/pam_motd.so"; settings = {
inherit motd;
}; }
{ name = "apparmor"; enable = cfg.enableAppArmor && config.security.apparmor.enable; control = "optional"; modulePath = "${pkgs.apparmor-pam}/lib/security/pam_apparmor.so"; settings = {
@ -967,6 +968,8 @@ in
options = {
security.pam.package = mkPackageOption pkgs "pam" { };
security.pam.loginLimits = mkOption {
default = [];
type = limitsType;
@ -1515,7 +1518,7 @@ in
environment.systemPackages =
# Include the PAM modules in the system path mostly for the manpages.
[ pkgs.pam ]
[ package ]
++ optional config.users.ldap.enable pam_ldap
++ optional config.services.kanidm.enablePam config.services.kanidm.package
++ optional config.services.sssd.enable pkgs.sssd
@ -1533,7 +1536,7 @@ in
setuid = true;
owner = "root";
group = "root";
source = "${pkgs.pam}/bin/unix_chkpwd";
source = "${package}/bin/unix_chkpwd";
};
};
@ -1574,11 +1577,6 @@ in
lib.concatMapStrings
(name: "r ${config.environment.etc."pam.d/${name}".source},\n")
(attrNames config.security.pam.services) +
''
mr ${getLib pkgs.pam}/lib/security/pam_filter/*,
mr ${getLib pkgs.pam}/lib/security/pam_*.so,
r ${getLib pkgs.pam}/lib/security/,
'' +
(with lib; pipe config.security.pam.services [
attrValues
(catAttrs "rules")
@ -1586,6 +1584,12 @@ in
(concatMap attrValues)
(filter (rule: rule.enable))
(catAttrs "modulePath")
# TODO(@uninsane): replace this warning + filter with just an assertion
(map (modulePath: lib.warnIfNot
(hasPrefix "/" modulePath)
''non-absolute PAM modulePath "${modulePath}" is unsupported by apparmor and will be treated as an error by future versions of nixpkgs; see <https://github.com/NixOS/nixpkgs/pull/314791>''
modulePath
))
(filter (hasPrefix "/"))
unique
(map (module: "mr ${module},"))


@ -0,0 +1,32 @@
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.speechd;
inherit (lib)
getExe
mkEnableOption
mkIf
mkPackageOption
;
in
{
options.services.speechd = {
# FIXME: figure out how to deprecate this EXTREMELY CAREFULLY
# default guessed conservatively in ../misc/graphical-desktop.nix
enable = mkEnableOption "speech-dispatcher speech synthesizer daemon";
package = mkPackageOption pkgs "speechd" { };
};
# FIXME: speechd 0.12 (or whatever the next version is)
# will support socket activation, so switch to that once it's out.
config = mkIf cfg.enable {
environment = {
systemPackages = [ cfg.package ];
sessionVariables.SPEECHD_CMD = getExe cfg.package;
};
};
}


@ -40,5 +40,5 @@ in
};
};
meta.maintainers = with maintainers; [ ];
meta.maintainers = [ ];
}


@ -20,7 +20,7 @@ let
'';
backupDatabaseScript = db: ''
dest="${cfg.location}/${db}.gz"
if ${mariadb}/bin/mysqldump ${optionalString cfg.singleTransaction "--single-transaction"} ${db} | ${gzip}/bin/gzip -c > $dest.tmp; then
if ${mariadb}/bin/mysqldump ${optionalString cfg.singleTransaction "--single-transaction"} ${db} | ${gzip}/bin/gzip -c ${cfg.gzipOptions} > $dest.tmp; then
mv $dest.tmp $dest
echo "Backed up to $dest"
else
@ -78,6 +78,14 @@ in
Whether to create database dump in a single transaction
'';
};
gzipOptions = mkOption {
default = "--no-name --rsyncable";
type = types.str;
description = ''
Command line options to use when invoking `gzip`.
'';
};
};
};


@ -15,7 +15,7 @@ in
default = "8000";
example = "127.0.0.1:8080";
type = types.str;
description = "Listen on a specific IP address and port.";
description = "Listen on a specific IP address and port or unix socket.";
};
dataDir = mkOption {


@ -83,6 +83,15 @@ in
'';
};
inhibitsSleep = mkOption {
default = false;
type = types.bool;
example = true;
description = ''
Prevents the system from sleeping while backing up.
'';
};
repository = mkOption {
type = with types; nullOr str;
default = null;
@ -299,7 +308,14 @@ in
(name: backup:
let
extraOptions = concatMapStrings (arg: " -o ${arg}") backup.extraOptions;
resticCmd = "${backup.package}/bin/restic${extraOptions}";
inhibitCmd = concatStringsSep " " [
"${pkgs.systemd}/bin/systemd-inhibit"
"--mode='block'"
"--who='restic'"
"--what='sleep'"
"--why=${escapeShellArg "Scheduled backup ${name}"} "
];
resticCmd = "${optionalString backup.inhibitsSleep inhibitCmd}${backup.package}/bin/restic${extraOptions}";
excludeFlags = optional (backup.exclude != []) "--exclude-file=${pkgs.writeText "exclude-patterns" (concatStringsSep "\n" backup.exclude)}";
filesFromTmpFile = "/run/restic-backups-${name}/includes";
doBackup = (backup.dynamicFilesFrom != null) || (backup.paths != null && backup.paths != []);


@ -0,0 +1,296 @@
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.druid;
inherit (lib)
concatStrings
concatStringsSep
mapAttrsToList
concatMap
attrByPath
mkIf
mkMerge
mkEnableOption
mkOption
types
mkPackageOption
;
druidServiceOption = serviceName: {
enable = mkEnableOption serviceName;
restartIfChanged = mkOption {
type = types.bool;
description = ''
Automatically restart the service on config change.
This can be set to false to defer restarts on clusters running critical applications.
Please consider the security implications of inadvertently running an older version,
and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
'';
default = false;
};
config = mkOption {
default = { };
type = types.attrsOf types.anything;
description = ''
(key=value) Configuration to be written to runtime.properties of the druid ${serviceName}
<https://druid.apache.org/docs/latest/configuration/index.html>
'';
example = {
"druid.plainTextPort" = "8082";
"druid.service" = "servicename";
};
};
jdk = mkPackageOption pkgs "JDK" { default = [ "jdk17_headless" ]; };
jvmArgs = mkOption {
type = types.str;
default = "";
description = "Arguments to pass to the JVM";
};
openFirewall = mkOption {
type = types.bool;
default = false;
description = "Open firewall ports for ${serviceName}.";
};
internalConfig = mkOption {
default = { };
type = types.attrsOf types.anything;
internal = true;
description = "Internal Option to add to runtime.properties for ${serviceName}.";
};
};
druidServiceConfig =
{
name,
serviceOptions ? cfg."${name}",
allowedTCPPorts ? [ ],
tmpDirs ? [ ],
extraConfig ? { },
}:
(mkIf serviceOptions.enable (mkMerge [
{
systemd = {
services."druid-${name}" = {
after = [ "network.target" ];
description = "Druid ${name}";
wantedBy = [ "multi-user.target" ];
inherit (serviceOptions) restartIfChanged;
path = [
cfg.package
serviceOptions.jdk
];
script =
let
cfgFile =
fileName: properties:
pkgs.writeTextDir fileName (
concatStringsSep "\n" (mapAttrsToList (n: v: "${n}=${toString v}") properties)
);
commonConfigFile = cfgFile "common.runtime.properties" cfg.commonConfig;
configFile = cfgFile "runtime.properties" (serviceOptions.config // serviceOptions.internalConfig);
extraClassPath = concatStrings (map (path: ":" + path) cfg.extraClassPaths);
extraConfDir = concatStrings (map (dir: ":" + dir + "/*") cfg.extraConfDirs);
in
''
run-java -Dlog4j.configurationFile=file:${cfg.log4j} \
-Ddruid.extensions.directory=${cfg.package}/extensions \
-Ddruid.extensions.hadoopDependenciesDir=${cfg.package}/hadoop-dependencies \
-classpath ${commonConfigFile}:${configFile}:${cfg.package}/lib/\*${extraClassPath}${extraConfDir} \
${serviceOptions.jvmArgs} \
org.apache.druid.cli.Main server ${name}
'';
serviceConfig = {
User = "druid";
SyslogIdentifier = "druid-${name}";
Restart = "always";
};
};
tmpfiles.rules = concatMap (x: [ "d ${x} 0755 druid druid" ]) (cfg.commonTmpDirs ++ tmpDirs);
};
networking.firewall.allowedTCPPorts = mkIf (attrByPath [
"openFirewall"
] false serviceOptions) allowedTCPPorts;
users = {
users.druid = {
description = "Druid user";
group = "druid";
isNormalUser = true;
};
groups.druid = { };
};
}
extraConfig
]));
in
{
options.services.druid = {
package = mkPackageOption pkgs "apache-druid" { default = [ "druid" ]; };
commonConfig = mkOption {
default = { };
type = types.attrsOf types.anything;
description = "(key=value) Configuration to be written to common.runtime.properties";
example = {
"druid.zk.service.host" = "localhost:2181";
"druid.metadata.storage.type" = "mysql";
"druid.metadata.storage.connector.connectURI" = "jdbc:mysql://localhost:3306/druid";
"druid.extensions.loadList" = ''[ "mysql-metadata-storage" ]'';
};
};
commonTmpDirs = mkOption {
default = [ "/var/log/druid/requests" ];
type = types.listOf types.str;
description = "Common List of directories used by druid processes";
};
log4j = mkOption {
type = types.path;
description = "Log4j Configuration for the druid process";
};
extraClassPaths = mkOption {
default = [ ];
type = types.listOf types.str;
description = "Extra classpath to include in the jvm";
};
extraConfDirs = mkOption {
default = [ ];
type = types.listOf types.path;
description = "Extra Conf Dirs to include in the jvm";
};
overlord = druidServiceOption "Druid Overlord";
coordinator = druidServiceOption "Druid Coordinator";
broker = druidServiceOption "Druid Broker";
historical = (druidServiceOption "Druid Historical") // {
segmentLocations = mkOption {
default = null;
description = "Locations where the historical will store its data.";
type =
with types;
nullOr (
listOf (submodule {
options = {
path = mkOption {
type = path;
description = "the path to store the segments";
};
maxSize = mkOption {
type = str;
description = "Max size the druid historical can occupy";
};
freeSpacePercent = mkOption {
type = float;
default = 1.0;
description = "Druid Historical will fail to write if it exceeds this value";
};
};
})
);
};
};
middleManager = druidServiceOption "Druid middleManager";
router = druidServiceOption "Druid Router";
};
config = mkMerge [
(druidServiceConfig rec {
name = "overlord";
allowedTCPPorts = [ (attrByPath [ "druid.plaintextPort" ] 8090 cfg."${name}".config) ];
})
(druidServiceConfig rec {
name = "coordinator";
allowedTCPPorts = [ (attrByPath [ "druid.plaintextPort" ] 8081 cfg."${name}".config) ];
})
(druidServiceConfig rec {
name = "broker";
tmpDirs = [ (attrByPath [ "druid.lookup.snapshotWorkingDir" ] "" cfg."${name}".config) ];
allowedTCPPorts = [ (attrByPath [ "druid.plaintextPort" ] 8082 cfg."${name}".config) ];
})
(druidServiceConfig rec {
name = "historical";
tmpDirs = [
(attrByPath [ "druid.lookup.snapshotWorkingDir" ] "" cfg."${name}".config)
] ++ (map (x: x.path) cfg."${name}".segmentLocations);
allowedTCPPorts = [ (attrByPath [ "druid.plaintextPort" ] 8083 cfg."${name}".config) ];
extraConfig.services.druid.historical.internalConfig."druid.segmentCache.locations" = builtins.toJSON cfg.historical.segmentLocations;
})
(druidServiceConfig rec {
name = "middleManager";
tmpDirs = [
"/var/log/druid/indexer"
] ++ [ (attrByPath [ "druid.indexer.task.baseTaskDir" ] "" cfg."${name}".config) ];
allowedTCPPorts = [ (attrByPath [ "druid.plaintextPort" ] 8091 cfg."${name}".config) ];
extraConfig = {
services.druid.middleManager.internalConfig = {
"druid.indexer.runner.javaCommand" = "${cfg.middleManager.jdk}/bin/java";
"druid.indexer.runner.javaOpts" =
(attrByPath [ "druid.indexer.runner.javaOpts" ] "" cfg.middleManager.config)
+ " -Dlog4j.configurationFile=file:${cfg.log4j}";
};
networking.firewall.allowedTCPPortRanges = mkIf cfg.middleManager.openFirewall [
{
from = attrByPath [ "druid.indexer.runner.startPort" ] 8100 cfg.middleManager.config;
to = attrByPath [ "druid.indexer.runner.endPort" ] 65535 cfg.middleManager.config;
}
];
};
})
(druidServiceConfig rec {
name = "router";
allowedTCPPorts = [ (attrByPath [ "druid.plaintextPort" ] 8888 cfg."${name}".config) ];
})
];
}
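
As a usage sketch for this new module (the ZooKeeper address and log4j file are placeholders; option names follow the definitions above):

```
{
  services.druid = {
    log4j = ./log4j2.xml;  # placeholder log4j2 configuration file
    commonConfig."druid.zk.service.host" = "localhost:2181";
    broker = {
      enable = true;
      openFirewall = true;
      config."druid.plaintextPort" = 8082;  # matches the default used above
    };
  };
}
```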

View file

@ -164,11 +164,8 @@ in
};
config = lib.mkIf cfg.enable {
services.archisteamfarm = {
# TODO: drop with 24.11
dataDir = lib.mkIf (lib.versionAtLeast config.system.stateVersion "24.05") (lib.mkDefault "/var/lib/asf");
settings.IPC = lib.mkIf (!cfg.web-ui.enable) false;
};
# TODO: drop with 24.11
services.archisteamfarm.dataDir = lib.mkIf (lib.versionAtLeast config.system.stateVersion "24.05") (lib.mkDefault "/var/lib/asf");
users = {
users.archisteamfarm = {

View file

@ -51,5 +51,5 @@ in {
};
};
meta.maintainers = with lib.maintainers; [ ];
meta.maintainers = [ ];
}

View file

@ -29,9 +29,6 @@ let
};
nixosRules = ''
# Miscellaneous devices.
KERNEL=="kvm", MODE="0666"
# Needed for gpm.
SUBSYSTEM=="input", KERNEL=="mice", TAG+="systemd"
'';

View file

@ -41,5 +41,5 @@ in {
};
meta.maintainers = with maintainers; [ ];
meta.maintainers = [ ];
}

View file

@ -55,6 +55,6 @@ in
};
meta.maintainers = with maintainers; [ ];
meta.maintainers = [ ];
}

View file

@ -42,6 +42,8 @@ in
programs.gnupg.agent.pinentryPackage = lib.mkOverride 1100 pkgs.pinentry-gnome3;
services.speechd.enable = lib.mkDefault true;
systemd.defaultUnit = lib.mkIf (xcfg.autorun || dmcfg.enable) "graphical.target";
xdg = {

View file

@ -65,5 +65,5 @@ in
};
};
meta.maintainers = with lib.maintainers; [ ];
meta.maintainers = [ ];
}

View file

@ -215,6 +215,7 @@ in
# https://docs.nvidia.com/dgx/pdf/dgx-os-5-user-guide.pdf
"char-nvidiactl"
"char-nvidia-caps"
"char-nvidia-frontend"
"char-nvidia-uvm"
# ROCm
"char-drm"

View file

@ -117,5 +117,5 @@ in
};
};
meta.maintainers = with lib.maintainers; [ ];
meta.maintainers = [ ];
}

View file

@ -119,7 +119,8 @@ in
enable = mkEnableOption "Radicle Seed Node";
package = mkPackageOption pkgs "radicle-node" { };
privateKeyFile = mkOption {
type = types.path;
# Note that a key encrypted by systemd-creds is not a path but a str.
type = with types; either path str;
description = ''
Absolute file path to an SSH private key,
usually generated by `rad auth`.
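
A sketch of the two forms the widened type now accepts; both values are illustrative:

```
{
  # A plain path, as before (option path assumed from the definitions above):
  services.radicle.privateKeyFile = "/var/lib/radicle/keys/radicle";

  # Or, per the note in the diff, a systemd-creds credential, which is a
  # string rather than a path (the credential name here is hypothetical):
  # services.radicle.privateKeyFile = "/run/credentials/radicle-node.service/ssh-key";
}
```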

View file

@ -7,7 +7,7 @@ let
toml = pkgs.formats.toml { };
in
{
meta.maintainers = with maintainers; [ ];
meta.maintainers = [ ];
options.services.rkvm = {
enable = mkOption {

View file

@ -202,10 +202,11 @@ in {
];
services = {
fcgiwrap.zoneminder = lib.mkIf useNginx {
fcgiwrap.instances.zoneminder = lib.mkIf useNginx {
process.prefork = cfg.cameras;
process.user = user;
process.group = group;
socket = { inherit (config.services.nginx) user group; };
};
mysql = lib.mkIf cfg.database.createLocally {
@ -255,7 +256,7 @@ in {
fastcgi_param HTTP_PROXY "";
fastcgi_intercept_errors on;
fastcgi_pass unix:${config.services.fcgiwrap.zoneminder.socket.address};
fastcgi_pass unix:${config.services.fcgiwrap.instances.zoneminder.socket.address};
}
location /cache/ {
@ -372,5 +373,5 @@ in {
};
};
meta.maintainers = with lib.maintainers; [ ];
meta.maintainers = [ ];
}

View file

@ -2,20 +2,23 @@
let
cfg = config.services.prometheus.exporters.smartctl;
inherit (lib) mkOption types literalExpression;
args = lib.escapeShellArgs ([
"--web.listen-address=${cfg.listenAddress}:${toString cfg.port}"
"--smartctl.path=${pkgs.smartmontools}/bin/smartctl"
"--smartctl.interval=${cfg.maxInterval}"
] ++ map (device: "--smartctl.device=${device}") cfg.devices
++ cfg.extraFlags);
in {
in
{
port = 9633;
extraOpts = {
devices = mkOption {
type = types.listOf types.str;
default = [];
default = [ ];
example = literalExpression ''
[ "/dev/sda", "/dev/nvme0n1" ];
'';
@ -24,6 +27,7 @@ in {
all disks if none given.
'';
};
maxInterval = mkOption {
type = types.str;
default = "60s";
@ -50,9 +54,7 @@ in {
"block-sd rw"
"char-nvme rw"
];
ExecStart = ''
${pkgs.prometheus-smartctl-exporter}/bin/smartctl_exporter ${args}
'';
ExecStart = "${pkgs.prometheus-smartctl-exporter}/bin/smartctl_exporter ${args}";
PrivateDevices = lib.mkForce false;
ProtectProc = "invisible";
ProcSubset = "pid";
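
A minimal configuration exercising these options, as a sketch; the device names and interval are examples:

```
{
  services.prometheus.exporters.smartctl = {
    enable = true;
    devices = [ "/dev/sda" "/dev/nvme0n1" ];  # an empty list scans all disks
    maxInterval = "120s";
  };
}
```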

View file

@ -8,7 +8,7 @@ let
in {
meta = {
# doc = ./bee.xml;
maintainers = with maintainers; [ ];
maintainers = [ ];
};
### interface

View file

@ -32,7 +32,7 @@ let
fastcgi_split_path_info ^(${regexLocation cfg})(/.+)$;
fastcgi_param PATH_INFO $fastcgi_path_info;
''
}fastcgi_pass unix:${config.services.fcgiwrap."cgit-${name}".socket.address};
}fastcgi_pass unix:${config.services.fcgiwrap.instances."cgit-${name}".socket.address};
'';
cgitrcLine = name: value: "${name}=${
@ -171,7 +171,7 @@ in
groups.${cfg.group} = { };
}));
services.fcgiwrap = flip mapAttrs' cfgs (name: cfg:
services.fcgiwrap.instances = flip mapAttrs' cfgs (name: cfg:
nameValuePair "cgit-${name}" {
process = { inherit (cfg) user group; };
socket = { inherit (config.services.nginx) user group; };

View file

@ -90,6 +90,6 @@ in
};
meta = {
maintainers = with lib.maintainers; [ ];
maintainers = [ ];
};
}

View file

@ -316,7 +316,7 @@ in
};
meta = {
maintainers = with lib.maintainers; [ ];
maintainers = [ ];
doc = ./firefox-syncserver.md;
};
}

View file

@ -141,5 +141,5 @@ in
};
};
meta.maintainers = with maintainers; [ ];
meta.maintainers = [ ];
}

View file

@ -183,37 +183,45 @@ let
in
pkgs.writeText "i2pd.conf" (concatStringsSep "\n" opts);
tunnelConf = let opts = [
notice
(flip map
(collect (tun: tun ? port && tun ? destination) cfg.outTunnels)
(tun: let outTunOpts = [
(sec tun.name)
"type = client"
(intOpt "port" tun.port)
(strOpt "destination" tun.destination)
tunnelConf = let
mkOutTunnel = tun:
let
outTunOpts = [
(sec tun.name)
"type = client"
(intOpt "port" tun.port)
(strOpt "destination" tun.destination)
] ++ (optionals (tun ? destinationPort) (optionalNullInt "destinationport" tun.destinationPort))
++ (optionals (tun ? keys) (optionalNullString "keys" tun.keys))
++ (optionals (tun ? address) (optionalNullString "address" tun.address))
++ (optionals (tun ? inbound.length) (optionalNullInt "inbound.length" tun.inbound.length))
++ (optionals (tun ? inbound.quantity) (optionalNullInt "inbound.quantity" tun.inbound.quantity))
++ (optionals (tun ? outbound.length) (optionalNullInt "outbound.length" tun.outbound.length))
++ (optionals (tun ? outbound.quantity) (optionalNullInt "outbound.quantity" tun.outbound.quantity))
++ (optionals (tun ? crypto.tagsToSend) (optionalNullInt "crypto.tagstosend" tun.crypto.tagsToSend));
in concatStringsSep "\n" outTunOpts))
(flip map
(collect (tun: tun ? port && tun ? address) cfg.inTunnels)
(tun: let inTunOpts = [
(sec tun.name)
"type = server"
(intOpt "port" tun.port)
(strOpt "host" tun.address)
] ++ (optionals (tun ? destination) (optionalNullString "destination" tun.destination))
++ (optionals (tun ? keys) (optionalNullString "keys" tun.keys))
++ (optionals (tun ? inPort) (optionalNullInt "inport" tun.inPort))
++ (optionals (tun ? accessList) (optionalEmptyList "accesslist" tun.accessList));
in concatStringsSep "\n" inTunOpts))];
in pkgs.writeText "i2pd-tunnels.conf" opts;
++ (optionals (tun ? keys) (optionalNullString "keys" tun.keys))
++ (optionals (tun ? address) (optionalNullString "address" tun.address))
++ (optionals (tun ? inbound.length) (optionalNullInt "inbound.length" tun.inbound.length))
++ (optionals (tun ? inbound.quantity) (optionalNullInt "inbound.quantity" tun.inbound.quantity))
++ (optionals (tun ? outbound.length) (optionalNullInt "outbound.length" tun.outbound.length))
++ (optionals (tun ? outbound.quantity) (optionalNullInt "outbound.quantity" tun.outbound.quantity))
++ (optionals (tun ? crypto.tagsToSend) (optionalNullInt "crypto.tagstosend" tun.crypto.tagsToSend));
in
concatStringsSep "\n" outTunOpts;
mkInTunnel = tun:
let
inTunOpts = [
(sec tun.name)
"type = server"
(intOpt "port" tun.port)
(strOpt "host" tun.address)
] ++ (optionals (tun ? destination) (optionalNullString "destination" tun.destination))
++ (optionals (tun ? keys) (optionalNullString "keys" tun.keys))
++ (optionals (tun ? inPort) (optionalNullInt "inport" tun.inPort))
++ (optionals (tun ? accessList) (optionalEmptyList "accesslist" tun.accessList));
in
concatStringsSep "\n" inTunOpts;
allOutTunnels = collect (tun: tun ? port && tun ? destination) cfg.outTunnels;
allInTunnels = collect (tun: tun ? port && tun ? address) cfg.inTunnels;
opts = [ notice ] ++ (map mkOutTunnel allOutTunnels) ++ (map mkInTunnel allInTunnels);
in
pkgs.writeText "i2pd-tunnels.conf" (concatStringsSep "\n" opts);
i2pdFlags = concatStringsSep " " (
optional (cfg.address != null) ("--host=" + cfg.address) ++ [

View file

@ -721,7 +721,7 @@ in
};
meta = {
maintainers = with lib.maintainers; [ ];
maintainers = [ ];
doc = ./mosquitto.md;
};
}

View file

@ -12,6 +12,8 @@ in
services.nar-serve = {
enable = mkEnableOption "serving NAR file contents via HTTP";
package = mkPackageOption pkgs "nar-serve" { };
port = mkOption {
type = types.port;
default = 8383;
@ -32,6 +34,17 @@ in
- gs:// for binary caches stored in Google Cloud Storage
'';
};
domain = mkOption {
type = types.str;
default = "";
description = ''
When set, also serves <nar-hash>.<domain> in addition to
<domain>/nix/store/<nar-hash>-<pname>.
Useful for previewing static websites whose paths are absolute.
'';
};
};
};
@ -47,7 +60,7 @@ in
serviceConfig = {
Restart = "always";
RestartSec = "5s";
ExecStart = "${pkgs.nar-serve}/bin/nar-serve";
ExecStart = lib.getExe cfg.package;
DynamicUser = true;
};
};
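
A sketch of the new `domain` option alongside the existing ones; the domain and cache URL are placeholders, and `cacheURL` is assumed from the rest of the module:

```
{
  services.nar-serve = {
    enable = true;
    port = 8383;
    cacheURL = "https://cache.nixos.org/";  # assumed existing option
    # New option: additionally serve <nar-hash>.nar.example.org,
    # useful for static sites with absolute paths.
    domain = "nar.example.org";
  };
}
```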

View file

@ -7,7 +7,7 @@ let
in
{
meta = {
maintainers = with maintainers; [ ];
maintainers = [ ];
};
options = {

View file

@ -291,5 +291,5 @@ in
};
};
meta.maintainers = with maintainers; [ ];
meta.maintainers = [ ];
}

View file

@ -337,7 +337,7 @@ in
};
# use nginx to serve the smokeping web service
services.fcgiwrap.smokeping = mkIf cfg.webService {
services.fcgiwrap.instances.smokeping = mkIf cfg.webService {
process.user = cfg.user;
process.group = cfg.user;
socket = { inherit (config.services.nginx) user group; };
@ -353,7 +353,7 @@ in
locations."/smokeping.fcgi" = {
extraConfig = ''
include ${config.services.nginx.package}/conf/fastcgi_params;
fastcgi_pass unix:${config.services.fcgiwrap.smokeping.socket.address};
fastcgi_pass unix:${config.services.fcgiwrap.instances.smokeping.socket.address};
fastcgi_param SCRIPT_FILENAME ${smokepingHome}/smokeping.fcgi;
fastcgi_param DOCUMENT_ROOT ${smokepingHome};
'';

View file

@ -0,0 +1,47 @@
# Global configuration for wvdial.
{
config,
lib,
pkgs,
...
}:
let
cfg = config.environment.wvdial;
in
{
options = {
environment.wvdial = {
dialerDefaults = lib.mkOption {
default = "";
type = lib.types.str;
example = ''Init1 = AT+CGDCONT=1,"IP","internet.t-mobile"'';
description = ''
Contents of the "Dialer Defaults" section of
{file}`/etc/wvdial.conf`.
'';
};
pppDefaults = lib.mkOption {
default = ''
noipdefault
usepeerdns
defaultroute
persist
noauth
'';
type = lib.types.str;
description = "Default ppp settings for wvdial.";
};
};
};
config = lib.mkIf (cfg.dialerDefaults != "") {
environment.etc."wvdial.conf".source = pkgs.writeText "wvdial.conf" ''
[Dialer Defaults]
PPPD PATH = ${pkgs.ppp}/sbin/pppd
${config.environment.wvdial.dialerDefaults}
'';
environment.etc."ppp/peers/wvdial".source = pkgs.writeText "wvdial" cfg.pppDefaults;
};
}
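
A usage sketch for this new module; the modem device and APN are placeholders:

```
{
  environment.wvdial.dialerDefaults = ''
    Modem = /dev/ttyUSB0
    Baud = 460800
    Init1 = AT+CGDCONT=1,"IP","internet.example"
  '';
}
```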

View file

@ -54,5 +54,5 @@ in {
};
meta.maintainers = with lib.maintainers; [ ];
meta.maintainers = [ ];
}

View file

@ -70,5 +70,5 @@ in {
};
};
meta.maintainers = with lib.maintainers; [ ];
meta.maintainers = [ ];
}

View file

@ -4,7 +4,7 @@ let
settingsFormat = (pkgs.formats.json { });
in
{
meta.maintainers = with lib.maintainers; [ ];
meta.maintainers = [ ];
options = {
services.step-ca = {

View file

@ -403,7 +403,7 @@ in
path = with pkgs; [
# unfree:
# config.boot.kernelPackages.nvidiaPackages.latest.bin
ffmpeg_5-headless
ffmpeg-headless
libva-utils
procps
radeontop

View file

@ -452,9 +452,9 @@ in {
extraPackages = mkOption {
type = with types; listOf package;
default = with pkgs; [ exiftool ffmpeg_5-headless graphicsmagick-imagemagick-compat ];
defaultText = literalExpression "with pkgs; [ exiftool graphicsmagick-imagemagick-compat ffmpeg_5-headless ]";
example = literalExpression "with pkgs; [ exiftool imagemagick ffmpeg_5-full ]";
default = with pkgs; [ exiftool ffmpeg-headless graphicsmagick-imagemagick-compat ];
defaultText = literalExpression "with pkgs; [ exiftool ffmpeg-headless graphicsmagick-imagemagick-compat ]";
example = literalExpression "with pkgs; [ exiftool ffmpeg-full imagemagick ]";
description = ''
List of extra packages to include in the executable search path of the service unit.
These are needed by various configurable components such as:

View file

@ -1,49 +1,90 @@
{ pkgs, lib, config, ... }:
with lib;
{
pkgs,
lib,
config,
...
}:
let
cfg = config.services.gotify;
in {
options = {
services.gotify = {
enable = mkEnableOption "Gotify webserver";
in
{
imports = [
(lib.mkRenamedOptionModule
[
"services"
"gotify"
"port"
]
[
"services"
"gotify"
"environment"
"GOTIFY_SERVER_PORT"
]
)
];
port = mkOption {
type = types.port;
description = ''
Port the server listens to.
'';
};
options.services.gotify = {
enable = lib.mkEnableOption "Gotify webserver";
stateDirectoryName = mkOption {
type = types.str;
default = "gotify-server";
description = ''
The name of the directory below {file}`/var/lib` where
gotify stores its runtime data.
'';
package = lib.mkPackageOption pkgs "gotify-server" { };
environment = lib.mkOption {
type = lib.types.attrsOf (
lib.types.oneOf [
lib.types.str
lib.types.int
]
);
default = { };
example = {
GOTIFY_SERVER_PORT = 8080;
GOTIFY_DATABASE_DIALECT = "sqlite3";
};
description = ''
Environment variables used to configure gotify-server.
See https://gotify.net/docs/config for more details.
'';
};
environmentFiles = lib.mkOption {
type = lib.types.listOf lib.types.path;
default = [ ];
description = ''
Files containing additional config environment variables for gotify-server.
Secrets should be set in environmentFiles instead of environment.
'';
};
stateDirectoryName = lib.mkOption {
type = lib.types.str;
default = "gotify-server";
description = ''
The name of the directory below {file}`/var/lib` where
gotify stores its runtime data.
'';
};
};
config = mkIf cfg.enable {
config = lib.mkIf cfg.enable {
systemd.services.gotify-server = {
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
description = "Simple server for sending and receiving messages";
environment = {
GOTIFY_SERVER_PORT = toString cfg.port;
};
environment = lib.mapAttrs (_: toString) cfg.environment;
serviceConfig = {
WorkingDirectory = "/var/lib/${cfg.stateDirectoryName}";
StateDirectory = cfg.stateDirectoryName;
EnvironmentFile = cfg.environmentFiles;
Restart = "always";
DynamicUser = "yes";
ExecStart = "${pkgs.gotify-server}/bin/server";
DynamicUser = true;
ExecStart = lib.getExe cfg.package;
};
};
};
meta.maintainers = with lib.maintainers; [ DCsunset ];
}
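
A sketch of the rewritten module in use; the port and the secrets file are illustrative:

```
{
  services.gotify = {
    enable = true;
    environment = {
      GOTIFY_SERVER_PORT = 8080;
      GOTIFY_DATABASE_DIALECT = "sqlite3";
    };
    # Secrets (e.g. GOTIFY_DEFAULTUSER_PASS) belong in environmentFiles:
    environmentFiles = [ "/run/secrets/gotify.env" ];
  };
}
```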

View file

@ -3,12 +3,26 @@
with lib;
let
forEachInstance = f: flip mapAttrs' config.services.fcgiwrap (name: cfg:
nameValuePair "fcgiwrap-${name}" (f cfg)
forEachInstance = f: flip mapAttrs' config.services.fcgiwrap.instances (
name: cfg: nameValuePair "fcgiwrap-${name}" (f cfg)
);
in {
options.services.fcgiwrap = mkOption {
imports = forEach [
"enable"
"user"
"group"
"socketType"
"socketAddress"
"preforkProcesses"
] (attr: mkRemovedOptionModule [ "services" "fcgiwrap" attr ] ''
The global shared fcgiwrap instance is no longer supported due to
security issues.
Isolated instances should instead be configured through
`services.fcgiwrap.instances.*`.
'');
options.services.fcgiwrap.instances = mkOption {
description = "Configuration for fcgiwrap instances.";
default = { };
type = types.attrsOf (types.submodule ({ config, ... }: { options = {
@ -54,7 +68,6 @@ in {
default = null;
description = ''
User to be set as owner of the UNIX socket.
Defaults to the process running user.
'';
};
@ -63,7 +76,6 @@ in {
default = null;
description = ''
Group to be set as owner of the UNIX socket.
Defaults to the process running group.
'';
};
@ -83,6 +95,14 @@ in {
config = {
assertions = concatLists (mapAttrsToList (name: cfg: [
{
assertion = cfg.socket.type == "unix" -> cfg.socket.user != null;
message = "Socket owner is required for the UNIX socket type.";
}
{
assertion = cfg.socket.type == "unix" -> cfg.socket.group != null;
message = "Socket owner is required for the UNIX socket type.";
}
{
assertion = cfg.socket.user != null -> cfg.socket.type == "unix";
message = "Socket owner can only be set for the UNIX socket type.";
@ -95,7 +115,7 @@ in {
assertion = cfg.socket.mode != null -> cfg.socket.type == "unix";
message = "Socket mode can only be set for the UNIX socket type.";
}
]) config.services.fcgiwrap);
]) config.services.fcgiwrap.instances);
systemd.services = forEachInstance (cfg: {
after = [ "nss-user-lookup.target" ];
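
To summarize the migration this change enforces, a before/after sketch (the instance name and users are illustrative):

```
{
  # Before (removed, now an error): one global, shared instance.
  # services.fcgiwrap = { enable = true; user = "cgit"; group = "cgit"; };

  # After: isolated per-name instances; a socket owner is now required
  # for UNIX sockets, per the assertions above.
  services.fcgiwrap.instances.cgit-main = {
    process = { user = "cgit"; group = "cgit"; };
    socket = { inherit (config.services.nginx) user group; };
  };
}
```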

View file

@ -89,6 +89,6 @@ in
};
meta.maintainers = with maintainers; [ ];
meta.maintainers = [ ];
}

View file

@ -33,6 +33,8 @@ let
''
#!${pkgs.runtimeShell}
source ${./lib/lib.sh}
systemConfig='@out@'
export PATH=/empty

View file

@ -0,0 +1,5 @@
# shellcheck shell=bash
warn() {
printf "\033[1;35mwarning:\033[0m %s\n" "$*" >&2
}

View file

@ -0,0 +1,36 @@
# Run:
# nix-build -A nixosTests.activation-lib
{ lib, stdenv, testers }:
let
inherit (lib) fileset;
runTests = stdenv.mkDerivation {
name = "tests-activation-lib";
src = fileset.toSource {
root = ./.;
fileset = fileset.unions [
./lib.sh
./test.sh
];
};
buildPhase = ":";
doCheck = true;
postUnpack = ''
patchShebangs --build .
'';
checkPhase = ''
./test.sh
'';
installPhase = ''
touch $out
'';
};
runShellcheck = testers.shellcheck {
src = runTests.src;
};
in
lib.recurseIntoAttrs {
inherit runTests runShellcheck;
}

View file

@ -0,0 +1,34 @@
#!/usr/bin/env bash
# Run:
# ./test.sh
# or:
# nix-build -A nixosTests.activation-lib
cd "$(dirname "${BASH_SOURCE[0]}")"
set -euo pipefail
# report failure
onerr() {
set +e
# find failed statement
echo "call trace:"
local i=0
while t="$(caller $i)"; do
line="${t%% *}"
file="${t##* }"
echo " $file:$line" >&2
((i++))
done
# red
printf "\033[1;31mtest failed\033[0m\n" >&2
exit 1
}
trap onerr ERR
source ./lib.sh
(warn hi, this works >/dev/null) 2>&1 | grep -E $'.*warning:.* hi, this works' >/dev/null
# green
printf "\033[1;32mok\033[0m\n"

View file

@ -0,0 +1,38 @@
# Automatic boot assessment with systemd-boot {#sec-automatic-boot-assessment}
## Overview {#sec-automatic-boot-assessment-overview}
Automatic boot assessment (or boot-counting) is a feature of `systemd-boot` that allows for automatically detecting invalid boot entries.
When the feature is active, each boot entry has an associated counter with a user-defined number of trials. Whenever `systemd-boot` boots an entry, its counter is decreased by one, and the entry is marked as *bad* if the counter ever reaches zero. However, if an entry is successfully booted, systemd permanently marks it as *good* and removes the counter altogether. Entries marked as *bad* are sorted last in the `systemd-boot` menu.
A complete explanation of how that feature works can be found [here](https://systemd.io/AUTOMATIC_BOOT_ASSESSMENT/).
## Enabling the feature {#sec-automatic-boot-assessment-enable}
The feature can be enabled by toggling the [boot.loader.systemd-boot.bootCounting](#opt-boot.loader.systemd-boot.bootCounting.enable) option.
## The boot-complete.target unit {#sec-automatic-boot-assessment-boot-complete-target}
A *successful boot* for an entry is defined in terms of the `boot-complete.target` synchronisation point. It is up to the user to schedule all necessary units for the machine to be considered successfully booted before that synchronisation point.
For example, suppose you are running `docker` on a machine and want a *good* entry to be one where docker has started successfully.
A configuration for that NixOS machine could look like this:
```
boot.loader.systemd-boot.bootCounting.enable = true;
services.docker.enable = true;
systemd.services.docker = {
before = [ "boot-complete.target" ];
wantedBy = [ "boot-complete.target" ];
unitConfig.FailureAction = "reboot";
};
```
The systemd service must be of type `notify` or `oneshot` for systemd to detect the startup error properly.
## Interaction with specialisations {#sec-automatic-boot-assessment-specialisations}
When the boot-counting feature is enabled, `systemd-boot` will still try the boot entries in the same order as they are displayed in the boot menu. This means that the specialisations of a given generation will be tried directly after that generation, but that behavior is customizable with the [boot.loader.systemd-boot.sortKey](#opt-boot.loader.systemd-boot.sortKey) option.
## Limitations {#sec-automatic-boot-assessment-limitations}
This feature has to be used with care to avoid data integrity issues. Rolling back to past generations can be dangerous, for example when services behave unpredictably in the presence of unrecognized data migrations performed by newer versions of themselves.

View file

@ -12,8 +12,9 @@ import subprocess
import sys
import warnings
import json
from typing import NamedTuple, Any
from typing import NamedTuple, Any, Type
from dataclasses import dataclass
from pathlib import Path
# These values will be replaced with actual values during the package build
EFI_SYS_MOUNT_POINT = "@efiSysMountPoint@"
@ -32,6 +33,8 @@ CAN_TOUCH_EFI_VARIABLES = "@canTouchEfiVariables@"
GRACEFUL = "@graceful@"
COPY_EXTRA_FILES = "@copyExtraFiles@"
CHECK_MOUNTPOINTS = "@checkMountpoints@"
BOOT_COUNTING_TRIES = "@bootCountingTries@"
BOOT_COUNTING = "@bootCounting@" == "True"
@dataclass
class BootSpec:
@ -46,6 +49,104 @@ class BootSpec:
sortKey: str # noqa: N815
initrdSecrets: str | None = None # noqa: N815
@dataclass
class Entry:
profile: str | None
generation_number: int
specialisation: str | None
@classmethod
def from_path(cls: Type["Entry"], path: Path) -> "Entry":
filename = path.name
# Matching nixos-$profile-generation-*.conf
rex_profile = re.compile(r"^nixos-(.*)-generation-.*\.conf$")
# Matching nixos*-generation-$number*.conf
rex_generation = re.compile(r"^nixos.*-generation-([0-9]+).*\.conf$")
# Matching nixos*-generation-$number-specialisation-$specialisation_name*.conf
rex_specialisation = re.compile(r"^nixos.*-generation-([0-9]+)-specialisation-([a-zA-Z0-9]+).*\.conf$")
profile = rex_profile.sub(r"\1", filename) if rex_profile.match(filename) else None
specialisation = rex_specialisation.sub(r"\2", filename) if rex_specialisation.match(filename) else None
# May raise ValueError; scan_entries() skips files it cannot parse.
generation_number = int(rex_generation.sub(r"\1", filename))
return cls(profile, generation_number, specialisation)
@dataclass
class DiskEntry:
entry: Entry
default: bool
counters: str | None
title: str | None
description: str | None
kernel: str
initrd: str
kernel_params: str | None
machine_id: str | None
sort_key: str
@classmethod
def from_path(cls: Type["DiskEntry"], path: Path) -> "DiskEntry":
entry = Entry.from_path(path)
data = path.read_text().splitlines()
if '' in data:
data.remove('')
entry_map = dict(lines.split(' ', 1) for lines in data)
assert "linux" in entry_map
assert "initrd" in entry_map
filename = path.name
# Matching nixos*-generation-*$counters.conf
rex_counters = re.compile(r"^nixos.*-generation-.*(\+\d(-\d)?)\.conf$")
counters = rex_counters.sub(r"\1", filename) if rex_counters.match(filename) else None
disk_entry = cls(
entry=entry,
default=(entry_map.get("sort-key") == "default"),
counters=counters,
title=entry_map.get("title"),
description=entry_map.get("version"),
kernel=entry_map["linux"],
initrd=entry_map["initrd"],
kernel_params=entry_map.get("options"),
machine_id=entry_map.get("machine-id"),
sort_key=entry_map.get("sort-key", "nixos"))
return disk_entry
def write(self, sorted_first: str) -> None:
# Compute a sort-key sorted before sorted_first
# This will compute something like: nixos -> nixor-default, to make sure we come before other nixos entries
# while still letting users prepend their own entries before it.
default_sort_key = sorted_first[:-1] + chr(ord(sorted_first[-1])-1) + "-default"
tmp_path = self.path.with_suffix(".tmp")
with tmp_path.open('w') as f:
# We use "sort-key" to sort the default generation first.
# The "default" string is sorted before "non-default" (alphabetically)
boot_entry = [
f"title {self.title}" if self.title is not None else None,
f"version {self.description}" if self.description is not None else None,
f"linux {self.kernel}",
f"initrd {self.initrd}",
f"options {self.kernel_params}" if self.kernel_params is not None else None,
f"machine-id {self.machine_id}" if self.machine_id is not None else None,
f"sort-key {default_sort_key if self.default else self.sort_key}"
]
f.write("\n".join(filter(None, boot_entry)))
f.flush()
os.fsync(f.fileno())
tmp_path.rename(self.path)
@property
def path(self) -> Path:
pieces = [
"nixos",
self.entry.profile or None,
"generation",
str(self.entry.generation_number),
f"specialisation-{self.entry.specialisation}" if self.entry.specialisation else None,
]
prefix = "-".join(p for p in pieces if p)
return Path(f"{BOOT_MOUNT_POINT}/loader/entries/{prefix}{self.counters if self.counters else ''}.conf")
libc = ctypes.CDLL("libc.so.6")
@ -78,30 +179,14 @@ def system_dir(profile: str | None, generation: int, specialisation: str | None)
else:
return d
BOOT_ENTRY = """title {title}
sort-key {sort_key}
version Generation {generation} {description}
linux {kernel}
initrd {initrd}
options {kernel_params}
"""
def generation_conf_filename(profile: str | None, generation: int, specialisation: str | None) -> str:
pieces = [
"nixos",
profile or None,
"generation",
str(generation),
f"specialisation-{specialisation}" if specialisation else None,
]
return "-".join(p for p in pieces if p) + ".conf"
def write_loader_conf(profile: str | None, generation: int, specialisation: str | None) -> None:
with open(f"{LOADER_CONF}.tmp", 'w') as f:
def write_loader_conf(profile: str | None) -> None:
with open(f"{EFI_SYS_MOUNT_POINT}/loader/loader.conf.tmp", 'w') as f:
if TIMEOUT != "":
f.write(f"timeout {TIMEOUT}\n")
f.write("default %s\n" % generation_conf_filename(profile, generation, specialisation))
if profile:
f.write("default nixos-%s-generation-*\n" % profile)
else:
f.write("default nixos-generation-*\n")
if not EDITOR:
f.write("editor 0\n")
f.write(f"console-mode {CONSOLE_MODE}\n")
@ -109,6 +194,19 @@ def write_loader_conf(profile: str | None, generation: int, specialisation: str
os.fsync(f.fileno())
os.rename(f"{LOADER_CONF}.tmp", LOADER_CONF)
def scan_entries() -> list[DiskEntry]:
"""
Scan all entries in $ESP/loader/entries/*
Type 2 entries are not supported, since we do not generate them for now.
Returns a list of DiskEntry.
"""
entries = []
for path in Path(f"{EFI_SYS_MOUNT_POINT}/loader/entries/").glob("nixos*-generation-[1-9]*.conf"):
try:
entries.append(DiskEntry.from_path(path))
except ValueError:
continue
return entries
def get_bootspec(profile: str | None, generation: int) -> BootSpec:
system_directory = system_dir(profile, generation, None)
@ -151,8 +249,14 @@ def copy_from_file(file: str, dry_run: bool = False) -> str:
copy_if_not_exists(store_file_path, f"{BOOT_MOUNT_POINT}{efi_file_path}")
return efi_file_path
def write_entry(profile: str | None, generation: int, specialisation: str | None,
machine_id: str, bootspec: BootSpec, current: bool) -> None:
def write_entry(profile: str | None,
generation: int,
specialisation: str | None,
machine_id: str,
bootspec: BootSpec,
entries: list[DiskEntry],
sorted_first: str,
current: bool) -> None:
if specialisation:
bootspec = bootspec.specialisations[specialisation]
kernel = copy_from_file(bootspec.kernel)
@ -175,29 +279,32 @@ def write_entry(profile: str | None, generation: int, specialisation: str | None
f'for "{title} - Configuration {generation}", an older generation', file=sys.stderr)
print("note: this is normal after having removed "
"or renamed a file in `boot.initrd.secrets`", file=sys.stderr)
entry_file = f"{BOOT_MOUNT_POINT}/loader/entries/%s" % (
generation_conf_filename(profile, generation, specialisation))
tmp_path = "%s.tmp" % (entry_file)
kernel_params = "init=%s " % bootspec.init
kernel_params = kernel_params + " ".join(bootspec.kernelParams)
build_time = int(os.path.getctime(system_dir(profile, generation, specialisation)))
build_date = datetime.datetime.fromtimestamp(build_time).strftime('%F')
counters = f"+{BOOT_COUNTING_TRIES}" if BOOT_COUNTING else ""
entry = Entry(profile, generation, specialisation)
# We check if the entry we are writing is already on disk
# and we update its "default entry" status
for entry_on_disk in entries:
if entry == entry_on_disk.entry:
entry_on_disk.default = current
entry_on_disk.write(sorted_first)
return
with open(tmp_path, 'w') as f:
f.write(BOOT_ENTRY.format(title=title,
sort_key=bootspec.sortKey,
generation=generation,
kernel=kernel,
initrd=initrd,
kernel_params=kernel_params,
description=f"{bootspec.label}, built on {build_date}"))
if machine_id is not None:
f.write("machine-id %s\n" % machine_id)
f.flush()
os.fsync(f.fileno())
os.rename(tmp_path, entry_file)
DiskEntry(
entry=entry,
title=title,
kernel=kernel,
initrd=initrd,
counters=counters,
kernel_params=kernel_params,
machine_id=machine_id,
description=f"Generation {generation} {bootspec.label}, built on {build_date}",
sort_key=bootspec.sortKey,
default=current
).write(sorted_first)
def get_generations(profile: str | None = None) -> list[SystemIdentifier]:
gen_list = run(
@ -225,30 +332,19 @@ def get_generations(profile: str | None = None) -> list[SystemIdentifier]:
return configurations[-configurationLimit:]
def remove_old_entries(gens: list[SystemIdentifier]) -> None:
rex_profile = re.compile(r"^" + re.escape(BOOT_MOUNT_POINT) + r"/loader/entries/nixos-(.*)-generation-.*\.conf$")
rex_generation = re.compile(r"^" + re.escape(BOOT_MOUNT_POINT) + r"/loader/entries/nixos.*-generation-([0-9]+)(-specialisation-.*)?\.conf$")
def remove_old_entries(gens: list[SystemIdentifier], disk_entries: list[DiskEntry]) -> None:
known_paths = []
for gen in gens:
bootspec = get_bootspec(gen.profile, gen.generation)
known_paths.append(copy_from_file(bootspec.kernel, True))
known_paths.append(copy_from_file(bootspec.initrd, True))
for path in glob.iglob(f"{BOOT_MOUNT_POINT}/loader/entries/nixos*-generation-[1-9]*.conf"):
if rex_profile.match(path):
prof = rex_profile.sub(r"\1", path)
else:
prof = None
try:
gen_number = int(rex_generation.sub(r"\1", path))
except ValueError:
continue
if (prof, gen_number, None) not in gens:
os.unlink(path)
for path in glob.iglob(f"{BOOT_MOUNT_POINT}/{NIXOS_DIR}/*"):
for disk_entry in disk_entries:
if (disk_entry.entry.profile, disk_entry.entry.generation_number, None) not in gens:
os.unlink(disk_entry.path)
for path in glob.iglob(f"{EFI_SYS_MOUNT_POINT}/efi/nixos/*"):
if path not in known_paths and not os.path.isdir(path):
os.unlink(path)
def cleanup_esp() -> None:
for path in glob.iglob(f"{EFI_SYS_MOUNT_POINT}/loader/entries/nixos*"):
os.unlink(path)
@ -267,7 +363,7 @@ def get_profiles() -> list[str]:
def install_bootloader(args: argparse.Namespace) -> None:
try:
with open("/etc/machine-id") as machine_file:
machine_id = machine_file.readlines()[0]
machine_id = machine_file.readlines()[0].strip()
except IOError as e:
if e.errno != errno.ENOENT:
raise
@ -351,18 +447,32 @@ def install_bootloader(args: argparse.Namespace) -> None:
gens = get_generations()
for profile in get_profiles():
gens += get_generations(profile)
remove_old_entries(gens)
entries = scan_entries()
remove_old_entries(gens, entries)
# Compute the sort-key that will be sorted first.
sorted_first = ""
for gen in gens:
try:
bootspec = get_bootspec(gen.profile, gen.generation)
if bootspec.sortKey < sorted_first or sorted_first == "":
sorted_first = bootspec.sortKey
except OSError as e:
# See https://github.com/NixOS/nixpkgs/issues/114552
if e.errno == errno.EINVAL:
profile = f"profile '{gen.profile}'" if gen.profile else "default profile"
print("ignoring {} in the list of boot entries because of the following error:\n{}".format(profile, e), file=sys.stderr)
else:
raise e
for gen in gens:
try:
bootspec = get_bootspec(gen.profile, gen.generation)
is_default = os.path.dirname(bootspec.init) == args.default_config
write_entry(*gen, machine_id, bootspec, current=is_default)
write_entry(*gen, machine_id, bootspec, entries, sorted_first, current=is_default)
for specialisation in bootspec.specialisations.keys():
write_entry(gen.profile, gen.generation, specialisation, machine_id, bootspec, current=is_default)
write_entry(gen.profile, gen.generation, specialisation, machine_id, bootspec, entries, sorted_first, current=(is_default and bootspec.specialisations[specialisation].sortKey == bootspec.sortKey))
if is_default:
write_loader_conf(*gen)
write_loader_conf(gen.profile)
except OSError as e:
# See https://github.com/NixOS/nixpkgs/issues/114552
if e.errno == errno.EINVAL:

View file

@ -80,6 +80,8 @@ let
${pkgs.coreutils}/bin/install -D $empty_file "${bootMountPoint}/${nixosDir}/.extra-files/loader/entries/"${escapeShellArg n}
'') cfg.extraEntries)}
'';
bootCountingTries = cfg.bootCounting.tries;
bootCounting = if cfg.bootCounting.enable then "True" else "False";
};
finalSystemdBootBuilder = pkgs.writeScript "install-systemd-boot.sh" ''
@ -89,7 +91,10 @@ let
'';
in {
meta.maintainers = with lib.maintainers; [ julienmalka ];
meta = {
maintainers = with lib.maintainers; [ julienmalka ];
doc = ./boot-counting.md;
};
imports =
[ (mkRenamedOptionModule [ "boot" "loader" "gummiboot" "enable" ] [ "boot" "loader" "systemd-boot" "enable" ])
@ -319,6 +324,15 @@ in {
'';
};
bootCounting = {
enable = mkEnableOption "automatic boot assessment";
tries = mkOption {
default = 3;
type = types.int;
description = "number of tries each entry should start with";
};
};
};
config = mkIf cfg.enable {
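
As a sketch, enabling the new option with a non-default trial count:

```
{
  boot.loader.systemd-boot = {
    enable = true;
    bootCounting = {
      enable = true;
      tries = 5;  # each new boot entry starts with five attempts
    };
  };
}
```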

View file

@ -18,12 +18,16 @@ let
"ManageForeignRoutes"
"RouteTable"
"IPv6PrivacyExtensions"
"IPv4Forwarding"
"IPv6Forwarding"
])
(assertValueOneOf "SpeedMeter" boolValues)
(assertInt "SpeedMeterIntervalSec")
(assertValueOneOf "ManageForeignRoutingPolicyRules" boolValues)
(assertValueOneOf "ManageForeignRoutes" boolValues)
(assertValueOneOf "IPv6PrivacyExtensions" (boolValues ++ ["prefer-public" "kernel"]))
(assertValueOneOf "IPv4Forwarding" boolValues)
(assertValueOneOf "IPv6Forwarding" boolValues)
];
sectionDHCPv4 = checkUnitConfig "DHCPv4" [
@ -652,6 +656,8 @@ let
"DNSDefaultRoute"
"NTP"
"IPForward"
"IPv4Forwarding"
"IPv6Forwarding"
"IPMasquerade"
"IPv6PrivacyExtensions"
"IPv6AcceptRA"
@ -700,7 +706,9 @@ let
(assertValueOneOf "LLDP" (boolValues ++ ["routers-only"]))
(assertValueOneOf "EmitLLDP" (boolValues ++ ["nearest-bridge" "non-tpmr-bridge" "customer-bridge"]))
(assertValueOneOf "DNSDefaultRoute" boolValues)
(assertValueOneOf "IPForward" (boolValues ++ ["ipv4" "ipv6"]))
(assertRemoved "IPForward" "IPv4Forwarding and IPv6Forwarding in systemd.network(5) and networkd.conf(5)")
(assertValueOneOf "IPv4Forwarding" boolValues)
(assertValueOneOf "IPv6Forwarding" boolValues)
(assertValueOneOf "IPMasquerade" (boolValues ++ ["ipv4" "ipv6" "both"]))
(assertValueOneOf "IPv6PrivacyExtensions" (boolValues ++ ["prefer-public" "kernel"]))
(assertValueOneOf "IPv6AcceptRA" boolValues)
@ -2835,6 +2843,7 @@ let
"systemd-networkd-wait-online.service"
"systemd-networkd.service"
"systemd-networkd.socket"
"systemd-networkd-persistent-storage.service"
];
environment.etc."systemd/networkd.conf" = renderConfig cfg.config;
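
The replacement for the removed `IPForward` key, as a sketch; the network unit name is a placeholder:

```
{
  systemd.network.networks."40-eth0".networkConfig = {
    # Previously IPForward = "ipv4"; that key is now rejected by the
    # assertion above in favor of the per-family settings:
    IPv4Forwarding = true;
    IPv6Forwarding = false;
  };
}
```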

View file

@ -219,7 +219,7 @@ in
# Fonts
"/etc/plymouth/fonts".source = pkgs.runCommand "plymouth-initrd-fonts" {} ''
mkdir -p $out
cp ${cfg.font} $out
cp ${escapeShellArg cfg.font} $out
'';
"/etc/fonts/fonts.conf".text = ''
<?xml version="1.0"?>

View file

@ -131,6 +131,7 @@ let
# Copy udev.
copy_bin_and_libs ${udev}/bin/udevadm
cp ${lib.getLib udev.kmod}/lib/libkmod.so* $out/lib
copy_bin_and_libs ${udev}/lib/systemd/systemd-sysctl
for BIN in ${udev}/lib/udev/*_id; do
copy_bin_and_libs $BIN

View file

@ -37,6 +37,8 @@ let
"cryptsetup.target"
"cryptsetup-pre.target"
"remote-cryptsetup.target"
] ++ optionals cfg.package.withTpm2Tss [
"tpm2.target"
] ++ [
"sigpwr.target"
"timers.target"
@ -105,6 +107,10 @@ let
"systemd-rfkill.service"
"systemd-rfkill.socket"
# Boot counting
"boot-complete.target"
] ++ lib.optional config.boot.loader.systemd-boot.bootCounting.enable "systemd-bless-boot.service" ++ [
# Hibernate / suspend.
"hibernate.target"
"suspend.target"
@ -112,6 +118,7 @@ let
"sleep.target"
"hybrid-sleep.target"
"systemd-hibernate.service"
"systemd-hibernate-clear.service"
"systemd-hybrid-sleep.service"
"systemd-suspend.service"
"systemd-suspend-then-hibernate.service"
@ -136,6 +143,16 @@ let
"systemd-ask-password-wall.path"
"systemd-ask-password-wall.service"
# Varlink APIs
"systemd-bootctl@.service"
"systemd-bootctl.socket"
"systemd-creds@.service"
"systemd-creds.socket"
] ++ lib.optional cfg.package.withTpm2Tss [
"systemd-pcrlock@.service"
"systemd-pcrlock.socket"
] ++ [
# Slices / containers.
"slices.target"
] ++ optionals cfg.package.withImportd [
@ -158,6 +175,7 @@ let
] ++ optionals cfg.package.withHostnamed [
"dbus-org.freedesktop.hostname1.service"
"systemd-hostnamed.service"
"systemd-hostnamed.socket"
] ++ optionals cfg.package.withPortabled [
"dbus-org.freedesktop.portable1.service"
"systemd-portabled.service"

View file

@ -70,6 +70,7 @@ let
"systemd-tmpfiles-setup-dev.service"
"systemd-tmpfiles-setup.service"
"timers.target"
"tpm2.target"
"umount.target"
"systemd-bsod.service"
] ++ cfg.additionalUpstreamUnits;
@ -111,8 +112,7 @@ let
inherit (config.boot.initrd) compressor compressorArgs prepend;
inherit (cfg) strip;
contents = map (path: { object = path; symlink = ""; }) (subtractLists cfg.suppressedStorePaths cfg.storePaths)
++ mapAttrsToList (_: v: { object = v.source; symlink = v.target; }) (filterAttrs (_: v: v.enable) cfg.contents);
contents = lib.filter ({ source, ... }: !lib.elem source cfg.suppressedStorePaths) cfg.storePaths;
};
in {
@ -171,7 +171,7 @@ in {
description = ''
Store paths to copy into the initrd as well.
'';
type = with types; listOf (oneOf [ singleLineStr package ]);
type = utils.systemdUtils.types.initrdStorePath;
default = [];
};
@ -344,7 +344,8 @@ in {
};
enableTpm2 = mkOption {
default = true;
default = cfg.package.withTpm2Tss;
defaultText = "boot.initrd.systemd.package.withTpm2Tss";
type = types.bool;
description = ''
Whether to enable TPM2 support in the initrd.
@ -460,6 +461,7 @@ in {
"${cfg.package}/lib/systemd/systemd-sulogin-shell"
"${cfg.package}/lib/systemd/systemd-sysctl"
"${cfg.package}/lib/systemd/systemd-bsod"
"${cfg.package}/lib/systemd/systemd-sysroot-fstab-check"
# generators
"${cfg.package}/lib/systemd/system-generators/systemd-debug-generator"
@ -486,7 +488,8 @@ in {
# fido2 support
"${cfg.package}/lib/cryptsetup/libcryptsetup-token-systemd-fido2.so"
"${pkgs.libfido2}/lib/libfido2.so.1"
] ++ jobScripts;
] ++ jobScripts
++ map (c: builtins.removeAttrs c ["text"]) (builtins.attrValues cfg.contents);
targets.initrd.aliases = ["default.target"];
units =

View file

@ -96,6 +96,7 @@ in {
"systemd-journald@.service"
"systemd-journal-flush.service"
"systemd-journal-catalog-update.service"
"systemd-journald-sync@.service"
] ++ (optional (!config.boot.isContainer) "systemd-journald-audit.socket") ++ [
"systemd-journald-dev-log.socket"
"syslog.socket"

View file

@ -2,10 +2,7 @@
cfg = config.systemd.shutdownRamfs;
ramfsContents = let
storePaths = map (p: "${p}\n") cfg.storePaths;
contents = lib.mapAttrsToList (_: v: "${v.source}\n${v.target}") (lib.filterAttrs (_: v: v.enable) cfg.contents);
in pkgs.writeText "shutdown-ramfs-contents" (lib.concatStringsSep "\n" (storePaths ++ contents));
ramfsContents = pkgs.writeText "shutdown-ramfs-contents.json" (builtins.toJSON cfg.storePaths);
in {
options.systemd.shutdownRamfs = {
@ -24,7 +21,7 @@ in {
description = ''
Store paths to copy into the shutdown ramfs as well.
'';
type = lib.types.listOf lib.types.singleLineStr;
type = utils.systemdUtils.types.initrdStorePath;
default = [];
};
};
@ -35,7 +32,8 @@ in {
"/etc/initrd-release".source = config.environment.etc.os-release.source;
"/etc/os-release".source = config.environment.etc.os-release.source;
};
systemd.shutdownRamfs.storePaths = [pkgs.runtimeShell "${pkgs.coreutils}/bin"];
systemd.shutdownRamfs.storePaths = [pkgs.runtimeShell "${pkgs.coreutils}/bin"]
++ map (c: builtins.removeAttrs c ["text"]) (builtins.attrValues cfg.contents);
systemd.mounts = [{
what = "tmpfs";

View file

@ -69,7 +69,7 @@ in
type = types.bool;
default = false;
description = ''
**Deprecated**, please use virtualisation.containers.cdi.dynamic.nvidia.enable instead.
**Deprecated**, please use hardware.nvidia-container-toolkit.enable instead.
Enable nvidia-docker wrapper, supporting NVIDIA GPUs inside docker containers.
'';
@ -186,7 +186,7 @@ in
# wrappers.
warnings = lib.optionals (cfg.enableNvidia && (lib.strings.versionAtLeast cfg.package.version "25")) [
''
You have set virtualisation.docker.enableNvidia. This option is deprecated, please set virtualisation.containers.cdi.dynamic.nvidia.enable instead.
You have set virtualisation.docker.enableNvidia. This option is deprecated, please set hardware.nvidia-container-toolkit.enable instead.
''
];
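
The migration the updated warning points at, as a sketch:

```
{
  # Deprecated:
  # virtualisation.docker.enableNvidia = true;

  # Replacement named in the warning:
  hardware.nvidia-container-toolkit.enable = true;
}
```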

View file

@ -79,7 +79,7 @@ with lib;
# ec2-get-console-output.
echo "-----BEGIN SSH HOST KEY FINGERPRINTS-----" > /dev/console
for i in /etc/ssh/ssh_host_*_key.pub; do
${config.programs.ssh.package}/bin/ssh-keygen -l -f $i > /dev/console
${config.programs.ssh.package}/bin/ssh-keygen -l -f $i > /dev/console || true
done
echo "-----END SSH HOST KEY FINGERPRINTS-----" > /dev/console
'';

View file

@ -82,7 +82,7 @@ in
type = types.bool;
default = false;
description = ''
**Deprecated**, please use virtualisation.containers.cdi.dynamic.nvidia.enable instead.
**Deprecated**, please use hardware.nvidia-container-toolkit.enable instead.
Enable use of NVidia GPUs from within podman containers.
'';

View file

@ -264,6 +264,7 @@ in {
docker-rootless = handleTestOn ["aarch64-linux" "x86_64-linux"] ./docker-rootless.nix {};
docker-registry = handleTest ./docker-registry.nix {};
docker-tools = handleTestOn ["x86_64-linux"] ./docker-tools.nix {};
docker-tools-nix-shell = runTest ./docker-tools-nix-shell.nix;
docker-tools-cross = handleTestOn ["x86_64-linux" "aarch64-linux"] ./docker-tools-cross.nix {};
docker-tools-overlay = handleTestOn ["x86_64-linux"] ./docker-tools-overlay.nix {};
documize = handleTest ./documize.nix {};
@ -275,6 +276,7 @@ in {
dovecot = handleTest ./dovecot.nix {};
drawterm = discoverTests (import ./drawterm.nix);
drbd = handleTest ./drbd.nix {};
druid = handleTestOn [ "x86_64-linux" ] ./druid {};
dublin-traceroute = handleTest ./dublin-traceroute.nix {};
earlyoom = handleTestOn ["x86_64-linux"] ./earlyoom.nix {};
early-mount-options = handleTest ./early-mount-options.nix {};
@ -300,6 +302,7 @@ in {
esphome = handleTest ./esphome.nix {};
etc = pkgs.callPackage ../modules/system/etc/test.nix { inherit evalMinimalConfig; };
activation = pkgs.callPackage ../modules/system/activation/test.nix { };
activation-lib = pkgs.callPackage ../modules/system/activation/lib/test.nix { };
activation-var = runTest ./activation/var.nix;
activation-nix-channel = runTest ./activation/nix-channel.nix;
activation-etc-overlay-mutable = runTest ./activation/etc-overlay-mutable.nix;
@ -576,6 +579,7 @@ in {
minidlna = handleTest ./minidlna.nix {};
miniflux = handleTest ./miniflux.nix {};
minio = handleTest ./minio.nix {};
miracle-wm = runTest ./miracle-wm.nix;
miriway = handleTest ./miriway.nix {};
misc = handleTest ./misc.nix {};
mjolnir = handleTest ./matrix/mjolnir.nix {};
@ -623,6 +627,7 @@ in {
nbd = handleTest ./nbd.nix {};
ncdns = handleTest ./ncdns.nix {};
ndppd = handleTest ./ndppd.nix {};
nix-channel = pkgs.callPackage ../modules/config/nix-channel/test.nix { };
nebula = handleTest ./nebula.nix {};
netbird = handleTest ./netbird.nix {};
nimdow = handleTest ./nimdow.nix {};

View file

@ -13,7 +13,7 @@ let
{ imports = [ ./common/user-account.nix ./common/x11.nix ];
hardware.graphics.enable = true;
virtualisation.memorySize = 256;
virtualisation.memorySize = 384;
environment = {
systemPackages = [ pkgs.armagetronad ];
variables.XAUTHORITY = "/home/${user}/.Xauthority";
@ -208,7 +208,7 @@ makeTest {
barrier.wait()
# Get to the Server Bookmarks screen on both clients. This takes a while so do it asynchronously.
barrier = threading.Barrier(3, timeout=120)
barrier = threading.Barrier(len(clients) + 1, timeout=240)
for client in clients:
threading.Thread(target=client_setup, args=(client, servers, barrier)).start()
barrier.wait()

View file

@ -4,7 +4,7 @@ import ./make-test-python.nix (
{
name = "crabfit";
meta.maintainers = with lib.maintainers; [ ];
meta.maintainers = [ ];
nodes = {
machine =

View file

@ -0,0 +1,95 @@
# nix-build -A nixosTests.docker-tools-nix-shell
{ config, lib, ... }:
let
inherit (config.node.pkgs.dockerTools) examples;
in
{
name = "docker-tools-nix-shell";
meta = with lib.maintainers; {
maintainers = [
infinisil
roberth
];
};
nodes = {
docker =
{ ... }:
{
virtualisation = {
diskSize = 3072;
docker.enable = true;
};
};
};
testScript = ''
docker.wait_for_unit("sockets.target")
with subtest("buildImageWithNixDB: Has a nix database"):
docker.succeed(
"docker load --input='${examples.nix}'",
"docker run --rm ${examples.nix.imageName} nix-store -q --references /bin/bash"
)
with subtest("buildNixShellImage: Can build a basic derivation"):
docker.succeed(
"${examples.nix-shell-basic} | docker load",
"docker run --rm nix-shell-basic bash -c 'buildDerivation && $out/bin/hello' | grep '^Hello, world!$'"
)
with subtest("buildNixShellImage: Runs the shell hook"):
docker.succeed(
"${examples.nix-shell-hook} | docker load",
"docker run --rm -it nix-shell-hook | grep 'This is the shell hook!'"
)
with subtest("buildNixShellImage: Sources stdenv, making build inputs available"):
docker.succeed(
"${examples.nix-shell-inputs} | docker load",
"docker run --rm -it nix-shell-inputs | grep 'Hello, world!'"
)
with subtest("buildNixShellImage: passAsFile works"):
docker.succeed(
"${examples.nix-shell-pass-as-file} | docker load",
"docker run --rm -it nix-shell-pass-as-file | grep 'this is a string'"
)
with subtest("buildNixShellImage: run argument works"):
docker.succeed(
"${examples.nix-shell-run} | docker load",
"docker run --rm -it nix-shell-run | grep 'This shell is not interactive'"
)
with subtest("buildNixShellImage: command argument works"):
docker.succeed(
"${examples.nix-shell-command} | docker load",
"docker run --rm -it nix-shell-command | grep 'This shell is interactive'"
)
with subtest("buildNixShellImage: home directory is writable by default"):
docker.succeed(
"${examples.nix-shell-writable-home} | docker load",
"docker run --rm -it nix-shell-writable-home"
)
with subtest("buildNixShellImage: home directory can be made non-existent"):
docker.succeed(
"${examples.nix-shell-nonexistent-home} | docker load",
"docker run --rm -it nix-shell-nonexistent-home"
)
with subtest("buildNixShellImage: can build derivations"):
docker.succeed(
"${examples.nix-shell-build-derivation} | docker load",
"docker run --rm -it nix-shell-build-derivation"
)
with subtest("streamLayeredImage: with nix db"):
docker.succeed(
"${examples.nix-layered} | docker load",
"docker run --rm ${examples.nix-layered.imageName} nix-store -q --references /bin/bash"
)
'';
}

View file

@ -60,7 +60,7 @@ let
};
nonRootTestImage =
pkgs.dockerTools.streamLayeredImage rec {
pkgs.dockerTools.streamLayeredImage {
name = "non-root-test";
tag = "latest";
uid = 1000;
@ -567,66 +567,6 @@ in {
docker.succeed("docker run --rm image-with-certs:latest test -r /etc/pki/tls/certs/ca-bundle.crt")
docker.succeed("docker image rm image-with-certs:latest")
with subtest("buildImageWithNixDB: Has a nix database"):
docker.succeed(
"docker load --input='${examples.nix}'",
"docker run --rm ${examples.nix.imageName} nix-store -q --references /bin/bash"
)
with subtest("buildNixShellImage: Can build a basic derivation"):
docker.succeed(
"${examples.nix-shell-basic} | docker load",
"docker run --rm nix-shell-basic bash -c 'buildDerivation && $out/bin/hello' | grep '^Hello, world!$'"
)
with subtest("buildNixShellImage: Runs the shell hook"):
docker.succeed(
"${examples.nix-shell-hook} | docker load",
"docker run --rm -it nix-shell-hook | grep 'This is the shell hook!'"
)
with subtest("buildNixShellImage: Sources stdenv, making build inputs available"):
docker.succeed(
"${examples.nix-shell-inputs} | docker load",
"docker run --rm -it nix-shell-inputs | grep 'Hello, world!'"
)
with subtest("buildNixShellImage: passAsFile works"):
docker.succeed(
"${examples.nix-shell-pass-as-file} | docker load",
"docker run --rm -it nix-shell-pass-as-file | grep 'this is a string'"
)
with subtest("buildNixShellImage: run argument works"):
docker.succeed(
"${examples.nix-shell-run} | docker load",
"docker run --rm -it nix-shell-run | grep 'This shell is not interactive'"
)
with subtest("buildNixShellImage: command argument works"):
docker.succeed(
"${examples.nix-shell-command} | docker load",
"docker run --rm -it nix-shell-command | grep 'This shell is interactive'"
)
with subtest("buildNixShellImage: home directory is writable by default"):
docker.succeed(
"${examples.nix-shell-writable-home} | docker load",
"docker run --rm -it nix-shell-writable-home"
)
with subtest("buildNixShellImage: home directory can be made non-existent"):
docker.succeed(
"${examples.nix-shell-nonexistent-home} | docker load",
"docker run --rm -it nix-shell-nonexistent-home"
)
with subtest("buildNixShellImage: can build derivations"):
docker.succeed(
"${examples.nix-shell-build-derivation} | docker load",
"docker run --rm -it nix-shell-build-derivation"
)
with subtest("streamLayeredImage: chown is persistent in fakeRootCommands"):
docker.succeed(
"${chownTestImage} | docker load",
@ -638,11 +578,5 @@ in {
"${nonRootTestImage} | docker load",
"docker run --rm ${chownTestImage.imageName} | diff /dev/stdin <(echo 12345:12345)"
)
with subtest("streamLayeredImage: with nix db"):
docker.succeed(
"${examples.nix-layered} | docker load",
"docker run --rm ${examples.nix-layered.imageName} nix-store -q --references /bin/bash"
)
'';
})

View file

@ -0,0 +1,289 @@
{ pkgs, ... }:
let
inherit (pkgs) lib;
commonConfig = {
"druid.zk.service.host" = "zk1:2181";
"druid.extensions.loadList" = ''[ "druid-histogram", "druid-datasketches", "mysql-metadata-storage", "druid-avro-extensions", "druid-parquet-extensions", "druid-lookups-cached-global", "druid-hdfs-storage","druid-kafka-indexing-service","druid-basic-security","druid-kinesis-indexing-service"]'';
"druid.startup.logging.logProperties" = "true";
"druid.metadata.storage.connector.connectURI" = "jdbc:mysql://mysql:3306/druid";
"druid.metadata.storage.connector.user" = "druid";
"druid.metadata.storage.connector.password" = "druid";
"druid.request.logging.type" = "file";
"druid.request.logging.dir" = "/var/log/druid/requests";
"druid.javascript.enabled" = "true";
"druid.sql.enable" = "true";
"druid.metadata.storage.type" = "mysql";
"druid.storage.type" = "hdfs";
"druid.storage.storageDirectory" = "/druid-deepstore";
};
log4jConfig = ''
<?xml version="1.0" encoding="UTF-8" ?>
<Configuration status="WARN">
<Appenders>
<Console name="Console" target="SYSTEM_OUT">
<PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
</Console>
</Appenders>
<Loggers>
<Root level="error">
<AppenderRef ref="Console"/>
</Root>
</Loggers>
</Configuration>
'';
log4j = pkgs.writeText "log4j2.xml" log4jConfig;
coreSite = {
"fs.defaultFS" = "hdfs://namenode:8020";
};
tests = {
default = testsForPackage {
druidPackage = pkgs.druid;
hadoopPackage = pkgs.hadoop_3_2;
};
};
testsForPackage =
args:
lib.recurseIntoAttrs {
druidCluster = testDruidCluster args;
passthru.override = args': testsForPackage (args // args');
};
testDruidCluster =
{ druidPackage, hadoopPackage, ... }:
pkgs.testers.nixosTest {
name = "druid-hdfs";
nodes = {
zk1 =
{ ... }:
{
services.zookeeper.enable = true;
networking.firewall.allowedTCPPorts = [ 2181 ];
};
namenode =
{ ... }:
{
services.hadoop = {
package = hadoopPackage;
hdfs = {
namenode = {
enable = true;
openFirewall = true;
formatOnInit = true;
};
};
inherit coreSite;
};
};
datanode =
{ ... }:
{
services.hadoop = {
package = hadoopPackage;
hdfs.datanode = {
enable = true;
openFirewall = true;
};
inherit coreSite;
};
};
mm =
{ ... }:
{
virtualisation.memorySize = 1024;
services.druid = {
inherit commonConfig log4j;
package = druidPackage;
extraClassPaths = [ "/etc/hadoop-conf" ];
middleManager = {
config = {
"druid.indexer.task.baseTaskDir" = "/tmp/druid/persistent/task";
"druid.worker.capacity" = 1;
"druid.indexer.logs.type" = "file";
"druid.indexer.logs.directory" = "/var/log/druid/indexer";
"druid.indexer.runner.startPort" = 8100;
"druid.indexer.runner.endPort" = 8101;
};
enable = true;
openFirewall = true;
};
};
services.hadoop = {
gatewayRole.enable = true;
package = hadoopPackage;
inherit coreSite;
};
};
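# overlord: hands indexing tasks to remote middleManagers and records task
# state in the metadata store.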
overlord =
{ ... }:
{
services.druid = {
inherit commonConfig log4j;
package = druidPackage;
extraClassPaths = [ "/etc/hadoop-conf" ];
overlord = {
config = {
"druid.indexer.runner.type" = "remote";
"druid.indexer.storage.type" = "metadata";
};
enable = true;
openFirewall = true;
};
};
services.hadoop = {
gatewayRole.enable = true;
package = hadoopPackage;
inherit coreSite;
};
};
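# broker: accepts queries on port 8082; thread and buffer sizes are kept
# small to fit the test VM.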
broker =
{ ... }:
{
services.druid = {
package = druidPackage;
inherit commonConfig log4j;
extraClassPaths = [ "/etc/hadoop-conf" ];
broker = {
config = {
"druid.plaintextPort" = 8082;
"druid.broker.http.numConnections" = "2";
"druid.server.http.numThreads" = "2";
"druid.processing.buffer.sizeBytes" = "100";
"druid.processing.numThreads" = "1";
"druid.processing.numMergeBuffers" = "1";
"druid.broker.cache.unCacheable" = ''["groupBy"]'';
"druid.lookup.snapshotWorkingDir" = "/opt/broker/lookups";
};
enable = true;
openFirewall = true;
};
};
services.hadoop = {
gatewayRole.enable = true;
package = hadoopPackage;
inherit coreSite;
};
};
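# historical: serves segments from two local segment-cache locations,
# capped at ~100 MB each.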
historical =
{ ... }:
{
services.druid = {
package = druidPackage;
inherit commonConfig log4j;
extraClassPaths = [ "/etc/hadoop-conf" ];
historical = {
config = {
"maxSize" = 200000000;
"druid.lookup.snapshotWorkingDir" = "/opt/historical/lookups";
};
segmentLocations = [
{
"path" = "/tmp/1";
"maxSize" = "100000000";
}
{
"path" = "/tmp/2";
"maxSize" = "100000000";
}
];
enable = true;
openFirewall = true;
};
};
services.hadoop = {
gatewayRole.enable = true;
package = hadoopPackage;
inherit coreSite;
};
};
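# coordinator: manages segment assignment; the short poll intervals make
# the test converge faster.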
coordinator =
{ ... }:
{
services.druid = {
package = druidPackage;
inherit commonConfig log4j;
extraClassPaths = [ "/etc/hadoop-conf" ];
coordinator = {
config = {
"druid.plaintextPort" = 9091;
"druid.service" = "coordinator";
"druid.coordinator.startDelay" = "PT10S";
"druid.coordinator.period" = "PT10S";
"druid.manager.config.pollDuration" = "PT10S";
"druid.manager.segments.pollDuration" = "PT10S";
"druid.manager.rules.pollDuration" = "PT10S";
};
enable = true;
openFirewall = true;
};
};
services.hadoop = {
gatewayRole.enable = true;
package = hadoopPackage;
inherit coreSite;
};
};
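# MariaDB provides the metadata storage; the init script creates the druid
# user referenced in commonConfig.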
mysql =
{ ... }:
{
services.mysql = {
enable = true;
package = pkgs.mariadb;
initialDatabases = [ { name = "druid"; } ];
initialScript = pkgs.writeText "mysql-init.sql" ''
CREATE USER 'druid'@'%' IDENTIFIED BY 'druid';
GRANT ALL PRIVILEGES ON druid.* TO 'druid'@'%';
'';
};
networking.firewall.allowedTCPPorts = [ 3306 ];
};
};
testScript = ''
start_all()
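# Bring up HDFS first: Druid cannot ingest anything until deep storage is reachable.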
namenode.wait_for_unit("hdfs-namenode")
namenode.wait_for_unit("network.target")
namenode.wait_for_open_port(8020)
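# Dump the listening sockets and the generated hdfs-site.xml to the journal to ease debugging on failure.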
namenode.succeed("ss -tulpne | systemd-cat")
namenode.succeed("cat /etc/hadoop*/hdfs-site.xml | systemd-cat")
namenode.wait_for_open_port(9870)
datanode.wait_for_unit("hdfs-datanode")
datanode.wait_for_unit("network.target")
mm.succeed("mkdir -p /quickstart/")
mm.succeed("cp -r ${pkgs.druid}/quickstart/* /quickstart/")
mm.succeed("touch /quickstart/tutorial/wikiticker-2015-09-12-sampled.json")
mm.succeed("zcat /quickstart/tutorial/wikiticker-2015-09-12-sampled.json.gz | head -n 10 > /quickstart/tutorial/wikiticker-2015-09-12-sampled.json || true")
mm.succeed("rm /quickstart/tutorial/wikiticker-2015-09-12-sampled.json.gz && gzip /quickstart/tutorial/wikiticker-2015-09-12-sampled.json")
namenode.succeed("sudo -u hdfs hdfs dfs -mkdir /druid-deepstore")
namenode.succeed("HADOOP_USER_NAME=druid sudo -u hdfs hdfs dfs -chown druid:hadoop /druid-deepstore")
### Druid tests
coordinator.wait_for_unit("druid-coordinator")
overlord.wait_for_unit("druid-overlord")
historical.wait_for_unit("druid-historical")
mm.wait_for_unit("druid-middleManager")
coordinator.wait_for_open_port(9091)
overlord.wait_for_open_port(8090)
historical.wait_for_open_port(8083)
mm.wait_for_open_port(8091)
broker.wait_for_unit("network.target")
broker.wait_for_open_port(8082)
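# Submit the tutorial indexing task to the coordinator, wait until the
# wikipedia datasource is registered and queryable, then query it through the broker.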
broker.succeed("curl -X 'POST' -H 'Content-Type:application/json' -d @${pkgs.druid}/quickstart/tutorial/wikipedia-index.json http://coordinator:9091/druid/indexer/v1/task")
broker.wait_until_succeeds("curl http://coordinator:9091/druid/coordinator/v1/metadata/datasources | grep 'wikipedia'")
broker.wait_until_succeeds("curl http://localhost:8082/druid/v2/datasources/ | grep wikipedia")
broker.succeed("curl -X 'POST' -H 'Content-Type:application/json' -d @${pkgs.druid}/quickstart/tutorial/wikipedia-top-pages.json http://localhost:8082/druid/v2/")
'';
};
in
tests

View file

@ -24,7 +24,7 @@ import ./make-test-python.nix (
{
networking.firewall.allowedTCPPorts = [ 80 ];
services.fcgiwrap.gitolite = {
services.fcgiwrap.instances.gitolite = {
process.user = "gitolite";
process.group = "gitolite";
socket = { inherit (config.services.nginx) user group; };
@ -64,7 +64,7 @@ import ./make-test-python.nix (
fastcgi_param SCRIPT_FILENAME ${pkgs.gitolite}/bin/gitolite-shell;
# use Unix domain socket or inet socket
fastcgi_pass unix:${config.services.fcgiwrap.gitolite.socket.address};
fastcgi_pass unix:${config.services.fcgiwrap.instances.gitolite.socket.address};
'';
};
