Project import generated by Copybara.
GitOrigin-RevId: ac718d02867a84b42522a0ece52d841188208f2c
parent e0ebec161a
commit 83405b6dd2
3567 changed files with 46874 additions and 24147 deletions
third_party/nixpkgs/.github/CODEOWNERS (vendored) | 1

@@ -237,6 +237,7 @@ pkgs/development/python-modules/buildcatrust/ @ajs124 @lukegb @mweinelt
 /pkgs/applications/editors/vim/plugins @figsoda @jonringer
 
 # VsCode Extensions
+/pkgs/applications/editors/vscode @superherointj
 /pkgs/applications/editors/vscode/extensions @jonringer
 
 # Prometheus exporter modules and tests
third_party/nixpkgs/.github/workflows/check-maintainers-sorted.yaml (vendored, new file) | 21

@@ -0,0 +1,21 @@
+name: "Check that maintainer list is sorted"
+
+on:
+  pull_request:
+    paths:
+      - 'maintainers/maintainer-list.nix'
+permissions:
+  contents: read
+
+jobs:
+  nixos:
+    runs-on: ubuntu-latest
+    if: github.repository_owner == 'NixOS'
+    steps:
+      - uses: actions/checkout@v3
+      - uses: cachix/install-nix-action@v19
+        with:
+          # explicitly enable sandbox
+          extra_nix_config: sandbox = true
+      - name: Check that maintainer-list.nix is sorted
+        run: nix-instantiate --eval maintainers/scripts/check-maintainers-sorted.nix
@@ -71,6 +71,7 @@ The main difference between `fetchurl` and `fetchzip` is in how they store the c
 
 - `relative`: Similar to using `git-diff`'s `--relative` flag, only keep changes inside the specified directory, making paths relative to it.
 - `stripLen`: Remove the first `stripLen` components of pathnames in the patch.
+- `decode`: Pipe the downloaded data through this command before processing it as a patch.
 - `extraPrefix`: Prefix pathnames by this string.
 - `excludes`: Exclude files matching these patterns (applies after the above arguments).
 - `includes`: Include only files matching these patterns (applies after the above arguments).
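The list in the hunk above describes `fetchpatch`'s patch-rewriting arguments; as a rough sketch of how a few of them combine (the URL, paths, and hash below are placeholders, not taken from this commit):

```nix
{ fetchpatch }:

fetchpatch {
  # Hypothetical upstream patch URL, for illustration only
  url = "https://example.org/upstream/fix-build.patch";
  # Drop the first component of every path in the patch...
  stripLen = 1;
  # ...then re-prefix the remaining paths to match this package's layout
  extraPrefix = "src/";
  # Ignore hunks that only touch test data (applied after the rewrites above)
  excludes = [ "tests/*" ];
  # Placeholder; replace with the real output hash reported by Nix
  hash = "sha256-0000000000000000000000000000000000000000000=";
}
```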
@@ -116,10 +116,6 @@ For convenience, it also adds `dconf.lib` for a GIO module implementing a GSetti
 
 - []{#ssec-gnome-hooks-gobject-introspection} `gobject-introspection` setup hook populates `GI_TYPELIB_PATH` variable with `lib/girepository-1.0` directories of dependencies, which is then added to wrapper by `wrapGAppsHook`. It also adds `share` directories of dependencies to `XDG_DATA_DIRS`, which is intended to promote GIR files but it also [pollutes the closures](https://github.com/NixOS/nixpkgs/issues/32790) of packages using `wrapGAppsHook`.
-
-::: {.warning}
-The setup hook [currently](https://github.com/NixOS/nixpkgs/issues/56943) does not work in expressions with `strictDeps` enabled, like Python packages. In those cases, you will need to disable it with `strictDeps = false;`.
-:::
 
 - []{#ssec-gnome-hooks-gst-grl-plugins} Setup hooks of `gst_all_1.gstreamer` and `grilo` will populate the `GST_PLUGIN_SYSTEM_PATH_1_0` and `GRL_PLUGIN_PATH` variables, respectively, which will then be added to the wrapper by `wrapGAppsHook`.
 
 You can also pass additional arguments to `makeWrapper` using `gappsWrapperArgs` in `preFixup` hook:
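The hunk above ends just before the manual's code sample; a minimal sketch of the `gappsWrapperArgs` pattern it refers to could look like the following (the extra wrapper flags and the `shared-mime-info` reference are illustrative and assumed to be in scope of the derivation):

```nix
preFixup = ''
  gappsWrapperArgs+=(
    # Expose an extra data directory to the wrapped binaries
    --prefix XDG_DATA_DIRS : "${shared-mime-info}/share"
    # Any other makeWrapper flag works here as well; this one is purely illustrative
    --set EXAMPLE_VARIABLE "example-value"
  )
'';
```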
@@ -71,8 +71,10 @@ $ nix-env -f '<nixpkgs>' -qaP -A haskell.compiler
 haskell.compiler.ghc810 ghc-8.10.7
 haskell.compiler.ghc88 ghc-8.8.4
 haskell.compiler.ghc90 ghc-9.0.2
-haskell.compiler.ghc92 ghc-9.2.4
+haskell.compiler.ghc924 ghc-9.2.4
 haskell.compiler.ghc925 ghc-9.2.5
+haskell.compiler.ghc926 ghc-9.2.6
+haskell.compiler.ghc92 ghc-9.2.7
 haskell.compiler.ghc942 ghc-9.4.2
 haskell.compiler.ghc943 ghc-9.4.3
 haskell.compiler.ghc94 ghc-9.4.4

@@ -86,13 +88,15 @@ haskell.compiler.ghc924Binary ghc-binary-9.2.4
 haskell.compiler.ghc924BinaryMinimal ghc-binary-9.2.4
 haskell.compiler.integer-simple.ghc810 ghc-integer-simple-8.10.7
 haskell.compiler.integer-simple.ghc8107 ghc-integer-simple-8.10.7
-haskell.compiler.integer-simple.ghc884 ghc-integer-simple-8.8.4
 haskell.compiler.integer-simple.ghc88 ghc-integer-simple-8.8.4
+haskell.compiler.integer-simple.ghc884 ghc-integer-simple-8.8.4
 haskell.compiler.native-bignum.ghc90 ghc-native-bignum-9.0.2
 haskell.compiler.native-bignum.ghc902 ghc-native-bignum-9.0.2
-haskell.compiler.native-bignum.ghc92 ghc-native-bignum-9.2.4
 haskell.compiler.native-bignum.ghc924 ghc-native-bignum-9.2.4
 haskell.compiler.native-bignum.ghc925 ghc-native-bignum-9.2.5
+haskell.compiler.native-bignum.ghc926 ghc-native-bignum-9.2.6
+haskell.compiler.native-bignum.ghc92 ghc-native-bignum-9.2.7
+haskell.compiler.native-bignum.ghc927 ghc-native-bignum-9.2.7
 haskell.compiler.native-bignum.ghc942 ghc-native-bignum-9.4.2
 haskell.compiler.native-bignum.ghc943 ghc-native-bignum-9.4.3
 haskell.compiler.native-bignum.ghc94 ghc-native-bignum-9.4.4

@@ -105,15 +109,15 @@ Each of those compiler versions has a corresponding attribute set built using
 it. However, the non-standard package sets are not tested regularly and, as a
 result, contain fewer working packages. The corresponding package set for GHC
 9.4.4 is `haskell.packages.ghc944`. In fact `haskellPackages` is just an alias
-for `haskell.packages.ghc924`:
+for `haskell.packages.ghc927`:
 
 ```console
-$ nix-env -f '<nixpkgs>' -qaP -A haskell.packages.ghc924
-haskell.packages.ghc924.a50 a50-0.5
-haskell.packages.ghc924.AAI AAI-0.2.0.1
-haskell.packages.ghc924.aasam aasam-0.2.0.0
-haskell.packages.ghc924.abacate abacate-0.0.0.0
-haskell.packages.ghc924.abc-puzzle abc-puzzle-0.2.1
+$ nix-env -f '<nixpkgs>' -qaP -A haskell.packages.ghc927
+haskell.packages.ghc927.a50 a50-0.5
+haskell.packages.ghc927.AAI AAI-0.2.0.1
+haskell.packages.ghc927.aasam aasam-0.2.0.0
+haskell.packages.ghc927.abacate abacate-0.0.0.0
+haskell.packages.ghc927.abc-puzzle abc-puzzle-0.2.1
 …
 ```
@@ -13,7 +13,7 @@ into your `configuration.nix` or bring them into scope with `nix-shell -p rustc
 
 For other versions such as daily builds (beta and nightly),
 use either `rustup` from nixpkgs (which will manage the rust installation in your home directory),
-or use a community maintained [Rust overlay](#using-community-rust-overlays).
+or use [community maintained Rust toolchains](#using-community-maintained-rust-toolchains).
 
 ## `buildRustPackage`: Compiling Rust applications with Cargo {#compiling-rust-applications-with-cargo}
 
@@ -686,31 +686,61 @@ $ cargo build
 $ cargo test
 ```
 
-### Controlling Rust Version Inside `nix-shell` {#controlling-rust-version-inside-nix-shell}
+## Using community maintained Rust toolchains {#using-community-maintained-rust-toolchains}
 
-To control your rust version (i.e. use nightly) from within `shell.nix` (or
-other nix expressions) you can use the following `shell.nix`
+::: {.note}
+Note: The following projects cannot be used within nixpkgs since [IFD](#ssec-import-from-derivation) is disallowed.
+To package things that require Rust nightly, `RUSTC_BOOTSTRAP = true;` can sometimes be used as a hack.
+:::
+
+There are two community maintained approaches to Rust toolchain management:
+- [oxalica's Rust overlay](https://github.com/oxalica/rust-overlay)
+- [fenix](https://github.com/nix-community/fenix)
+
+Despite their names, both projects provides a similar set of packages and overlays under different APIs.
+
+Oxalica's overlay allows you to select a particular Rust version without you providing a hash or a flake input,
+but comes with a larger git repository than fenix.
+
+Fenix also provides rust-analyzer nightly in addition to the Rust toolchains.
+
+Both oxalica's overlay and fenix better integrate with nix and cache optimizations.
+Because of this and ergonomics, either of those community projects
+should be preferred to the Mozilla's Rust overlay ([nixpkgs-mozilla](https://github.com/mozilla/nixpkgs-mozilla)).
+
+The following documentation demonstrates examples using fenix and oxalica's Rust overlay
+with `nix-shell` and building derivations. More advanced usages like flake usage
+are documented in their own repositories.
+
+### Using Rust nightly with `nix-shell` {#using-rust-nightly-with-nix-shell}
+
+Here is a simple `shell.nix` that provides Rust nightly (default profile) using fenix:
 
 ```nix
-# Latest Nightly
-with import <nixpkgs> {};
-let src = fetchFromGitHub {
-      owner = "mozilla";
-      repo = "nixpkgs-mozilla";
-      # commit from: 2019-05-15
-      rev = "9f35c4b09fd44a77227e79ff0c1b4b6a69dff533";
-      hash = "sha256-18h0nvh55b5an4gmlgfbvwbyqj91bklf1zymis6lbdh75571qaz0=";
-   };
+with import <nixpkgs> { };
+let
+  fenix = callPackage
+    (fetchFromGitHub {
+      owner = "nix-community";
+      repo = "fenix";
+      # commit from: 2023-03-03
+      rev = "e2ea04982b892263c4d939f1cc3bf60a9c4deaa1";
+      hash = "sha256-AsOim1A8KKtMWIxG+lXh5Q4P2bhOZjoUhFWJ1EuZNNk=";
+    })
+    { };
 in
-with import "${src.out}/rust-overlay.nix" pkgs pkgs;
-stdenv.mkDerivation {
+mkShell {
   name = "rust-env";
-  buildInputs = [
-    # Note: to use stable, just replace `nightly` with `stable`
-    latest.rustChannels.nightly.rust
+  nativeBuildInputs = [
+    # Note: to use stable, just replace `default` with `stable`
+    fenix.default.toolchain
 
-    # Add some extra dependencies from `pkgs`
-    pkg-config openssl
+    # Example Build-time Additional Dependencies
+    pkg-config
+  ];
+  buildInputs = [
+    # Example Run-time Additional Dependencies
+    openssl
   ];
 
   # Set Environment Variables
@@ -718,116 +748,66 @@ stdenv.mkDerivation {
 }
 ```
 
-Now run:
+Save this to `shell.nix`, then run:
 
 ```ShellSession
 $ rustc --version
-rustc 1.26.0-nightly (188e693b3 2018-03-26)
+rustc 1.69.0-nightly (13471d3b2 2023-03-02)
 ```
 
 To see that you are using nightly.
 
-## Using community Rust overlays {#using-community-rust-overlays}
-
-There are two community maintained approaches to Rust toolchain management:
-- [oxalica's Rust overlay](https://github.com/oxalica/rust-overlay)
-- [fenix](https://github.com/nix-community/fenix)
-
-Oxalica's overlay allows you to select a particular Rust version and components.
-See [their documentation](https://github.com/oxalica/rust-overlay#rust-overlay) for more
-detailed usage.
-
-Fenix is an alternative to `rustup` and can also be used as an overlay.
-
-Both oxalica's overlay and fenix better integrate with nix and cache optimizations.
-Because of this and ergonomics, either of those community projects
-should be preferred to the Mozilla's Rust overlay (`nixpkgs-mozilla`).
-
-### How to select a specific `rustc` and toolchain version {#how-to-select-a-specific-rustc-and-toolchain-version}
-
-You can consume the oxalica overlay and use it to grab a specific Rust toolchain version.
-Here is an example `shell.nix` showing how to grab the current stable toolchain:
+Oxalica's Rust overlay has more complete examples of `shell.nix` (and cross compilation) under its
+[`examples` directory](https://github.com/oxalica/rust-overlay/tree/e53e8853aa7b0688bc270e9e6a681d22e01cf299/examples).
+
+### Using Rust nightly in a derivation with `buildRustPackage` {#using-rust-nightly-in-a-derivation-with-buildrustpackage}
+
+You can also use Rust nightly to build rust packages using `makeRustPlatform`.
+The below snippet demonstrates invoking `buildRustPackage` with a Rust toolchain from oxalica's overlay:
 
 ```nix
-{ pkgs ? import <nixpkgs> {
-    overlays = [
-      (import (fetchTarball "https://github.com/oxalica/rust-overlay/archive/master.tar.gz"))
-    ];
-  }
-}:
-pkgs.mkShell {
-  nativeBuildInputs = with pkgs; [
-    pkg-config
-    rust-bin.stable.latest.minimal
-  ];
-}
-```
-
-You can try this out by:
-1. Saving that to `shell.nix`
-2. Executing `nix-shell --pure --command 'rustc --version'`
-
-As of writing, this prints out `rustc 1.56.0 (09c42c458 2021-10-18)`.
-
-### How to use an overlay toolchain in a derivation {#how-to-use-an-overlay-toolchain-in-a-derivation}
-
-You can also use an overlay's Rust toolchain with `buildRustPackage`.
-The below snippet demonstrates invoking `buildRustPackage` with an oxalica overlay selected Rust toolchain:
-```nix
-with import <nixpkgs> {
+with import <nixpkgs>
+{
   overlays = [
     (import (fetchTarball "https://github.com/oxalica/rust-overlay/archive/master.tar.gz"))
   ];
 };
+let
+  rustPlatform = makeRustPlatform {
+    cargo = rust-bin.stable.latest.minimal;
+    rustc = rust-bin.stable.latest.minimal;
+  };
+in
 
 rustPlatform.buildRustPackage rec {
   pname = "ripgrep";
   version = "12.1.1";
-  nativeBuildInputs = [
-    rust-bin.stable.latest.minimal
-  ];
 
   src = fetchFromGitHub {
     owner = "BurntSushi";
     repo = "ripgrep";
     rev = version;
-    hash = "sha256-1hqps7l5qrjh9f914r5i6kmcz6f1yb951nv4lby0cjnp5l253kps=";
+    hash = "sha256-+s5RBC3XSgb8omTbUNLywZnP6jSxZBKSS1BmXOjRF8M=";
   };
 
-  cargoSha256 = "03wf9r2csi6jpa7v5sw5lpxkrk4wfzwmzx7k3991q3bdjzcwnnwp";
+  cargoHash = "sha256-l1vL2ZdtDRxSGvP0X/l3nMw8+6WF67KPutJEzUROjg8=";
+
+  doCheck = false;
 
   meta = with lib; {
     description = "A fast line-oriented regex search tool, similar to ag and ack";
     homepage = "https://github.com/BurntSushi/ripgrep";
-    license = licenses.unlicense;
-    maintainers = [ maintainers.tailhook ];
+    license = with licenses; [ mit unlicense ];
+    maintainers = with maintainers; [ tailhook ];
   };
 }
 ```
 
 Follow the below steps to try that snippet.
-1. create a new directory
 1. save the above snippet as `default.nix` in that directory
-1. cd into that directory and run `nix-build`
+2. cd into that directory and run `nix-build`
 
-### Rust overlay installation {#rust-overlay-installation}
-
-You can use this overlay by either changing your local nixpkgs configuration,
-or by adding the overlay declaratively in a nix expression, e.g. in `configuration.nix`.
-For more information see [the manual on installing overlays](#sec-overlays-install).
-
-### Declarative Rust overlay installation {#declarative-rust-overlay-installation}
-
-This snippet shows how to use oxalica's Rust overlay.
-Add the following to your `configuration.nix`, `home-configuration.nix`, `shell.nix`, or similar:
-
-```nix
-{ pkgs ? import <nixpkgs> {
-    overlays = [
-      (import (builtins.fetchTarball "https://github.com/oxalica/rust-overlay/archive/master.tar.gz"))
-      # Further overlays go here
-    ];
-  };
-};
-```
-
-Note that this will fetch the latest overlay version when rebuilding your system.
+Fenix also has examples with `buildRustPackage`,
+[crane](https://github.com/ipetkov/crane),
+[naersk](https://github.com/nix-community/naersk),
+and cross compilation in its [Examples](https://github.com/nix-community/fenix#examples) section.
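The note at the top of the new Rust section above mentions `RUSTC_BOOTSTRAP = true;` as an occasional workaround for crates that want nightly-only features while building with the stable `rustc` from nixpkgs. A minimal sketch of where that flag sits in a `buildRustPackage` call follows; the package name, source, and hashes are placeholders, not part of this commit:

```nix
rustPlatform.buildRustPackage rec {
  pname = "example-crate";   # placeholder
  version = "0.1.0";         # placeholder

  src = ./.;                 # placeholder source
  cargoHash = "sha256-0000000000000000000000000000000000000000000=";  # placeholder

  # Lets the stable compiler accept nightly-only feature flags; use sparingly.
  RUSTC_BOOTSTRAP = true;
}
```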
third_party/nixpkgs/lib/attrsets.nix (vendored) | 60

@@ -333,6 +333,66 @@ rec {
     ) (attrNames set)
   );
 
+  /*
+    Like builtins.foldl' but for attribute sets.
+    Iterates over every name-value pair in the given attribute set.
+    The result of the callback function is often called `acc` for accumulator. It is passed between callbacks from left to right and the final `acc` is the return value of `foldlAttrs`.
+
+    Attention:
+      There is a completely different function
+      `lib.foldAttrs`
+      which has nothing to do with this function, despite the similar name.
+
+    Example:
+      foldlAttrs
+        (acc: name: value: {
+          sum = acc.sum + value;
+          names = acc.names ++ [name];
+        })
+        { sum = 0; names = []; }
+        {
+          foo = 1;
+          bar = 10;
+        }
+      ->
+        {
+          sum = 11;
+          names = ["bar" "foo"];
+        }
+
+      foldlAttrs
+        (throw "function not needed")
+        123
+        {};
+      ->
+        123
+
+      foldlAttrs
+        (_: _: v: v)
+        (throw "initial accumulator not needed")
+        { z = 3; a = 2; };
+      ->
+        3
+
+    The accumulator doesn't have to be an attrset.
+    It can be as simple as a number or string.
+
+      foldlAttrs
+        (acc: _: v: acc * 10 + v)
+        1
+        { z = 1; a = 2; };
+      ->
+        121
+
+    Type:
+      foldlAttrs :: ( a -> String -> b -> a ) -> a -> { ... :: b } -> a
+  */
+  foldlAttrs = f: init: set:
+    foldl'
+      (acc: name: f acc name set.${name})
+      init
+      (attrNames set);
+
   /* Apply fold functions to values grouped by key.
 
     Example:
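The documentation comment in the hunk above already shows evaluation examples; as a quick usage sketch outside the library itself (the file name is invented, and a nixpkgs checkout recent enough to ship `lib.foldlAttrs` is assumed):

```nix
# foldl-attrs-demo.nix — evaluate with: nix-instantiate --eval ./foldl-attrs-demo.nix
let
  lib = import <nixpkgs/lib>;
in
# Attribute names are visited in sorted order (bar, then foo),
# threading the accumulator from left to right.
lib.foldlAttrs
  (acc: name: value: acc + "${name}=${toString value};")
  ""
  { foo = 1; bar = 10; }
# => "bar=10;foo=1;"
```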
third_party/nixpkgs/lib/default.nix (vendored) | 2

@@ -78,7 +78,7 @@ let
     composeManyExtensions makeExtensible makeExtensibleWithCustomName;
   inherit (self.attrsets) attrByPath hasAttrByPath setAttrByPath
     getAttrFromPath attrVals attrValues getAttrs catAttrs filterAttrs
-    filterAttrsRecursive foldAttrs collect nameValuePair mapAttrs
+    filterAttrsRecursive foldlAttrs foldAttrs collect nameValuePair mapAttrs
     mapAttrs' mapAttrsToList concatMapAttrs mapAttrsRecursive mapAttrsRecursiveCond
     genAttrs isDerivation toDerivation optionalAttrs
     zipAttrsWithNames zipAttrsWith zipAttrs recursiveUpdateUntil
third_party/nixpkgs/lib/modules.nix (vendored) | 34

@@ -21,6 +21,7 @@ let
     isBool
     isFunction
     isList
+    isPath
     isString
     length
     mapAttrs

@@ -45,6 +46,9 @@ let
     showOption
     unknownModule
     ;
+  inherit (lib.strings)
+    isConvertibleWithToString
+    ;
 
   showDeclPrefix = loc: decl: prefix:
     " - option(s) with prefix `${showOption (loc ++ [prefix])}' in module `${decl._file}'";

@@ -403,7 +407,7 @@ rec {
             key = module.key;
             module = module;
             modules = collectedImports.modules;
-            disabled = module.disabledModules ++ collectedImports.disabled;
+            disabled = (if module.disabledModules != [] then [{ file = module._file; disabled = module.disabledModules; }] else []) ++ collectedImports.disabled;
           }) initialModules);
 
   # filterModules :: String -> { disabled, modules } -> [ Module ]

@@ -412,10 +416,30 @@ rec {
   # modules recursively. It returns the final list of unique-by-key modules
   filterModules = modulesPath: { disabled, modules }:
     let
-      moduleKey = m: if isString m && (builtins.substring 0 1 m != "/")
-        then toString modulesPath + "/" + m
-        else toString m;
-      disabledKeys = map moduleKey disabled;
+      moduleKey = file: m:
+        if isString m
+        then
+          if builtins.substring 0 1 m == "/"
+          then m
+          else toString modulesPath + "/" + m
+
+        else if isConvertibleWithToString m
+        then
+          if m?key && m.key != toString m
+          then
+            throw "Module `${file}` contains a disabledModules item that is an attribute set that can be converted to a string (${toString m}) but also has a `.key` attribute (${m.key}) with a different value. This makes it ambiguous which module should be disabled."
+          else
+            toString m
+
+        else if m?key
+        then
+          m.key
+
+        else if isAttrs m
+        then throw "Module `${file}` contains a disabledModules item that is an attribute set, presumably a module, that does not have a `key` attribute. This means that the module system doesn't have any means to identify the module that should be disabled. Make sure that you've put the correct value in disabledModules: a string path relative to modulesPath, a path value, or an attribute set with a `key` attribute."
+        else throw "Each disabledModules item must be a path, string, or a attribute set with a key attribute, or a value supported by toString. However, one of the disabledModules items in `${toString file}` is none of that, but is of type ${builtins.typeOf m}.";
+
+      disabledKeys = concatMap ({ file, disabled }: map (moduleKey file) disabled) disabled;
       keyFilter = filter (attrs: ! elem attrs.key disabledKeys);
     in map (attrs: attrs.module) (builtins.genericClosure {
       startSet = keyFilter modules;
third_party/nixpkgs/lib/path/default.nix (vendored) | 77

@@ -15,6 +15,9 @@ let
     last
     genList
     elemAt
+    all
+    concatMap
+    foldl'
     ;
 
   inherit (lib.strings)

@@ -190,6 +193,80 @@ in /* No rec! Add dependencies on this file at the top. */ {
     subpathInvalidReason value == null;
 
+
+  /* Join subpath strings together using `/`, returning a normalised subpath string.
+
+    Like `concatStringsSep "/"` but safer, specifically:
+
+    - All elements must be valid subpath strings, see `lib.path.subpath.isValid`
+
+    - The result gets normalised, see `lib.path.subpath.normalise`
+
+    - The edge case of an empty list gets properly handled by returning the neutral subpath `"./."`
+
+    Laws:
+
+    - Associativity:
+
+          subpath.join [ x (subpath.join [ y z ]) ] == subpath.join [ (subpath.join [ x y ]) z ]
+
+    - Identity - `"./."` is the neutral element for normalised paths:
+
+          subpath.join [ ] == "./."
+          subpath.join [ (subpath.normalise p) "./." ] == subpath.normalise p
+          subpath.join [ "./." (subpath.normalise p) ] == subpath.normalise p
+
+    - Normalisation - the result is normalised according to `lib.path.subpath.normalise`:
+
+          subpath.join ps == subpath.normalise (subpath.join ps)
+
+    - For non-empty lists, the implementation is equivalent to normalising the result of `concatStringsSep "/"`.
+      Note that the above laws can be derived from this one.
+
+          ps != [] -> subpath.join ps == subpath.normalise (concatStringsSep "/" ps)
+
+    Type:
+      subpath.join :: [ String ] -> String
+
+    Example:
+      subpath.join [ "foo" "bar/baz" ]
+      => "./foo/bar/baz"
+
+      # normalise the result
+      subpath.join [ "./foo" "." "bar//./baz/" ]
+      => "./foo/bar/baz"
+
+      # passing an empty list results in the current directory
+      subpath.join [ ]
+      => "./."
+
+      # elements must be valid subpath strings
+      subpath.join [ /foo ]
+      => <error>
+      subpath.join [ "" ]
+      => <error>
+      subpath.join [ "/foo" ]
+      => <error>
+      subpath.join [ "../foo" ]
+      => <error>
+  */
+  subpath.join =
+    # The list of subpaths to join together
+    subpaths:
+    # Fast in case all paths are valid
+    if all isValid subpaths
+    then joinRelPath (concatMap splitRelPath subpaths)
+    else
+      # Otherwise we take our time to gather more info for a better error message
+      # Strictly go through each path, throwing on the first invalid one
+      # Tracks the list index in the fold accumulator
+      foldl' (i: path:
+        if isValid path
+        then i + 1
+        else throw ''
+          lib.path.subpath.join: Element at index ${toString i} is not a valid subpath string:
+              ${subpathInvalidReason path}''
+      ) 0 subpaths;
+
   /* Normalise a subpath. Throw an error if the subpath isn't valid, see
   `lib.path.subpath.isValid`
 
third_party/nixpkgs/lib/path/tests/unit.nix (vendored) | 30

@@ -107,6 +107,36 @@ let
     expected = true;
   };
 
+  # Test examples from the lib.path.subpath.join documentation
+  testSubpathJoinExample1 = {
+    expr = subpath.join [ "foo" "bar/baz" ];
+    expected = "./foo/bar/baz";
+  };
+  testSubpathJoinExample2 = {
+    expr = subpath.join [ "./foo" "." "bar//./baz/" ];
+    expected = "./foo/bar/baz";
+  };
+  testSubpathJoinExample3 = {
+    expr = subpath.join [ ];
+    expected = "./.";
+  };
+  testSubpathJoinExample4 = {
+    expr = (builtins.tryEval (subpath.join [ /foo ])).success;
+    expected = false;
+  };
+  testSubpathJoinExample5 = {
+    expr = (builtins.tryEval (subpath.join [ "" ])).success;
+    expected = false;
+  };
+  testSubpathJoinExample6 = {
+    expr = (builtins.tryEval (subpath.join [ "/foo" ])).success;
+    expected = false;
+  };
+  testSubpathJoinExample7 = {
+    expr = (builtins.tryEval (subpath.join [ "../foo" ])).success;
+    expected = false;
+  };
+
   # Test examples from the lib.path.subpath.normalise documentation
   testSubpathNormaliseExample1 = {
     expr = subpath.normalise "foo//bar";
third_party/nixpkgs/lib/systems/default.nix (vendored) | 3

@@ -140,6 +140,7 @@ rec {
 
     qemuArch =
       if final.isAarch32 then "arm"
+      else if final.isS390 && !final.isS390x then null
      else if final.isx86_64 then "x86_64"
       else if final.isx86 then "i386"
       else final.uname.processor;

@@ -193,7 +194,7 @@ rec {
       then "${pkgs.runtimeShell} -c '\"$@\"' --"
       else if final.isWindows
       then "${wine}/bin/wine${lib.optionalString (final.parsed.cpu.bits == 64) "64"}"
-      else if final.isLinux && pkgs.stdenv.hostPlatform.isLinux
+      else if final.isLinux && pkgs.stdenv.hostPlatform.isLinux && final.qemuArch != null
       then "${qemu-user}/bin/qemu-${final.qemuArch}"
       else if final.isWasi
       then "${pkgs.wasmtime}/bin/wasmtime"
third_party/nixpkgs/lib/systems/doubles.nix (vendored) | 2

@@ -22,7 +22,7 @@ let
     "x86_64-solaris"
 
     # JS
-    "js-ghcjs"
+    "javascript-ghcjs"
 
     # Linux
     "aarch64-linux" "armv5tel-linux" "armv6l-linux" "armv7a-linux"
third_party/nixpkgs/lib/systems/examples.nix (vendored) | 5

@@ -329,6 +329,9 @@ rec {
 
   # Ghcjs
   ghcjs = {
-    config = "js-unknown-ghcjs";
+    # This triple is special to GHC/Cabal/GHCJS and not recognized by autotools
+    # See: https://gitlab.haskell.org/ghc/ghc/-/commit/6636b670233522f01d002c9b97827d00289dbf5c
+    # https://github.com/ghcjs/ghcjs/issues/53
+    config = "javascript-unknown-ghcjs";
   };
 }
third_party/nixpkgs/lib/systems/inspect.nix (vendored) | 2

@@ -49,7 +49,7 @@ rec {
   isM68k = { cpu = { family = "m68k"; }; };
   isS390 = { cpu = { family = "s390"; }; };
   isS390x = { cpu = { family = "s390"; bits = 64; }; };
-  isJavaScript = { cpu = cpuTypes.js; };
+  isJavaScript = { cpu = cpuTypes.javascript; };
 
   is32bit = { cpu = { bits = 32; }; };
   is64bit = { cpu = { bits = 64; }; };
third_party/nixpkgs/lib/systems/parse.nix (vendored) | 2

@@ -131,7 +131,7 @@ rec {
 
     or1k = { bits = 32; significantByte = bigEndian; family = "or1k"; };
 
-    js = { bits = 32; significantByte = littleEndian; family = "js"; };
+    javascript = { bits = 32; significantByte = littleEndian; family = "javascript"; };
   };
 
   # GNU build systems assume that older NetBSD architectures are using a.out.
third_party/nixpkgs/lib/tests/misc.nix (vendored) | 31

@@ -533,6 +533,37 @@ runTests {
     };
   };
 
+  # code from example
+  testFoldlAttrs = {
+    expr = {
+      example = foldlAttrs
+        (acc: name: value: {
+          sum = acc.sum + value;
+          names = acc.names ++ [ name ];
+        })
+        { sum = 0; names = [ ]; }
+        {
+          foo = 1;
+          bar = 10;
+        };
+      # should just return the initial value
+      emptySet = foldlAttrs (throw "function not needed") 123 { };
+      # should just evaluate to the last value
+      accNotNeeded = foldlAttrs (_acc: _name: v: v) (throw "accumulator not needed") { z = 3; a = 2; };
+      # the accumulator doesnt have to be an attrset it can be as trivial as being just a number or string
+      trivialAcc = foldlAttrs (acc: _name: v: acc * 10 + v) 1 { z = 1; a = 2; };
+    };
+    expected = {
+      example = {
+        sum = 11;
+        names = [ "bar" "foo" ];
+      };
+      emptySet = 123;
+      accNotNeeded = 3;
+      trivialAcc = 121;
+    };
+  };
+
   # code from the example
   testRecursiveUpdateUntil = {
     expr = recursiveUpdateUntil (path: l: r: path == ["foo"]) {
third_party/nixpkgs/lib/tests/modules.sh (vendored) | 12

@@ -141,6 +141,14 @@ checkConfigError "The option .*enable.* does not exist. Definition values:\n\s*-
 checkConfigError "attribute .*enable.* in selection path .*config.enable.* not found" "$@" ./disable-define-enable.nix ./disable-declare-enable.nix
 checkConfigError "attribute .*enable.* in selection path .*config.enable.* not found" "$@" ./disable-enable-modules.nix
 
+checkConfigOutput '^true$' 'config.positive.enable' ./disable-module-with-key.nix
+checkConfigOutput '^false$' 'config.negative.enable' ./disable-module-with-key.nix
+checkConfigError 'Module ..*disable-module-bad-key.nix. contains a disabledModules item that is an attribute set, presumably a module, that does not have a .key. attribute. .*' 'config.enable' ./disable-module-bad-key.nix
+
+# Not sure if we want to keep supporting module keys that aren't strings, paths or v?key, but we shouldn't remove support accidentally.
+checkConfigOutput '^true$' 'config.positive.enable' ./disable-module-with-toString-key.nix
+checkConfigOutput '^false$' 'config.negative.enable' ./disable-module-with-toString-key.nix
+
 # Check _module.args.
 set -- config.enable ./declare-enable.nix ./define-enable-with-custom-arg.nix
 checkConfigError 'while evaluating the module argument .*custom.* in .*define-enable-with-custom-arg.nix.*:' "$@"

@@ -358,6 +366,10 @@ checkConfigOutput '^"The option `a\.b. defined in `.*/doRename-warnings\.nix. ha
     config.result \
     ./doRename-warnings.nix
 
+# Anonymous modules get deduplicated by key
+checkConfigOutput '^"pear"$' config.once.raw ./merge-module-with-key.nix
+checkConfigOutput '^"pear\\npear"$' config.twice.raw ./merge-module-with-key.nix
+
 cat <<EOF
 ====== module tests ======
 $pass Pass
third_party/nixpkgs/lib/tests/modules/disable-module-bad-key.nix (vendored, new file) | 16

@@ -0,0 +1,16 @@
+{ lib, ... }:
+let
+  inherit (lib) mkOption types;
+
+  moduleWithKey = { config, ... }: {
+    config = {
+      enable = true;
+    };
+  };
+in
+{
+  imports = [
+    ./declare-enable.nix
+  ];
+  disabledModules = [ { } ];
+}
third_party/nixpkgs/lib/tests/modules/disable-module-with-key.nix (vendored, new file) | 34

@@ -0,0 +1,34 @@
+{ lib, ... }:
+let
+  inherit (lib) mkOption types;
+
+  moduleWithKey = {
+    key = "disable-module-with-key.nix#moduleWithKey";
+    config = {
+      enable = true;
+    };
+  };
+in
+{
+  options = {
+    positive = mkOption {
+      type = types.submodule {
+        imports = [
+          ./declare-enable.nix
+          moduleWithKey
+        ];
+      };
+      default = {};
+    };
+    negative = mkOption {
+      type = types.submodule {
+        imports = [
+          ./declare-enable.nix
+          moduleWithKey
+        ];
+        disabledModules = [ moduleWithKey ];
+      };
+      default = {};
+    };
+  };
+}
third_party/nixpkgs/lib/tests/modules/disable-module-with-toString-key.nix (vendored, new file) | 34

@@ -0,0 +1,34 @@
+{ lib, ... }:
+let
+  inherit (lib) mkOption types;
+
+  moduleWithKey = {
+    key = 123;
+    config = {
+      enable = true;
+    };
+  };
+in
+{
+  options = {
+    positive = mkOption {
+      type = types.submodule {
+        imports = [
+          ./declare-enable.nix
+          moduleWithKey
+        ];
+      };
+      default = {};
+    };
+    negative = mkOption {
+      type = types.submodule {
+        imports = [
+          ./declare-enable.nix
+          moduleWithKey
+        ];
+        disabledModules = [ 123 ];
+      };
+      default = {};
+    };
+  };
+}
third_party/nixpkgs/lib/tests/modules/merge-module-with-key.nix (vendored, new file) | 49

@@ -0,0 +1,49 @@
+{ lib, ... }:
+let
+  inherit (lib) mkOption types;
+
+  moduleWithoutKey = {
+    config = {
+      raw = "pear";
+    };
+  };
+
+  moduleWithKey = {
+    key = __curPos.file + "#moduleWithKey";
+    config = {
+      raw = "pear";
+    };
+  };
+
+  decl = {
+    options = {
+      raw = mkOption {
+        type = types.lines;
+      };
+    };
+  };
+in
+{
+  options = {
+    once = mkOption {
+      type = types.submodule {
+        imports = [
+          decl
+          moduleWithKey
+          moduleWithKey
+        ];
+      };
+      default = {};
+    };
+    twice = mkOption {
+      type = types.submodule {
+        imports = [
+          decl
+          moduleWithoutKey
+          moduleWithoutKey
+        ];
+      };
+      default = {};
+    };
+  };
+}
third_party/nixpkgs/maintainers/maintainer-list.nix (vendored) | 2563
(File diff suppressed because it is too large.)
third_party/nixpkgs/maintainers/scripts/check-maintainers-sorted.nix (vendored, new file) | 57

@@ -0,0 +1,57 @@
+let
+  lib = import ../../lib;
+  inherit (lib)
+    add attrNames elemAt foldl' genList length replaceStrings sort toLower trace;
+
+  maintainers = import ../maintainer-list.nix;
+  simplify = replaceStrings [ "-" "_" ] [ "" "" ];
+  compare = a: b: simplify (toLower a) < simplify (toLower b);
+  namesSorted =
+    sort
+      (a: b: a.key < b.key)
+      (map
+        (n: let pos = builtins.unsafeGetAttrPos n maintainers;
+            in assert pos == null -> throw "maintainers entry ${n} is malformed";
+               { name = n; line = pos.line; key = toLower (simplify n); })
+        (attrNames maintainers));
+  before = { name, line, key }:
+    foldl'
+      (acc: n: if n.key < key && (acc == null || n.key > acc.key) then n else acc)
+      null
+      namesSorted;
+  errors = foldl' add 0
+    (map
+      (i: let a = elemAt namesSorted i;
+              b = elemAt namesSorted (i + 1);
+              lim = let t = before a; in if t == null then "the initial {" else t.name;
+          in if a.line >= b.line
+             then trace
+               ("maintainer ${a.name} (line ${toString a.line}) should be listed "
+                + "after ${lim}, not after ${b.name} (line ${toString b.line})")
+               1
+             else 0)
+      (genList (i: i) (length namesSorted - 1)));
+in
+assert errors == 0; "all good!"
+
+# generate edit commands to sort the list.
+# may everything following the last current entry (closing } ff) in the wrong place
+# with lib;
+# concatStringsSep
+#   "\n"
+#   (let first = foldl' (acc: n: if n.line < acc then n.line else acc) 999999999 namesSorted;
+#        commands = map
+#          (i: let e = elemAt namesSorted i;
+#                  begin = foldl'
+#                    (acc: n: if n.line < e.line && n.line > acc then n.line else acc)
+#                    1
+#                    namesSorted;
+#                  end =
+#                    foldl' (acc: n: if n.line > e.line && n.line < acc then n.line else acc)
+#                    999999999
+#                    namesSorted;
+#              in "${toString e.line},${toString (end - 1)} p")
+#          (genList (i: i) (length namesSorted));
+#    in map
+#         (c: "sed -ne '${c}' maintainers/maintainer-list.nix")
+#         ([ "1,${toString (first - 1)} p" ] ++ commands))
@@ -328,6 +328,7 @@ platformIcon (Platform x) = case x of
   "x86_64-linux" -> ":penguin:"
   "aarch64-linux" -> ":iphone:"
   "x86_64-darwin" -> ":apple:"
+  "aarch64-darwin" -> ":green_apple:"
   _ -> x
 
 data BuildResult = BuildResult {state :: BuildState, id :: Int} deriving (Show, Eq, Ord)

@@ -488,7 +489,8 @@ printBuildSummary eval@Eval{id} fetchTime summary topBrokenRdeps =
       if' (isNothing maintainedJob) "No `maintained` job found." <>
       if' (Unfinished > maybe Success worstState mergeableJob) "`mergeable` jobset failed." <>
       if' (outstandingJobs (Platform "x86_64-linux") > 100) "Too many outstanding jobs on x86_64-linux." <>
-      if' (outstandingJobs (Platform "aarch64-linux") > 100) "Too many outstanding jobs on aarch64-linux."
+      if' (outstandingJobs (Platform "aarch64-linux") > 100) "Too many outstanding jobs on aarch64-linux." <>
+      if' (outstandingJobs (Platform "aarch64-darwin") > 100) "Too many outstanding jobs on aarch64-darwin."
     if' p e = if p then [e] else mempty
     outstandingJobs platform | Table m <- numSummary = Map.findWithDefault 0 (platform, Unfinished) m
     maintainedJob = Map.lookup "maintained" summary
@@ -142,6 +142,7 @@ with lib.maintainers; {
       # gares has no entry in the maintainers list
       siraben
       vbgl
+      alizter
     ];
     scope = "Maintain the Coq theorem prover and related packages.";
     shortName = "Coq";
@@ -8,8 +8,15 @@ the system on a stable release.
 
 `disabledModules` is a top level attribute like `imports`, `options` and
 `config`. It contains a list of modules that will be disabled. This can
-either be the full path to the module or a string with the filename
-relative to the modules path (eg. \<nixpkgs/nixos/modules> for nixos).
+either be:
+ - the full path to the module,
+ - or a string with the filename relative to the modules path (eg. \<nixpkgs/nixos/modules> for nixos),
+ - or an attribute set containing a specific `key` attribute.
+
+The latter allows some modules to be disabled, despite them being distributed
+via attributes instead of file paths. The `key` should be globally unique, so
+it is recommended to include a file path in it, or rely on a framework to do it
+for you.
 
 This example will replace the existing postgresql module with the
 version defined in the nixos-unstable channel while keeping the rest of
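As a small sketch of the new attribute-set form described in the hunk above (the module, its key string, and the option used here are invented for illustration), a module distributed as a value can be disabled through its `key`:

```nix
{ lib, ... }:
let
  # A module passed around as a Nix value rather than a file path;
  # the `key` is what `disabledModules` can refer to.
  exampleModule = {
    key = "my-framework/example-module";
    config = {
      services.example.enable = true;  # invented option, for illustration
    };
  };
in
{
  imports = [ exampleModule ];
  # Disable it again, either by the same value or by anything with an equal key.
  disabledModules = [ exampleModule ];
}
```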
@ -33,6 +33,9 @@ In addition to numerous new and upgraded packages, this release has the followin
|
||||||
- [Cloudlog](https://www.magicbug.co.uk/cloudlog/), a web-based Amateur Radio logging application. Available as [services.cloudlog](#opt-services.cloudlog.enable).
|
- [Cloudlog](https://www.magicbug.co.uk/cloudlog/), a web-based Amateur Radio logging application. Available as [services.cloudlog](#opt-services.cloudlog.enable).
|
||||||
|
|
||||||
- [fzf](https://github.com/junegunn/fzf), a command line fuzzyfinder. Available as [programs.fzf](#opt-programs.fzf.fuzzyCompletion).
|
- [fzf](https://github.com/junegunn/fzf), a command line fuzzyfinder. Available as [programs.fzf](#opt-programs.fzf.fuzzyCompletion).
|
||||||
|
- [readarr](https://github.com/Readarr/Readarr), Book Manager and Automation (Sonarr for Ebooks). Available as [services.readarr](options.html#opt-services.readarr.enable).
|
||||||
|
|
||||||
|
- [gemstash](https://github.com/rubygems/gemstash), a RubyGems.org cache and private gem server. Available as [services.gemstash](#opt-services.gemstash.enable).
|
||||||
|
|
||||||
- [gmediarender](https://github.com/hzeller/gmrender-resurrect), a simple, headless UPnP/DLNA renderer. Available as [services.gmediarender](options.html#opt-services.gmediarender.enable).
|
- [gmediarender](https://github.com/hzeller/gmrender-resurrect), a simple, headless UPnP/DLNA renderer. Available as [services.gmediarender](options.html#opt-services.gmediarender.enable).
|
||||||
|
|
||||||
|
@ -58,12 +61,20 @@ In addition to numerous new and upgraded packages, this release has the followin
|
||||||
|
|
||||||
- [ulogd](https://www.netfilter.org/projects/ulogd/index.html), a userspace logging daemon for netfilter/iptables related logging. Available as [services.ulogd](options.html#opt-services.ulogd.enable).
|
- [ulogd](https://www.netfilter.org/projects/ulogd/index.html), a userspace logging daemon for netfilter/iptables related logging. Available as [services.ulogd](options.html#opt-services.ulogd.enable).
|
||||||
|
|
||||||
|
- [jellyseerr](https://github.com/Fallenbagel/jellyseerr), a web-based requests manager for Jellyfin, forked from Overseerr. Available as [services.jellyseerr](#opt-services.jellyseerr.enable).
|
||||||
|
|
||||||
- [photoprism](https://photoprism.app/), a AI-Powered Photos App for the Decentralized Web. Available as [services.photoprism](options.html#opt-services.photoprism.enable).
|
- [photoprism](https://photoprism.app/), a AI-Powered Photos App for the Decentralized Web. Available as [services.photoprism](options.html#opt-services.photoprism.enable).
|
||||||
|
|
||||||
- [autosuspend](https://github.com/languitar/autosuspend), a python daemon that suspends a system if certain conditions are met, or not met.
|
- [autosuspend](https://github.com/languitar/autosuspend), a python daemon that suspends a system if certain conditions are met, or not met.
|
||||||
|
|
||||||
- [sharing](https://github.com/parvardegr/sharing), a command-line tool to share directories and files from the CLI to iOS and Android devices without the need of an extra client app. Available as [programs.sharing](#opt-programs.sharing.enable).
|
- [sharing](https://github.com/parvardegr/sharing), a command-line tool to share directories and files from the CLI to iOS and Android devices without the need of an extra client app. Available as [programs.sharing](#opt-programs.sharing.enable).
|
||||||
|
|
||||||
|
- [nimdow](https://github.com/avahe-kellenberger/nimdow), a window manager written in Nim, inspired by dwm.
|
||||||
|
|
||||||
|
- [woodpecker-agent](https://woodpecker-ci.org/), a simple CI engine with great extensibility. Available as [services.woodpecker-agent](#opt-services.woodpecker-agent.enable).
|
||||||
|
|
||||||
|
- [woodpecker-server](https://woodpecker-ci.org/), a simple CI engine with great extensibility. Available as [services.woodpecker-server](#opt-services.woodpecker-server.enable).
|
||||||
|
|
||||||
## Backward Incompatibilities {#sec-release-23.05-incompatibilities}
|
## Backward Incompatibilities {#sec-release-23.05-incompatibilities}
|
||||||
|
|
||||||
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
|
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
|
||||||
|
@ -82,6 +93,8 @@ In addition to numerous new and upgraded packages, this release has the followin
|
||||||
|
|
||||||
- `git-bug` has been updated to at least version 0.8.0, which includes backwards incompatible changes. The `git-bug-migration` package can be used to upgrade existing repositories.
|
- `git-bug` has been updated to at least version 0.8.0, which includes backwards incompatible changes. The `git-bug-migration` package can be used to upgrade existing repositories.
|
||||||
|
|
||||||
|
- `nushell` has been updated to at least version 0.77.0, which includes potential breaking changes in aliases. The old aliases are now available as `old-alias` but it is recommended you migrate to the new format. See [Reworked aliases](https://www.nushell.sh/blog/2023-03-14-nushell_0_77.html#reworked-aliases-breaking-changes-kubouch).
|
||||||
|
|
||||||
- `keepassx` and `keepassx2` have been removed, due to upstream [stopping development](https://www.keepassx.org/index.html%3Fp=636.html). Consider [KeePassXC](https://keepassxc.org) as a maintained alternative.

- The `services.kubo.settings` option is now no longer stateful. If you changed any of the options in `services.kubo.settings` in the past and then removed them from your NixOS configuration again, those changes are still in your Kubo configuration file but will now be reset to the default. If you're unsure, you may want to make a backup of your configuration file (probably /var/lib/ipfs/config) and compare after the update.
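
  A sketch of pinning a value declaratively so it is managed by NixOS rather than by the stateful config file (the `Addresses.API` key is just an illustrative Kubo setting):

  ```nix
  {
    services.kubo = {
      enable = true;
      settings = {
        # keys set here are rewritten on every start; removing them
        # resets the corresponding values to their defaults
        Addresses.API = "/ip4/127.0.0.1/tcp/5001";
      };
    };
  }
  ```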
@@ -115,6 +128,8 @@ In addition to numerous new and upgraded packages, this release has the following
- Calling `makeSetupHook` without passing a `name` argument is deprecated.
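
  A minimal sketch of the non-deprecated call style (hook name and script path are illustrative):

  ```nix
  { makeSetupHook }:

  makeSetupHook {
    name = "my-setup-hook";  # explicit name avoids the deprecation warning
  } ./my-setup-hook.sh
  ```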
- `lib.systems.examples.ghcjs` and consequently `pkgsCross.ghcjs` now use the target triplet `javascript-unknown-ghcjs` instead of `js-unknown-ghcjs`. This has been done to match an [upstream decision](https://gitlab.haskell.org/ghc/ghc/-/commit/6636b670233522f01d002c9b97827d00289dbf5c) to follow Cabal's platform naming more closely. Nixpkgs will also reject `js` as an architecture name.

- The `cosmoc` package has been removed. The upstream scripts in `cosmocc` should be used instead.

- Qt 5.12 and 5.14 have been removed, as the corresponding branches have been EOL upstream for a long time. This affected under 10 packages in nixpkgs, largely unmaintained upstream as well; however, out-of-tree package expressions may need to be updated manually.

@@ -191,6 +206,8 @@ In addition to numerous new and upgraded packages, this release has the following
- To reduce closure size, the `nixos/modules/profiles/minimal.nix` profile disables the installation of documentation and manuals. It also disables the `logrotate` and `udisks2` services.
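
  The profile can also be imported into other configurations to get the same trimming, e.g. (a sketch):

  ```nix
  { modulesPath, ... }:
  {
    imports = [ (modulesPath + "/profiles/minimal.nix") ];
  }
  ```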
- To reduce closure size, the `nixos/modules/installer/netboot/netboot-minimal.nix` profile disables loading Linux firmware, pre-installing the complete stdenv, and the `networking.wireless` service.
- The minimal ISO image now uses the `nixos/modules/profiles/minimal.nix` profile.

- The `ghcWithPackages` and `ghcWithHoogle` wrappers will now also symlink GHC's
|
|
@@ -61,7 +61,7 @@ with lib;
 pinentry = super.pinentry.override { enabledFlavors = [ "curses" "tty" "emacs" ]; withLibsecret = false; };
 qemu = super.qemu.override { gtkSupport = false; spiceSupport = false; sdlSupport = false; };
 qrencode = super.qrencode.overrideAttrs (_: { doCheck = false; });
-qt5 = super.qt5.overrideScope' (const (super': {
+qt5 = super.qt5.overrideScope (const (super': {
 qtbase = super'.qtbase.override { withGtk3 = false; };
 }));
 stoken = super.stoken.override { withGTK3 = false; };
|
|
|
@@ -9,4 +9,7 @@
 ];

 documentation.man.enable = lib.mkOverride 500 true;
+hardware.enableRedistributableFirmware = lib.mkOverride 70 false;
+system.extraDependencies = lib.mkOverride 70 [];
+networking.wireless.enable = lib.mkOverride 500 false;
 }
|
|
|
@@ -127,9 +127,6 @@ if (-e "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors") {
 push @kernelModules, "kvm-intel" if hasCPUFeature "vmx";
 push @kernelModules, "kvm-amd" if hasCPUFeature "svm";

-push @attrs, "hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;" if cpuManufacturer "AuthenticAMD";
-push @attrs, "hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;" if cpuManufacturer "GenuineIntel";
-
 # Look at the PCI devices and add necessary modules. Note that most
 # modules are auto-detected so we don't need to list them here.

@@ -324,11 +321,15 @@ if ($virt eq "systemd-nspawn") {
 }

-# Provide firmware for devices that are not detected by this script,
-# unless we're in a VM/container.
-push @imports, "(modulesPath + \"/installer/scan/not-detected.nix\")"
-    if $virt eq "none";
+# Check if we're on bare metal, not in a VM/container.
+if ($virt eq "none") {
+# Provide firmware for devices that are not detected by this script.
+push @imports, "(modulesPath + \"/installer/scan/not-detected.nix\")";
+
+# Update the microcode.
+push @attrs, "hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;" if cpuManufacturer "AuthenticAMD";
+push @attrs, "hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;" if cpuManufacturer "GenuineIntel";
+}

 # For a device name like /dev/sda1, find a more stable path like
 # /dev/disk/by-uuid/X or /dev/disk/by-label/Y.
||||||
|
|
|
@ -377,6 +377,8 @@
|
||||||
./services/continuous-integration/jenkins/default.nix
|
./services/continuous-integration/jenkins/default.nix
|
||||||
./services/continuous-integration/jenkins/job-builder.nix
|
./services/continuous-integration/jenkins/job-builder.nix
|
||||||
./services/continuous-integration/jenkins/slave.nix
|
./services/continuous-integration/jenkins/slave.nix
|
||||||
|
./services/continuous-integration/woodpecker/agent.nix
|
||||||
|
./services/continuous-integration/woodpecker/server.nix
|
||||||
./services/databases/aerospike.nix
|
./services/databases/aerospike.nix
|
||||||
./services/databases/cassandra.nix
|
./services/databases/cassandra.nix
|
||||||
./services/databases/clickhouse.nix
|
./services/databases/clickhouse.nix
|
||||||
|
@ -440,6 +442,7 @@
|
||||||
./services/development/blackfire.nix
|
./services/development/blackfire.nix
|
||||||
./services/development/bloop.nix
|
./services/development/bloop.nix
|
||||||
./services/development/distccd.nix
|
./services/development/distccd.nix
|
||||||
|
./services/development/gemstash.nix
|
||||||
./services/development/hoogle.nix
|
./services/development/hoogle.nix
|
||||||
./services/development/jupyter/default.nix
|
./services/development/jupyter/default.nix
|
||||||
./services/development/jupyterhub/default.nix
|
./services/development/jupyterhub/default.nix
|
||||||
|
@ -624,6 +627,7 @@
|
||||||
./services/misc/irkerd.nix
|
./services/misc/irkerd.nix
|
||||||
./services/misc/jackett.nix
|
./services/misc/jackett.nix
|
||||||
./services/misc/jellyfin.nix
|
./services/misc/jellyfin.nix
|
||||||
|
./services/misc/jellyseerr.nix
|
||||||
./services/misc/klipper.nix
|
./services/misc/klipper.nix
|
||||||
./services/misc/languagetool.nix
|
./services/misc/languagetool.nix
|
||||||
./services/misc/leaps.nix
|
./services/misc/leaps.nix
|
||||||
|
@ -663,6 +667,7 @@
|
||||||
./services/misc/prowlarr.nix
|
./services/misc/prowlarr.nix
|
||||||
./services/misc/pykms.nix
|
./services/misc/pykms.nix
|
||||||
./services/misc/radarr.nix
|
./services/misc/radarr.nix
|
||||||
|
./services/misc/readarr.nix
|
||||||
./services/misc/redmine.nix
|
./services/misc/redmine.nix
|
||||||
./services/misc/ripple-data-api.nix
|
./services/misc/ripple-data-api.nix
|
||||||
./services/misc/rippled.nix
|
./services/misc/rippled.nix
|
||||||
|
@ -801,6 +806,7 @@
|
||||||
./services/networking/bitlbee.nix
|
./services/networking/bitlbee.nix
|
||||||
./services/networking/blockbook-frontend.nix
|
./services/networking/blockbook-frontend.nix
|
||||||
./services/networking/blocky.nix
|
./services/networking/blocky.nix
|
||||||
|
./services/networking/cgit.nix
|
||||||
./services/networking/charybdis.nix
|
./services/networking/charybdis.nix
|
||||||
./services/networking/chisel-server.nix
|
./services/networking/chisel-server.nix
|
||||||
./services/networking/cjdns.nix
|
./services/networking/cjdns.nix
|
||||||
|
@ -1129,6 +1135,7 @@
|
||||||
./services/web-apps/baget.nix
|
./services/web-apps/baget.nix
|
||||||
./services/web-apps/bookstack.nix
|
./services/web-apps/bookstack.nix
|
||||||
./services/web-apps/calibre-web.nix
|
./services/web-apps/calibre-web.nix
|
||||||
|
./services/web-apps/coder.nix
|
||||||
./services/web-apps/changedetection-io.nix
|
./services/web-apps/changedetection-io.nix
|
||||||
./services/web-apps/cloudlog.nix
|
./services/web-apps/cloudlog.nix
|
||||||
./services/web-apps/code-server.nix
|
./services/web-apps/code-server.nix
|
||||||
|
@ -1268,6 +1275,7 @@
|
||||||
./services/x11/window-managers/bspwm.nix
|
./services/x11/window-managers/bspwm.nix
|
||||||
./services/x11/window-managers/katriawm.nix
|
./services/x11/window-managers/katriawm.nix
|
||||||
./services/x11/window-managers/metacity.nix
|
./services/x11/window-managers/metacity.nix
|
||||||
|
./services/x11/window-managers/nimdow.nix
|
||||||
./services/x11/window-managers/none.nix
|
./services/x11/window-managers/none.nix
|
||||||
./services/x11/window-managers/twm.nix
|
./services/x11/window-managers/twm.nix
|
||||||
./services/x11/window-managers/windowlab.nix
|
./services/x11/window-managers/windowlab.nix
|
||||||
|
|
|
@@ -1,5 +1,5 @@
 # This module defines the software packages included in the "minimal"
 # installation CD. It might be useful elsewhere.

 { config, lib, pkgs, ... }:

@@ -17,7 +17,6 @@
 pkgs.ddrescue
 pkgs.ccrypt
 pkgs.cryptsetup # needed for dm-crypt volumes
-pkgs.mkpasswd # for generating password files

 # Some text editors.
 (pkgs.vim.customize {

@@ -32,7 +31,6 @@
 pkgs.fuse
 pkgs.fuse3
 pkgs.sshfs-fuse
-pkgs.rsync
 pkgs.socat
 pkgs.screen
 pkgs.tcpdump

@@ -45,22 +43,14 @@
 pkgs.usbutils
 pkgs.nvme-cli

-# Tools to create / manipulate filesystems.
-pkgs.ntfsprogs # for resizing NTFS partitions
-pkgs.dosfstools
-pkgs.mtools
-pkgs.xfsprogs.bin
-pkgs.jfsutils
-pkgs.f2fs-tools
-
 # Some compression/archiver tools.
 pkgs.unzip
 pkgs.zip
 ];

-# Include support for various filesystems.
+# Include support for various filesystems and tools to create / manipulate them.
 boot.supportedFilesystems =
-[ "btrfs" "reiserfs" "vfat" "f2fs" "xfs" "ntfs" "cifs" ] ++
+[ "btrfs" "cifs" "f2fs" "jfs" "ntfs" "reiserfs" "vfat" "xfs" ] ++
 lib.optional (lib.meta.availableOn pkgs.stdenv.hostPlatform config.boot.zfs.package) "zfs";

 # Configure host id for ZFS to work
|
|
|
@@ -151,6 +151,8 @@ in
 if ! ${pkgs.diffutils}/bin/cmp -s "$logfile" "$logfile".new
 then
 ${pkgs.coreutils}/bin/mv -v -f "$logfile".new "$logfile"
+else
+${pkgs.coreutils}/bin/rm -f "$logfile".new
 fi
 done
 '';
|
|
|
@ -8,7 +8,6 @@ with lib;
|
||||||
let
|
let
|
||||||
cfg = config.programs.java;
|
cfg = config.programs.java;
|
||||||
in
|
in
|
||||||
|
|
||||||
{
|
{
|
||||||
|
|
||||||
options = {
|
options = {
|
||||||
|
@ -40,12 +39,35 @@ in
|
||||||
type = types.package;
|
type = types.package;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
binfmt = mkEnableOption (lib.mdDoc "binfmt to execute java jar's and classes");
|
||||||
|
|
||||||
};
|
};
|
||||||
|
|
||||||
};
|
};
|
||||||
|
|
||||||
config = mkIf cfg.enable {
|
config = mkIf cfg.enable {
|
||||||
|
|
||||||
|
boot.binfmt.registrations = mkIf cfg.binfmt {
|
||||||
|
java-class = {
|
||||||
|
recognitionType = "extension";
|
||||||
|
magicOrExtension = "class";
|
||||||
|
interpreter = pkgs.writeShellScript "java-class-wrapper" ''
|
||||||
|
test -e ${cfg.package}/nix-support/setup-hook && source ${cfg.package}/nix-support/setup-hook
|
||||||
|
classpath=$(dirname "$1")
|
||||||
|
class=$(basename "''${1%%.class}")
|
||||||
|
$JAVA_HOME/bin/java -classpath "$classpath" "$class" "''${@:2}"
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
java-jar = {
|
||||||
|
recognitionType = "extension";
|
||||||
|
magicOrExtension = "jar";
|
||||||
|
interpreter = pkgs.writeShellScript "java-jar-wrapper" ''
|
||||||
|
test -e ${cfg.package}/nix-support/setup-hook && source ${cfg.package}/nix-support/setup-hook
|
||||||
|
$JAVA_HOME/bin/java -jar "$@"
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
environment.systemPackages = [ cfg.package ];
|
environment.systemPackages = [ cfg.package ];
|
||||||
|
|
||||||
environment.shellInit = ''
|
environment.shellInit = ''
|
||||||
|
|
|
@ -47,7 +47,12 @@ let
|
||||||
then [ "${name} ${value}" ]
|
then [ "${name} ${value}" ]
|
||||||
else concatLists (mapAttrsToList (genSection name) value);
|
else concatLists (mapAttrsToList (genSection name) value);
|
||||||
|
|
||||||
addDefaults = settings: { backend = "btrfs-progs-sudo"; } // settings;
|
sudo_doas =
|
||||||
|
if config.security.sudo.enable then "sudo"
|
||||||
|
else if config.security.doas.enable then "doas"
|
||||||
|
else throw "The btrbk nixos module needs either sudo or doas enabled in the configuration";
|
||||||
|
|
||||||
|
addDefaults = settings: { backend = "btrfs-progs-${sudo_doas}"; } // settings;
|
||||||
|
|
||||||
mkConfigFile = name: settings: pkgs.writeTextFile {
|
mkConfigFile = name: settings: pkgs.writeTextFile {
|
||||||
name = "btrbk-${name}.conf";
|
name = "btrbk-${name}.conf";
|
||||||
|
@ -152,20 +157,41 @@ in
|
||||||
};
|
};
|
||||||
config = mkIf (sshEnabled || serviceEnabled) {
|
config = mkIf (sshEnabled || serviceEnabled) {
|
||||||
environment.systemPackages = [ pkgs.btrbk ] ++ cfg.extraPackages;
|
environment.systemPackages = [ pkgs.btrbk ] ++ cfg.extraPackages;
|
||||||
security.sudo.extraRules = [
|
security.sudo = mkIf (sudo_doas == "sudo") {
|
||||||
{
|
extraRules = [
|
||||||
users = [ "btrbk" ];
|
{
|
||||||
commands = [
|
users = [ "btrbk" ];
|
||||||
{ command = "${pkgs.btrfs-progs}/bin/btrfs"; options = [ "NOPASSWD" ]; }
|
commands = [
|
||||||
{ command = "${pkgs.coreutils}/bin/mkdir"; options = [ "NOPASSWD" ]; }
|
{ command = "${pkgs.btrfs-progs}/bin/btrfs"; options = [ "NOPASSWD" ]; }
|
||||||
{ command = "${pkgs.coreutils}/bin/readlink"; options = [ "NOPASSWD" ]; }
|
{ command = "${pkgs.coreutils}/bin/mkdir"; options = [ "NOPASSWD" ]; }
|
||||||
# for ssh, they are not the same than the one hard coded in ${pkgs.btrbk}
|
{ command = "${pkgs.coreutils}/bin/readlink"; options = [ "NOPASSWD" ]; }
|
||||||
{ command = "/run/current-system/bin/btrfs"; options = [ "NOPASSWD" ]; }
|
# for ssh, they are not the same than the one hard coded in ${pkgs.btrbk}
|
||||||
{ command = "/run/current-system/sw/bin/mkdir"; options = [ "NOPASSWD" ]; }
|
{ command = "/run/current-system/bin/btrfs"; options = [ "NOPASSWD" ]; }
|
||||||
{ command = "/run/current-system/sw/bin/readlink"; options = [ "NOPASSWD" ]; }
|
{ command = "/run/current-system/sw/bin/mkdir"; options = [ "NOPASSWD" ]; }
|
||||||
|
{ command = "/run/current-system/sw/bin/readlink"; options = [ "NOPASSWD" ]; }
|
||||||
|
];
|
||||||
|
}
|
||||||
|
];
|
||||||
|
};
|
||||||
|
security.doas = mkIf (sudo_doas == "doas") {
|
||||||
|
extraRules = let
|
||||||
|
doasCmdNoPass = cmd: { users = [ "btrbk" ]; cmd = cmd; noPass = true; };
|
||||||
|
in
|
||||||
|
[
|
||||||
|
(doasCmdNoPass "${pkgs.btrfs-progs}/bin/btrfs")
|
||||||
|
(doasCmdNoPass "${pkgs.coreutils}/bin/mkdir")
|
||||||
|
(doasCmdNoPass "${pkgs.coreutils}/bin/readlink")
|
||||||
|
# for ssh, they are not the same than the one hard coded in ${pkgs.btrbk}
|
||||||
|
(doasCmdNoPass "/run/current-system/bin/btrfs")
|
||||||
|
(doasCmdNoPass "/run/current-system/sw/bin/mkdir")
|
||||||
|
(doasCmdNoPass "/run/current-system/sw/bin/readlink")
|
||||||
|
|
||||||
|
# doas matches command, not binary
|
||||||
|
(doasCmdNoPass "btrfs")
|
||||||
|
(doasCmdNoPass "mkdir")
|
||||||
|
(doasCmdNoPass "readlink")
|
||||||
];
|
];
|
||||||
}
|
};
|
||||||
];
|
|
||||||
users.users.btrbk = {
|
users.users.btrbk = {
|
||||||
isSystemUser = true;
|
isSystemUser = true;
|
||||||
# ssh needs a home directory
|
# ssh needs a home directory
|
||||||
|
@ -183,8 +209,9 @@ in
|
||||||
"best-effort" = 2;
|
"best-effort" = 2;
|
||||||
"realtime" = 1;
|
"realtime" = 1;
|
||||||
}.${cfg.ioSchedulingClass};
|
}.${cfg.ioSchedulingClass};
|
||||||
|
sudo_doas_flag = "--${sudo_doas}";
|
||||||
in
|
in
|
||||||
''command="${pkgs.util-linux}/bin/ionice -t -c ${toString ioniceClass} ${optionalString (cfg.niceness >= 1) "${pkgs.coreutils}/bin/nice -n ${toString cfg.niceness}"} ${pkgs.btrbk}/share/btrbk/scripts/ssh_filter_btrbk.sh --sudo ${options}" ${v.key}''
|
''command="${pkgs.util-linux}/bin/ionice -t -c ${toString ioniceClass} ${optionalString (cfg.niceness >= 1) "${pkgs.coreutils}/bin/nice -n ${toString cfg.niceness}"} ${pkgs.btrbk}/share/btrbk/scripts/ssh_filter_btrbk.sh ${sudo_doas_flag} ${options}" ${v.key}''
|
||||||
)
|
)
|
||||||
cfg.sshAccess;
|
cfg.sshAccess;
|
||||||
};
|
};
|
||||||
|
|
|
@ -5,11 +5,95 @@ let
|
||||||
cfg = config.services.hadoop;
|
cfg = config.services.hadoop;
|
||||||
hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
|
hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
|
||||||
mkIfNotNull = x: mkIf (x != null) x;
|
mkIfNotNull = x: mkIf (x != null) x;
|
||||||
|
# generic hbase role options
|
||||||
|
hbaseRoleOption = name: extraOpts: {
|
||||||
|
enable = mkEnableOption (mdDoc "HBase ${name}");
|
||||||
|
|
||||||
|
openFirewall = mkOption {
|
||||||
|
type = types.bool;
|
||||||
|
default = false;
|
||||||
|
description = mdDoc "Open firewall ports for HBase ${name}.";
|
||||||
|
};
|
||||||
|
|
||||||
|
restartIfChanged = mkOption {
|
||||||
|
type = types.bool;
|
||||||
|
default = false;
|
||||||
|
description = mdDoc "Restart ${name} con config change.";
|
||||||
|
};
|
||||||
|
|
||||||
|
extraFlags = mkOption {
|
||||||
|
type = with types; listOf str;
|
||||||
|
default = [];
|
||||||
|
example = literalExpression ''[ "--backup" ]'';
|
||||||
|
description = mdDoc "Extra flags for the ${name} service.";
|
||||||
|
};
|
||||||
|
|
||||||
|
environment = mkOption {
|
||||||
|
type = with types; attrsOf str;
|
||||||
|
default = {};
|
||||||
|
example = literalExpression ''
|
||||||
|
{
|
||||||
|
HBASE_MASTER_OPTS = "-Dcom.sun.management.jmxremote.ssl=true";
|
||||||
|
}
|
||||||
|
'';
|
||||||
|
description = mdDoc "Environment variables passed to ${name}.";
|
||||||
|
};
|
||||||
|
} // extraOpts;
|
||||||
|
# generic hbase role configs
|
||||||
|
hbaseRoleConfig = name: ports: (mkIf cfg.hbase."${name}".enable {
|
||||||
|
services.hadoop.gatewayRole = {
|
||||||
|
enable = true;
|
||||||
|
enableHbaseCli = mkDefault true;
|
||||||
|
};
|
||||||
|
|
||||||
|
systemd.services."hbase-${toLower name}" = {
|
||||||
|
description = "HBase ${name}";
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
path = with cfg; [ hbase.package ] ++ optional
|
||||||
|
(with cfg.hbase.master; enable && initHDFS) package;
|
||||||
|
preStart = mkIf (with cfg.hbase.master; enable && initHDFS)
|
||||||
|
(concatStringsSep "\n" (
|
||||||
|
map (x: "HADOOP_USER_NAME=hdfs hdfs --config /etc/hadoop-conf ${x}")[
|
||||||
|
"dfsadmin -safemode wait"
|
||||||
|
"dfs -mkdir -p ${cfg.hbase.rootdir}"
|
||||||
|
"dfs -chown hbase ${cfg.hbase.rootdir}"
|
||||||
|
]
|
||||||
|
));
|
||||||
|
|
||||||
|
inherit (cfg.hbase."${name}") environment;
|
||||||
|
script = concatStringsSep " " (
|
||||||
|
[
|
||||||
|
"hbase --config /etc/hadoop-conf/"
|
||||||
|
"${toLower name} start"
|
||||||
|
]
|
||||||
|
++ cfg.hbase."${name}".extraFlags
|
||||||
|
++ map (x: "--${toLower x} ${toString cfg.hbase.${name}.${x}}")
|
||||||
|
(filter (x: hasAttr x cfg.hbase.${name}) ["port" "infoPort"])
|
||||||
|
);
|
||||||
|
|
||||||
|
serviceConfig = {
|
||||||
|
User = "hbase";
|
||||||
|
SyslogIdentifier = "hbase-${toLower name}";
|
||||||
|
Restart = "always";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
services.hadoop.hbaseSiteInternal."hbase.rootdir" = cfg.hbase.rootdir;
|
||||||
|
|
||||||
|
networking = {
|
||||||
|
firewall.allowedTCPPorts = mkIf cfg.hbase."${name}".openFirewall ports;
|
||||||
|
hosts = mkIf (with cfg.hbase.regionServer; enable && overrideHosts) {
|
||||||
|
"127.0.0.2" = mkForce [ ];
|
||||||
|
"::1" = mkForce [ ];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
});
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
options.services.hadoop = {
|
options.services.hadoop = {
|
||||||
|
|
||||||
gatewayRole.enableHbaseCli = mkEnableOption (lib.mdDoc "HBase CLI tools");
|
gatewayRole.enableHbaseCli = mkEnableOption (mdDoc "HBase CLI tools");
|
||||||
|
|
||||||
hbaseSiteDefault = mkOption {
|
hbaseSiteDefault = mkOption {
|
||||||
default = {
|
default = {
|
||||||
|
@ -21,7 +105,7 @@ in
|
||||||
"hbase.cluster.distributed" = "true";
|
"hbase.cluster.distributed" = "true";
|
||||||
};
|
};
|
||||||
type = types.attrsOf types.anything;
|
type = types.attrsOf types.anything;
|
||||||
description = lib.mdDoc ''
|
description = mdDoc ''
|
||||||
Default options for hbase-site.xml
|
Default options for hbase-site.xml
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
@ -29,8 +113,12 @@ in
|
||||||
default = {};
|
default = {};
|
||||||
type = with types; attrsOf anything;
|
type = with types; attrsOf anything;
|
||||||
example = literalExpression ''
|
example = literalExpression ''
|
||||||
|
{
|
||||||
|
"hbase.hregion.max.filesize" = 20*1024*1024*1024;
|
||||||
|
"hbase.table.normalization.enabled" = "true";
|
||||||
|
}
|
||||||
'';
|
'';
|
||||||
description = lib.mdDoc ''
|
description = mdDoc ''
|
||||||
Additional options and overrides for hbase-site.xml
|
Additional options and overrides for hbase-site.xml
|
||||||
<https://github.com/apache/hbase/blob/rel/2.4.11/hbase-common/src/main/resources/hbase-default.xml>
|
<https://github.com/apache/hbase/blob/rel/2.4.11/hbase-common/src/main/resources/hbase-default.xml>
|
||||||
'';
|
'';
|
||||||
|
@ -39,7 +127,7 @@ in
|
||||||
default = {};
|
default = {};
|
||||||
type = with types; attrsOf anything;
|
type = with types; attrsOf anything;
|
||||||
internal = true;
|
internal = true;
|
||||||
description = lib.mdDoc ''
|
description = mdDoc ''
|
||||||
Internal option to add configs to hbase-site.xml based on module options
|
Internal option to add configs to hbase-site.xml based on module options
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
@ -50,11 +138,11 @@ in
|
||||||
type = types.package;
|
type = types.package;
|
||||||
default = pkgs.hbase;
|
default = pkgs.hbase;
|
||||||
defaultText = literalExpression "pkgs.hbase";
|
defaultText = literalExpression "pkgs.hbase";
|
||||||
description = lib.mdDoc "HBase package";
|
description = mdDoc "HBase package";
|
||||||
};
|
};
|
||||||
|
|
||||||
rootdir = mkOption {
|
rootdir = mkOption {
|
||||||
description = lib.mdDoc ''
|
description = mdDoc ''
|
||||||
This option will set "hbase.rootdir" in hbase-site.xml and determine
|
This option will set "hbase.rootdir" in hbase-site.xml and determine
|
||||||
the directory shared by region servers and into which HBase persists.
|
the directory shared by region servers and into which HBase persists.
|
||||||
The URL should be 'fully-qualified' to include the filesystem scheme.
|
The URL should be 'fully-qualified' to include the filesystem scheme.
|
||||||
|
@ -68,7 +156,7 @@ in
|
||||||
default = "/hbase";
|
default = "/hbase";
|
||||||
};
|
};
|
||||||
zookeeperQuorum = mkOption {
|
zookeeperQuorum = mkOption {
|
||||||
description = lib.mdDoc ''
|
description = mdDoc ''
|
||||||
This option will set "hbase.zookeeper.quorum" in hbase-site.xml.
|
This option will set "hbase.zookeeper.quorum" in hbase-site.xml.
|
||||||
Comma separated list of servers in the ZooKeeper ensemble.
|
Comma separated list of servers in the ZooKeeper ensemble.
|
||||||
'';
|
'';
|
||||||
|
@ -76,107 +164,36 @@ in
|
||||||
example = "zk1.internal,zk2.internal,zk3.internal";
|
example = "zk1.internal,zk2.internal,zk3.internal";
|
||||||
default = null;
|
default = null;
|
||||||
};
|
};
|
||||||
master = {
|
} // (let
|
||||||
enable = mkEnableOption (lib.mdDoc "HBase Master");
|
ports = port: infoPort: {
|
||||||
initHDFS = mkEnableOption (lib.mdDoc "initialization of the hbase directory on HDFS");
|
port = mkOption {
|
||||||
|
type = types.int;
|
||||||
openFirewall = mkOption {
|
default = port;
|
||||||
type = types.bool;
|
description = mdDoc "RPC port";
|
||||||
default = false;
|
};
|
||||||
description = lib.mdDoc ''
|
infoPort = mkOption {
|
||||||
Open firewall ports for HBase master.
|
type = types.int;
|
||||||
'';
|
default = infoPort;
|
||||||
|
description = mdDoc "web UI port";
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
regionServer = {
|
in mapAttrs hbaseRoleOption {
|
||||||
enable = mkEnableOption (lib.mdDoc "HBase RegionServer");
|
master.initHDFS = mkEnableOption (mdDoc "initialization of the hbase directory on HDFS");
|
||||||
|
regionServer.overrideHosts = mkOption {
|
||||||
overrideHosts = mkOption {
|
type = types.bool;
|
||||||
type = types.bool;
|
default = true;
|
||||||
default = true;
|
description = mdDoc ''
|
||||||
description = lib.mdDoc ''
|
Remove /etc/hosts entries for "127.0.0.2" and "::1" defined in nixos/modules/config/networking.nix
|
||||||
Remove /etc/hosts entries for "127.0.0.2" and "::1" defined in nixos/modules/config/networking.nix
|
Regionservers must be able to resolve their hostnames to their IP addresses, through PTR records
|
||||||
Regionservers must be able to resolve their hostnames to their IP addresses, through PTR records
|
or /etc/hosts entries.
|
||||||
or /etc/hosts entries.
|
'';
|
||||||
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
openFirewall = mkOption {
|
|
||||||
type = types.bool;
|
|
||||||
default = false;
|
|
||||||
description = lib.mdDoc ''
|
|
||||||
Open firewall ports for HBase master.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
};
|
};
|
||||||
};
|
thrift = ports 9090 9095;
|
||||||
|
rest = ports 8080 8085;
|
||||||
|
});
|
||||||
};
|
};
|
||||||
|
|
||||||
config = mkMerge [
|
config = mkMerge ([
|
||||||
(mkIf cfg.hbase.master.enable {
|
|
||||||
services.hadoop.gatewayRole = {
|
|
||||||
enable = true;
|
|
||||||
enableHbaseCli = mkDefault true;
|
|
||||||
};
|
|
||||||
|
|
||||||
systemd.services.hbase-master = {
|
|
||||||
description = "HBase master";
|
|
||||||
wantedBy = [ "multi-user.target" ];
|
|
||||||
|
|
||||||
preStart = mkIf cfg.hbase.master.initHDFS ''
|
|
||||||
HADOOP_USER_NAME=hdfs ${cfg.package}/bin/hdfs --config ${hadoopConf} dfsadmin -safemode wait
|
|
||||||
HADOOP_USER_NAME=hdfs ${cfg.package}/bin/hdfs --config ${hadoopConf} dfs -mkdir -p ${cfg.hbase.rootdir}
|
|
||||||
HADOOP_USER_NAME=hdfs ${cfg.package}/bin/hdfs --config ${hadoopConf} dfs -chown hbase ${cfg.hbase.rootdir}
|
|
||||||
'';
|
|
||||||
|
|
||||||
serviceConfig = {
|
|
||||||
User = "hbase";
|
|
||||||
SyslogIdentifier = "hbase-master";
|
|
||||||
ExecStart = "${cfg.hbase.package}/bin/hbase --config ${hadoopConf} " +
|
|
||||||
"master start";
|
|
||||||
Restart = "always";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
services.hadoop.hbaseSiteInternal."hbase.rootdir" = cfg.hbase.rootdir;
|
|
||||||
|
|
||||||
networking.firewall.allowedTCPPorts = mkIf cfg.hbase.master.openFirewall [
|
|
||||||
16000 16010
|
|
||||||
];
|
|
||||||
|
|
||||||
})
|
|
||||||
|
|
||||||
(mkIf cfg.hbase.regionServer.enable {
|
|
||||||
services.hadoop.gatewayRole = {
|
|
||||||
enable = true;
|
|
||||||
enableHbaseCli = mkDefault true;
|
|
||||||
};
|
|
||||||
|
|
||||||
systemd.services.hbase-regionserver = {
|
|
||||||
description = "HBase RegionServer";
|
|
||||||
wantedBy = [ "multi-user.target" ];
|
|
||||||
serviceConfig = {
|
|
||||||
User = "hbase";
|
|
||||||
SyslogIdentifier = "hbase-regionserver";
|
|
||||||
ExecStart = "${cfg.hbase.package}/bin/hbase --config /etc/hadoop-conf/ " +
|
|
||||||
"regionserver start";
|
|
||||||
Restart = "always";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
services.hadoop.hbaseSiteInternal."hbase.rootdir" = cfg.hbase.rootdir;
|
|
||||||
|
|
||||||
networking = {
|
|
||||||
firewall.allowedTCPPorts = mkIf cfg.hbase.regionServer.openFirewall [
|
|
||||||
16020 16030
|
|
||||||
];
|
|
||||||
hosts = mkIf cfg.hbase.regionServer.overrideHosts {
|
|
||||||
"127.0.0.2" = mkForce [ ];
|
|
||||||
"::1" = mkForce [ ];
|
|
||||||
};
|
|
||||||
};
|
|
||||||
})
|
|
||||||
|
|
||||||
(mkIf cfg.gatewayRole.enable {
|
(mkIf cfg.gatewayRole.enable {
|
||||||
|
|
||||||
|
@ -192,5 +209,10 @@ in
|
||||||
isSystemUser = true;
|
isSystemUser = true;
|
||||||
};
|
};
|
||||||
})
|
})
|
||||||
];
|
] ++ (mapAttrsToList hbaseRoleConfig {
|
||||||
|
master = [ 16000 16010 ];
|
||||||
|
regionServer = [ 16020 16030 ];
|
||||||
|
thrift = with cfg.hbase.thrift; [ port infoPort ];
|
||||||
|
rest = with cfg.hbase.rest; [ port infoPort ];
|
||||||
|
}));
|
||||||
}
|
}
|
||||||
|
|
|
@@ -3,7 +3,7 @@
 with lib;

 let
-version = "1.7.1";
+version = "1.10.1";
 cfg = config.services.kubernetes.addons.dns;
 ports = {
 dns = 10053;

@@ -59,9 +59,9 @@ in {
 type = types.attrs;
 default = {
 imageName = "coredns/coredns";
-imageDigest = "sha256:4a6e0769130686518325b21b0c1d0688b54e7c79244d48e1b15634e98e40c6ef";
+imageDigest = "sha256:a0ead06651cf580044aeb0a0feba63591858fb2e43ade8c9dea45a6a89ae7e5e";
 finalImageTag = version;
-sha256 = "02r440xcdsgi137k5lmmvp0z5w5fmk8g9mysq5pnysq1wl8sj6mw";
+sha256 = "0wg696920smmal7552a2zdhfncndn5kfammfa8bk8l7dz9bhk0y1";
 };
 };

@@ -136,6 +136,11 @@ in {
 resources = [ "nodes" ];
 verbs = [ "get" ];
 }
+{
+apiGroups = [ "discovery.k8s.io" ];
+resources = [ "endpointslices" ];
+verbs = [ "list" "watch" ];
+}
 ];
 };
||||||
|
|
||||||
|
|
|
@@ -398,7 +398,7 @@ in
 systemd.services.hydra-evaluator =
 { wantedBy = [ "multi-user.target" ];
 requires = [ "hydra-init.service" ];
-after = [ "hydra-init.service" "network.target" ];
+after = [ "hydra-init.service" "network.target" "network-online.target" ];
 path = with pkgs; [ hydra-package nettools jq ];
 restartTriggers = [ hydraConf ];
 environment = env // {
||||||
|
|
99 third_party/nixpkgs/nixos/modules/services/continuous-integration/woodpecker/agent.nix (vendored, new file)
|
@ -0,0 +1,99 @@
|
||||||
|
{ config
|
||||||
|
, lib
|
||||||
|
, pkgs
|
||||||
|
, ...
|
||||||
|
}:
|
||||||
|
|
||||||
|
let
|
||||||
|
cfg = config.services.woodpecker-agent;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
meta.maintainers = [ lib.maintainers.janik ];
|
||||||
|
|
||||||
|
options = {
|
||||||
|
services.woodpecker-agent = {
|
||||||
|
enable = lib.mkEnableOption (lib.mdDoc "the Woodpecker-Agent, Agents execute tasks generated by a Server, every install will need one server and at least one agent");
|
||||||
|
package = lib.mkPackageOptionMD pkgs "woodpecker-agent" { };
|
||||||
|
|
||||||
|
environment = lib.mkOption {
|
||||||
|
default = { };
|
||||||
|
type = lib.types.attrsOf lib.types.str;
|
||||||
|
example = lib.literalExpression ''
|
||||||
|
{
|
||||||
|
WOODPECKER_SERVER = "localhost:9000";
|
||||||
|
WOODPECKER_BACKEND = "docker";
|
||||||
|
DOCKER_HOST = "unix:///run/podman/podman.sock";
|
||||||
|
}
|
||||||
|
'';
|
||||||
|
description = lib.mdDoc "woodpecker-agent config envrionment variables, for other options read the [documentation](https://woodpecker-ci.org/docs/administration/agent-config)";
|
||||||
|
};
|
||||||
|
|
||||||
|
extraGroups = lib.mkOption {
|
||||||
|
default = null;
|
||||||
|
type = lib.types.nullOr (lib.types.listOf lib.types.str);
|
||||||
|
example = [ "podman" ];
|
||||||
|
description = lib.mdDoc ''
|
||||||
|
Additional groups for the systemd service.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
environmentFile = lib.mkOption {
|
||||||
|
type = lib.types.nullOr lib.types.path;
|
||||||
|
default = null;
|
||||||
|
example = "/root/woodpecker-agent.env";
|
||||||
|
description = lib.mdDoc ''
|
||||||
|
File to load environment variables
|
||||||
|
from. This is helpful for specifying secrets.
|
||||||
|
Example content of environmentFile:
|
||||||
|
```
|
||||||
|
WOODPECKER_AGENT_SECRET=your-shared-secret-goes-here
|
||||||
|
```
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config = lib.mkIf cfg.enable {
|
||||||
|
systemd.services = {
|
||||||
|
woodpecker-agent = {
|
||||||
|
description = "Woodpecker-Agent Service";
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
after = [ "network-online.target" ];
|
||||||
|
wants = [ "network-online.target" ];
|
||||||
|
serviceConfig = {
|
||||||
|
DynamicUser = true;
|
||||||
|
SupplementaryGroups = lib.optionals (cfg.extraGroups != null) cfg.extraGroups;
|
||||||
|
EnvironmentFile = lib.optional (cfg.environmentFile != null) cfg.environmentFile;
|
||||||
|
ExecStart = "${cfg.package}/bin/woodpecker-agent";
|
||||||
|
Restart = "on-failure";
|
||||||
|
RestartSec = 15;
|
||||||
|
CapabilityBoundingSet = "";
|
||||||
|
# Security
|
||||||
|
NoNewPrivileges = true;
|
||||||
|
# Sandboxing
|
||||||
|
ProtectSystem = "strict";
|
||||||
|
PrivateTmp = true;
|
||||||
|
PrivateDevices = true;
|
||||||
|
PrivateUsers = true;
|
||||||
|
ProtectHostname = true;
|
||||||
|
ProtectClock = true;
|
||||||
|
ProtectKernelTunables = true;
|
||||||
|
ProtectKernelModules = true;
|
||||||
|
ProtectKernelLogs = true;
|
||||||
|
ProtectControlGroups = true;
|
||||||
|
RestrictAddressFamilies = [ "AF_UNIX AF_INET AF_INET6" ];
|
||||||
|
LockPersonality = true;
|
||||||
|
MemoryDenyWriteExecute = true;
|
||||||
|
RestrictRealtime = true;
|
||||||
|
RestrictSUIDSGID = true;
|
||||||
|
PrivateMounts = true;
|
||||||
|
# System Call Filtering
|
||||||
|
SystemCallArchitectures = "native";
|
||||||
|
SystemCallFilter = "~@clock @privileged @cpu-emulation @debug @keyring @module @mount @obsolete @raw-io @reboot @setuid @swap";
|
||||||
|
};
|
||||||
|
inherit (cfg) environment;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
98 third_party/nixpkgs/nixos/modules/services/continuous-integration/woodpecker/server.nix (vendored, new file)
|
@ -0,0 +1,98 @@
|
||||||
|
{ config
|
||||||
|
, lib
|
||||||
|
, pkgs
|
||||||
|
, ...
|
||||||
|
}:
|
||||||
|
|
||||||
|
let
|
||||||
|
cfg = config.services.woodpecker-server;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
meta.maintainers = [ lib.maintainers.janik ];
|
||||||
|
|
||||||
|
|
||||||
|
options = {
|
||||||
|
services.woodpecker-server = {
|
||||||
|
enable = lib.mkEnableOption (lib.mdDoc "the Woodpecker-Server, a CI/CD application for automatic builds, deployments and tests");
|
||||||
|
package = lib.mkPackageOptionMD pkgs "woodpecker-server" { };
|
||||||
|
environment = lib.mkOption {
|
||||||
|
default = { };
|
||||||
|
type = lib.types.attrsOf lib.types.str;
|
||||||
|
example = lib.literalExpression
|
||||||
|
''
|
||||||
|
{
|
||||||
|
WOODPECKER_HOST = "https://woodpecker.example.com";
|
||||||
|
WOODPECKER_OPEN = "true";
|
||||||
|
WOODPECKER_GITEA = "true";
|
||||||
|
WOODPECKER_GITEA_CLIENT = "ffffffff-ffff-ffff-ffff-ffffffffffff";
|
||||||
|
WOODPECKER_GITEA_URL = "https://git.example.com";
|
||||||
|
}
|
||||||
|
'';
|
||||||
|
description = lib.mdDoc "woodpecker-server config envrionment variables, for other options read the [documentation](https://woodpecker-ci.org/docs/administration/server-config)";
|
||||||
|
};
|
||||||
|
environmentFile = lib.mkOption {
|
||||||
|
type = lib.types.nullOr lib.types.path;
|
||||||
|
default = null;
|
||||||
|
example = "/root/woodpecker-server.env";
|
||||||
|
description = lib.mdDoc ''
|
||||||
|
File to load environment variables
|
||||||
|
from. This is helpful for specifying secrets.
|
||||||
|
Example content of environmentFile:
|
||||||
|
```
|
||||||
|
WOODPECKER_AGENT_SECRET=your-shared-secret-goes-here
|
||||||
|
WOODPECKER_GITEA_SECRET=gto_**************************************
|
||||||
|
```
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config = lib.mkIf cfg.enable {
|
||||||
|
systemd.services = {
|
||||||
|
woodpecker-server = {
|
||||||
|
description = "Woodpecker-Server Service";
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
after = [ "network-online.target" ];
|
||||||
|
wants = [ "network-online.target" ];
|
||||||
|
serviceConfig = {
|
||||||
|
DynamicUser = true;
|
||||||
|
WorkingDirectory = "%S/woodpecker-server";
|
||||||
|
StateDirectory = "woodpecker-server";
|
||||||
|
StateDirectoryMode = "0700";
|
||||||
|
UMask = "0007";
|
||||||
|
ConfigurationDirectory = "woodpecker-server";
|
||||||
|
EnvironmentFile = lib.optional (cfg.environmentFile != null) cfg.environmentFile;
|
||||||
|
ExecStart = "${cfg.package}/bin/woodpecker-server";
|
||||||
|
Restart = "on-failure";
|
||||||
|
RestartSec = 15;
|
||||||
|
CapabilityBoundingSet = "";
|
||||||
|
# Security
|
||||||
|
NoNewPrivileges = true;
|
||||||
|
# Sandboxing
|
||||||
|
ProtectSystem = "strict";
|
||||||
|
ProtectHome = true;
|
||||||
|
PrivateTmp = true;
|
||||||
|
PrivateDevices = true;
|
||||||
|
PrivateUsers = true;
|
||||||
|
ProtectHostname = true;
|
||||||
|
ProtectClock = true;
|
||||||
|
ProtectKernelTunables = true;
|
||||||
|
ProtectKernelModules = true;
|
||||||
|
ProtectKernelLogs = true;
|
||||||
|
ProtectControlGroups = true;
|
||||||
|
RestrictAddressFamilies = [ "AF_UNIX AF_INET AF_INET6" ];
|
||||||
|
LockPersonality = true;
|
||||||
|
MemoryDenyWriteExecute = true;
|
||||||
|
RestrictRealtime = true;
|
||||||
|
RestrictSUIDSGID = true;
|
||||||
|
PrivateMounts = true;
|
||||||
|
# System Call Filtering
|
||||||
|
SystemCallArchitectures = "native";
|
||||||
|
SystemCallFilter = "~@clock @privileged @cpu-emulation @debug @keyring @module @mount @obsolete @raw-io @reboot @setuid @swap";
|
||||||
|
};
|
||||||
|
inherit (cfg) environment;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
103 third_party/nixpkgs/nixos/modules/services/development/gemstash.nix (vendored, new file)
|
@ -0,0 +1,103 @@
|
||||||
|
{ lib, pkgs, config, ... }:
|
||||||
|
with lib;
|
||||||
|
|
||||||
|
let
|
||||||
|
settingsFormat = pkgs.formats.yaml { };
|
||||||
|
|
||||||
|
# gemstash uses a yaml config where the keys are ruby symbols,
|
||||||
|
# which means they start with ':'. This would be annoying to use
|
||||||
|
# on the nix side, so we rewrite plain names instead.
|
||||||
|
prefixColon = s: listToAttrs (map
|
||||||
|
(attrName: {
|
||||||
|
name = ":${attrName}";
|
||||||
|
value =
|
||||||
|
if isAttrs s.${attrName}
|
||||||
|
then prefixColon s."${attrName}"
|
||||||
|
else s."${attrName}";
|
||||||
|
})
|
||||||
|
(attrNames s));
|
||||||
|
|
||||||
|
# parse the port number out of the tcp://ip:port bind setting string
|
||||||
|
parseBindPort = bind: strings.toInt (last (strings.splitString ":" bind));
|
||||||
|
|
||||||
|
cfg = config.services.gemstash;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
options.services.gemstash = {
|
||||||
|
enable = mkEnableOption (lib.mdDoc "gemstash service");
|
||||||
|
|
||||||
|
openFirewall = mkOption {
|
||||||
|
type = types.bool;
|
||||||
|
default = false;
|
||||||
|
description = lib.mdDoc ''
|
||||||
|
Whether to open the firewall for the port in {option}`services.gemstash.bind`.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
settings = mkOption {
|
||||||
|
default = {};
|
||||||
|
description = lib.mdDoc ''
|
||||||
|
Configuration for Gemstash. The details can be found at in
|
||||||
|
[gemstash documentation](https://github.com/rubygems/gemstash/blob/master/man/gemstash-configuration.5.md).
|
||||||
|
Each key set here is automatically prefixed with ":" to match the gemstash expectations.
|
||||||
|
'';
|
||||||
|
type = types.submodule {
|
||||||
|
freeformType = settingsFormat.type;
|
||||||
|
options = {
|
||||||
|
base_path = mkOption {
|
||||||
|
type = types.path;
|
||||||
|
default = "/var/lib/gemstash";
|
||||||
|
description = lib.mdDoc "Path to store the gem files and the sqlite database. If left unchanged, the directory will be created.";
|
||||||
|
};
|
||||||
|
bind = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "tcp://0.0.0.0:9292";
|
||||||
|
description = lib.mdDoc "Host and port combination for the server to listen on.";
|
||||||
|
};
|
||||||
|
db_adapter = mkOption {
|
||||||
|
type = types.nullOr (types.enum [ "sqlite3" "postgres" "mysql" "mysql2" ]);
|
||||||
|
default = null;
|
||||||
|
description = lib.mdDoc "Which database type to use. For choices other than sqlite3, the dbUrl has to be specified as well.";
|
||||||
|
};
|
||||||
|
db_url = mkOption {
|
||||||
|
type = types.nullOr types.str;
|
||||||
|
default = null;
|
||||||
|
description = lib.mdDoc "The database to connect to when using postgres, mysql, or mysql2.";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config =
|
||||||
|
mkIf cfg.enable {
|
||||||
|
users = {
|
||||||
|
users.gemstash = {
|
||||||
|
group = "gemstash";
|
||||||
|
isSystemUser = true;
|
||||||
|
};
|
||||||
|
groups.gemstash = { };
|
||||||
|
};
|
||||||
|
|
||||||
|
networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ (parseBindPort cfg.settings.bind) ];
|
||||||
|
|
||||||
|
systemd.services.gemstash = {
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
after = [ "network.target" ];
|
||||||
|
serviceConfig = mkMerge [
|
||||||
|
{
|
||||||
|
ExecStart = "${pkgs.gemstash}/bin/gemstash start --no-daemonize --config-file ${settingsFormat.generate "gemstash.yaml" (prefixColon cfg.settings)}";
|
||||||
|
NoNewPrivileges = true;
|
||||||
|
User = "gemstash";
|
||||||
|
Group = "gemstash";
|
||||||
|
PrivateTmp = true;
|
||||||
|
RestrictSUIDSGID = true;
|
||||||
|
LockPersonality = true;
|
||||||
|
}
|
||||||
|
(mkIf (cfg.settings.base_path == "/var/lib/gemstash") {
|
||||||
|
StateDirectory = "gemstash";
|
||||||
|
})
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
|
@@ -18,6 +18,12 @@ let
 fwupd = cfg.daemonSettings;
 };
 };

+"fwupd/uefi_capsule.conf" = {
+source = format.generate "uefi_capsule.conf" {
+uefi_capsule = cfg.uefiCapsuleSettings;
+};
+};
 };

 originalEtc =

@@ -138,6 +144,16 @@ in {
 Configurations for the fwupd daemon.
 '';
 };

+uefiCapsuleSettings = mkOption {
+type = types.submodule {
+freeformType = format.type.nestedTypes.elemType;
+};
+default = {};
+description = lib.mdDoc ''
+UEFI capsule configurations for the fwupd daemon.
+'';
+};
 };
 };
|
||||||
|
|
||||||
|
|
|
@ -89,11 +89,6 @@ let
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
pagesArgs = [
|
|
||||||
"-pages-domain" gitlabConfig.production.pages.host
|
|
||||||
"-pages-root" "${gitlabConfig.production.shared.path}/pages"
|
|
||||||
] ++ cfg.pagesExtraArgs;
|
|
||||||
|
|
||||||
gitlabConfig = {
|
gitlabConfig = {
|
||||||
# These are the default settings from config/gitlab.example.yml
|
# These are the default settings from config/gitlab.example.yml
|
||||||
production = flip recursiveUpdate cfg.extraConfig {
|
production = flip recursiveUpdate cfg.extraConfig {
|
||||||
|
@ -161,6 +156,12 @@ let
|
||||||
};
|
};
|
||||||
extra = {};
|
extra = {};
|
||||||
uploads.storage_path = cfg.statePath;
|
uploads.storage_path = cfg.statePath;
|
||||||
|
pages = {
|
||||||
|
enabled = cfg.pages.enable;
|
||||||
|
port = 8090;
|
||||||
|
host = cfg.pages.settings.pages-domain;
|
||||||
|
secret_file = cfg.pages.settings.api-secret-key;
|
||||||
|
};
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -246,6 +247,7 @@ in {
|
||||||
(mkRenamedOptionModule [ "services" "gitlab" "backupPath" ] [ "services" "gitlab" "backup" "path" ])
|
(mkRenamedOptionModule [ "services" "gitlab" "backupPath" ] [ "services" "gitlab" "backup" "path" ])
|
||||||
(mkRemovedOptionModule [ "services" "gitlab" "satelliteDir" ] "")
|
(mkRemovedOptionModule [ "services" "gitlab" "satelliteDir" ] "")
|
||||||
(mkRemovedOptionModule [ "services" "gitlab" "logrotate" "extraConfig" ] "Modify services.logrotate.settings.gitlab directly instead")
|
(mkRemovedOptionModule [ "services" "gitlab" "logrotate" "extraConfig" ] "Modify services.logrotate.settings.gitlab directly instead")
|
||||||
|
(mkRemovedOptionModule [ "services" "gitlab" "pagesExtraArgs" ] "Use services.gitlab.pages.settings instead")
|
||||||
];
|
];
|
||||||
|
|
||||||
options = {
|
options = {
|
||||||
|
@ -667,10 +669,127 @@ in {
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
pagesExtraArgs = mkOption {
|
pages.enable = mkEnableOption (lib.mdDoc "the GitLab Pages service");
|
||||||
type = types.listOf types.str;
|
|
||||||
default = [ "-listen-proxy" "127.0.0.1:8090" ];
|
pages.settings = mkOption {
|
||||||
description = lib.mdDoc "Arguments to pass to the gitlab-pages daemon";
|
example = literalExpression ''
|
||||||
|
{
|
||||||
|
pages-domain = "example.com";
|
||||||
|
auth-client-id = "generated-id-xxxxxxx";
|
||||||
|
auth-client-secret = { _secret = "/var/keys/auth-client-secret"; };
|
||||||
|
auth-redirect-uri = "https://projects.example.com/auth";
|
||||||
|
auth-secret = { _secret = "/var/keys/auth-secret"; };
|
||||||
|
auth-server = "https://gitlab.example.com";
|
||||||
|
}
|
||||||
|
'';
|
||||||
|
|
||||||
|
description = lib.mdDoc ''
|
||||||
|
Configuration options to set in the GitLab Pages config
|
||||||
|
file.
|
||||||
|
|
||||||
|
Options containing secret data should be set to an attribute
|
||||||
|
set containing the attribute `_secret` - a string pointing
|
||||||
|
to a file containing the value the option should be set
|
||||||
|
to. See the example to get a better picture of this: in the
|
||||||
|
resulting configuration file, the `auth-client-secret` and
|
||||||
|
`auth-secret` keys will be set to the contents of the
|
||||||
|
{file}`/var/keys/auth-client-secret` and
|
||||||
|
{file}`/var/keys/auth-secret` files respectively.
|
||||||
|
'';
|
||||||
|
|
||||||
|
type = types.submodule {
|
||||||
|
freeformType = with types; attrsOf (nullOr (oneOf [ str int bool attrs ]));
|
||||||
|
|
||||||
|
options = {
|
||||||
|
listen-http = mkOption {
|
||||||
|
type = with types; listOf str;
|
||||||
|
apply = x: if x == [] then null else lib.concatStringsSep "," x;
|
||||||
|
default = [];
|
||||||
|
description = lib.mdDoc ''
|
||||||
|
The address(es) to listen on for HTTP requests.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
listen-https = mkOption {
|
||||||
|
type = with types; listOf str;
|
||||||
|
apply = x: if x == [] then null else lib.concatStringsSep "," x;
|
||||||
|
default = [];
|
||||||
|
description = lib.mdDoc ''
|
||||||
|
The address(es) to listen on for HTTPS requests.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
listen-proxy = mkOption {
|
||||||
|
type = with types; listOf str;
|
||||||
|
apply = x: if x == [] then null else lib.concatStringsSep "," x;
|
||||||
|
default = [ "127.0.0.1:8090" ];
|
||||||
|
description = lib.mdDoc ''
|
||||||
|
The address(es) to listen on for proxy requests.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
artifacts-server = mkOption {
|
||||||
|
type = with types; nullOr str;
|
||||||
|
default = "http${optionalString cfg.https "s"}://${cfg.host}/api/v4";
|
||||||
|
defaultText = "http(s)://<services.gitlab.host>/api/v4";
|
||||||
|
example = "https://gitlab.example.com/api/v4";
|
||||||
|
description = lib.mdDoc ''
|
||||||
|
API URL to proxy artifact requests to.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
gitlab-server = mkOption {
|
||||||
|
type = with types; nullOr str;
|
||||||
|
default = "http${optionalString cfg.https "s"}://${cfg.host}";
|
||||||
|
defaultText = "http(s)://<services.gitlab.host>";
|
||||||
|
example = "https://gitlab.example.com";
|
||||||
|
description = lib.mdDoc ''
|
||||||
|
Public GitLab server URL.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
internal-gitlab-server = mkOption {
|
||||||
|
type = with types; nullOr str;
|
||||||
|
default = null;
|
||||||
|
defaultText = "http(s)://<services.gitlab.host>";
|
||||||
|
example = "https://gitlab.example.internal";
|
||||||
|
description = lib.mdDoc ''
|
||||||
|
Internal GitLab server used for API requests, useful
|
||||||
|
if you want to send that traffic over an internal load
|
||||||
|
balancer. By default, the value of
|
||||||
|
`services.gitlab.pages.settings.gitlab-server` is
|
||||||
|
used.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
api-secret-key = mkOption {
|
||||||
|
type = with types; nullOr str;
|
||||||
|
default = "${cfg.statePath}/gitlab_pages_secret";
|
||||||
|
internal = true;
|
||||||
|
description = lib.mdDoc ''
|
||||||
|
File with secret key used to authenticate with the
|
||||||
|
GitLab API.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
pages-domain = mkOption {
|
||||||
|
type = with types; nullOr str;
|
||||||
|
example = "example.com";
|
||||||
|
description = lib.mdDoc ''
|
||||||
|
The domain to serve static pages on.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
pages-root = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "${gitlabConfig.production.shared.path}/pages";
|
||||||
|
+            defaultText = literalExpression ''config.${opt.extraConfig}.production.shared.path + "/pages"'';
+            description = lib.mdDoc ''
+              The directory where pages are stored.
+            '';
+          };
+        };
+      };
       };

     secrets.secretFile = mkOption {

@@ -1210,6 +1329,9 @@ in {
               umask u=rwx,g=,o=

               openssl rand -hex 32 > ${cfg.statePath}/gitlab_shell_secret
+              ${optionalString cfg.pages.enable ''
+                openssl rand -base64 32 > ${cfg.pages.settings.api-secret-key}
+              ''}

               rm -f '${cfg.statePath}/config/database.yml'

@@ -1359,28 +1481,66 @@ in {
       };
     };

-    systemd.services.gitlab-pages = mkIf (gitlabConfig.production.pages.enabled or false) {
-      description = "GitLab static pages daemon";
-      after = [ "network.target" "gitlab-config.service" ];
-      bindsTo = [ "gitlab-config.service" ];
-      wantedBy = [ "gitlab.target" ];
-      partOf = [ "gitlab.target" ];
-
-      path = [ pkgs.unzip ];
-
-      serviceConfig = {
-        Type = "simple";
-        TimeoutSec = "infinity";
-        Restart = "on-failure";
-
-        User = cfg.user;
-        Group = cfg.group;
-
-        ExecStart = "${cfg.packages.pages}/bin/gitlab-pages ${escapeShellArgs pagesArgs}";
-        WorkingDirectory = gitlabEnv.HOME;
-      };
+    services.gitlab.pages.settings = {
+      api-secret-key = "${cfg.statePath}/gitlab_pages_secret";
     };

+    systemd.services.gitlab-pages =
+      let
+        filteredConfig = filterAttrs (_: v: v != null) cfg.pages.settings;
+        isSecret = v: isAttrs v && v ? _secret && isString v._secret;
+        mkPagesKeyValue = lib.generators.toKeyValue {
+          mkKeyValue = lib.flip lib.generators.mkKeyValueDefault "=" rec {
+            mkValueString = v:
+              if isInt v then toString v
+              else if isString v then v
+              else if true == v then "true"
+              else if false == v then "false"
+              else if isSecret v then builtins.hashString "sha256" v._secret
+              else throw "unsupported type ${builtins.typeOf v}: ${(lib.generators.toPretty {}) v}";
+          };
+        };
+        secretPaths = lib.catAttrs "_secret" (lib.collect isSecret filteredConfig);
+        mkSecretReplacement = file: ''
+          replace-secret ${lib.escapeShellArgs [ (builtins.hashString "sha256" file) file "/run/gitlab-pages/gitlab-pages.conf" ]}
+        '';
+        secretReplacements = lib.concatMapStrings mkSecretReplacement secretPaths;
+        configFile = pkgs.writeText "gitlab-pages.conf" (mkPagesKeyValue filteredConfig);
+      in
+      mkIf cfg.pages.enable {
+        description = "GitLab static pages daemon";
+        after = [ "network.target" "gitlab-config.service" "gitlab.service" ];
+        bindsTo = [ "gitlab-config.service" "gitlab.service" ];
+        wantedBy = [ "gitlab.target" ];
+        partOf = [ "gitlab.target" ];
+
+        path = with pkgs; [
+          unzip
+          replace-secret
+        ];
+
+        serviceConfig = {
+          Type = "simple";
+          TimeoutSec = "infinity";
+          Restart = "on-failure";
+
+          User = cfg.user;
+          Group = cfg.group;
+
+          ExecStartPre = pkgs.writeShellScript "gitlab-pages-pre-start" ''
+            set -o errexit -o pipefail -o nounset
+            shopt -s dotglob nullglob inherit_errexit
+
+            install -m u=rw ${configFile} /run/gitlab-pages/gitlab-pages.conf
+            ${secretReplacements}
+          '';
+          ExecStart = "${cfg.packages.pages}/bin/gitlab-pages -config=/run/gitlab-pages/gitlab-pages.conf";
+          WorkingDirectory = gitlabEnv.HOME;
+          RuntimeDirectory = "gitlab-pages";
+          RuntimeDirectoryMode = "0700";
+        };
+      };
+
     systemd.services.gitlab-workhorse = {
       after = [ "network.target" ];
       wantedBy = [ "gitlab.target" ];
62 third_party/nixpkgs/nixos/modules/services/misc/jellyseerr.nix vendored Normal file
@@ -0,0 +1,62 @@
{ config, pkgs, lib, ... }:

with lib;
let
  cfg = config.services.jellyseerr;
in
{
  meta.maintainers = [ maintainers.camillemndn ];

  options.services.jellyseerr = {
    enable = mkEnableOption (mdDoc ''Jellyseerr, a requests manager for Jellyfin'');

    openFirewall = mkOption {
      type = types.bool;
      default = false;
      description = mdDoc ''Open port in the firewall for the Jellyseerr web interface.'';
    };

    port = mkOption {
      type = types.port;
      default = 5055;
      description = mdDoc ''The port which the Jellyseerr web UI should listen to.'';
    };
  };

  config = mkIf cfg.enable {
    systemd.services.jellyseerr = {
      description = "Jellyseerr, a requests manager for Jellyfin";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];
      environment.PORT = toString cfg.port;
      serviceConfig = {
        Type = "exec";
        StateDirectory = "jellyseerr";
        WorkingDirectory = "${pkgs.jellyseerr}/libexec/jellyseerr/deps/jellyseerr";
        DynamicUser = true;
        ExecStart = "${pkgs.jellyseerr}/bin/jellyseerr";
        BindPaths = [ "/var/lib/jellyseerr/:${pkgs.jellyseerr}/libexec/jellyseerr/deps/jellyseerr/config/" ];
        Restart = "on-failure";
        ProtectHome = true;
        ProtectSystem = "strict";
        PrivateTmp = true;
        PrivateDevices = true;
        ProtectHostname = true;
        ProtectClock = true;
        ProtectKernelTunables = true;
        ProtectKernelModules = true;
        ProtectKernelLogs = true;
        ProtectControlGroups = true;
        NoNewPrivileges = true;
        RestrictRealtime = true;
        RestrictSUIDSGID = true;
        RemoveIPC = true;
        PrivateMounts = true;
      };
    };

    networking.firewall = mkIf cfg.openFirewall {
      allowedTCPPorts = [ cfg.port ];
    };
  };
}
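A minimal usage sketch for the module added above (a hypothetical host configuration, not part of this commit); the option names come straight from the new jellyseerr.nix:

  services.jellyseerr = {
    enable = true;
    port = 5055;          # default web UI port
    openFirewall = true;  # expose the port declared above
  };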
88 third_party/nixpkgs/nixos/modules/services/misc/readarr.nix vendored Normal file
@@ -0,0 +1,88 @@
{ config, pkgs, lib, ... }:

with lib;

let
  cfg = config.services.readarr;
in
{
  options = {
    services.readarr = {
      enable = mkEnableOption (lib.mdDoc "Readarr");

      dataDir = mkOption {
        type = types.str;
        default = "/var/lib/readarr/";
        description = lib.mdDoc "The directory where Readarr stores its data files.";
      };

      package = mkOption {
        type = types.package;
        default = pkgs.readarr;
        defaultText = literalExpression "pkgs.readarr";
        description = lib.mdDoc "The Readarr package to use";
      };

      openFirewall = mkOption {
        type = types.bool;
        default = false;
        description = lib.mdDoc ''
          Open ports in the firewall for Readarr
        '';
      };

      user = mkOption {
        type = types.str;
        default = "readarr";
        description = lib.mdDoc ''
          User account under which Readarr runs.
        '';
      };

      group = mkOption {
        type = types.str;
        default = "readarr";
        description = lib.mdDoc ''
          Group under which Readarr runs.
        '';
      };
    };
  };

  config = mkIf cfg.enable {
    systemd.tmpfiles.rules = [
      "d '${cfg.dataDir}' 0700 ${cfg.user} ${cfg.group} - -"
    ];

    systemd.services.readarr = {
      description = "Readarr";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];

      serviceConfig = {
        Type = "simple";
        User = cfg.user;
        Group = cfg.group;
        ExecStart = "${cfg.package}/bin/Readarr -nobrowser -data='${cfg.dataDir}'";
        Restart = "on-failure";
      };
    };

    networking.firewall = mkIf cfg.openFirewall {
      allowedTCPPorts = [ 8787 ];
    };

    users.users = mkIf (cfg.user == "readarr") {
      readarr = {
        description = "Readarr service";
        home = cfg.dataDir;
        group = cfg.group;
        isSystemUser = true;
      };
    };

    users.groups = mkIf (cfg.group == "readarr") {
      readarr = { };
    };
  };
}
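A minimal usage sketch for the Readarr module above (hypothetical host configuration, not part of the diff); option names and the firewall port 8787 are taken from the new module:

  services.readarr = {
    enable = true;
    dataDir = "/var/lib/readarr/";  # module default
    openFirewall = true;            # opens TCP 8787
  };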
@@ -283,7 +283,8 @@ in {
     phpfpm = lib.mkIf useNginx {
       pools.zoneminder = {
         inherit user group;
-        phpPackage = pkgs.php.withExtensions ({ enabled, all }: enabled ++ [ all.apcu ]);
+        phpPackage = pkgs.php.withExtensions (
+          { enabled, all }: enabled ++ [ all.apcu all.sysvsem ]);
         phpOptions = ''
           date.timezone = "${config.time.timeZone}"
         '';

@@ -326,6 +327,15 @@ in {
           fi

           ${zoneminder}/bin/zmupdate.pl -nointeractive
+          ${zoneminder}/bin/zmupdate.pl --nointeractive -f
+
+          # Update ZM's Nix store path in the configuration table. Do nothing if the config doesn't
+          # contain ZM's Nix store path.
+          ${config.services.mysql.package}/bin/mysql -u zoneminder zm << EOF
+            UPDATE Config
+              SET Value = REGEXP_REPLACE(Value, "^/nix/store/[^-/]+-zoneminder-[^/]+", "${pkgs.zoneminder}")
+              WHERE Name = "ZM_FONT_FILE_LOCATION";
+          EOF
         '';
         serviceConfig = {
           User = user;
@@ -1300,7 +1300,7 @@ in {
         SystemCallFilter = [
           "@system-service"
           "~@privileged"
-        ] ++ lib.optional (cfg.settings.server.protocol == "socket") [ "@chown" ];
+        ] ++ lib.optionals (cfg.settings.server.protocol == "socket") [ "@chown" ];
         UMask = "0027";
       };
       preStart = ''
@@ -31,7 +31,7 @@ let
   if checkConfigEnabled then
     pkgs.runCommandLocal
       "${name}-${replaceStrings [" "] [""] what}-checked"
-      { buildInputs = [ cfg.package ]; } ''
+      { buildInputs = [ cfg.package.cli ]; } ''
       ln -s ${file} $out
       promtool ${what} $out
     '' else file;

@@ -1408,7 +1408,7 @@ let
       '';

     action =
-      mkDefOpt (types.enum [ "replace" "keep" "drop" "hashmod" "labelmap" "labeldrop" "labelkeep" ]) "replace" ''
+      mkDefOpt (types.enum [ "replace" "lowercase" "uppercase" "keep" "drop" "hashmod" "labelmap" "labeldrop" "labelkeep" ]) "replace" ''
         Action to perform based on regex matching.
       '';
   };
203 third_party/nixpkgs/nixos/modules/services/networking/cgit.nix vendored Normal file
@@ -0,0 +1,203 @@
{ config, lib, pkgs, ...}:

with lib;

let
  cfgs = config.services.cgit;

  settingType = with types; oneOf [ bool int str ];

  genAttrs' = names: f: listToAttrs (map f names);

  regexEscape =
    let
      # taken from https://github.com/python/cpython/blob/05cb728d68a278d11466f9a6c8258d914135c96c/Lib/re.py#L251-L266
      special = [
        "(" ")" "[" "]" "{" "}" "?" "*" "+" "-" "|" "^" "$" "\\" "." "&" "~"
        "#" " " "\t" "\n" "\r" "\v" "\f"
      ];
    in
    replaceStrings special (map (c: "\\${c}") special);

  stripLocation = cfg: removeSuffix "/" cfg.nginx.location;

  regexLocation = cfg: regexEscape (stripLocation cfg);

  mkFastcgiPass = cfg: ''
    ${if cfg.nginx.location == "/" then ''
      fastcgi_param PATH_INFO $uri;
    '' else ''
      fastcgi_split_path_info ^(${regexLocation cfg})(/.+)$;
      fastcgi_param PATH_INFO $fastcgi_path_info;
    ''
    }fastcgi_pass unix:${config.services.fcgiwrap.socketAddress};
  '';

  cgitrcLine = name: value: "${name}=${
    if value == true then
      "1"
    else if value == false then
      "0"
    else
      toString value
  }";

  mkCgitrc = cfg: pkgs.writeText "cgitrc" ''
    # global settings
    ${concatStringsSep "\n" (
      mapAttrsToList
        cgitrcLine
        ({ virtual-root = cfg.nginx.location; } // cfg.settings)
      )
    }
    ${optionalString (cfg.scanPath != null) (cgitrcLine "scan-path" cfg.scanPath)}

    # repository settings
    ${concatStrings (
      mapAttrsToList
        (url: settings: ''
          ${cgitrcLine "repo.url" url}
          ${concatStringsSep "\n" (
            mapAttrsToList (name: cgitrcLine "repo.${name}") settings
          )
          }
        '')
        cfg.repos
      )
    }

    # extra config
    ${cfg.extraConfig}
  '';

  mkCgitReposDir = cfg:
    if cfg.scanPath != null then
      cfg.scanPath
    else
      pkgs.runCommand "cgit-repos" {
        preferLocalBuild = true;
        allowSubstitutes = false;
      } ''
        mkdir -p "$out"
        ${
          concatStrings (
            mapAttrsToList
              (name: value: ''
                ln -s ${escapeShellArg value.path} "$out"/${escapeShellArg name}
              '')
              cfg.repos
          )
        }
      '';

in
{
  options = {
    services.cgit = mkOption {
      description = mdDoc "Configure cgit instances.";
      default = {};
      type = types.attrsOf (types.submodule ({ config, ... }: {
        options = {
          enable = mkEnableOption (mdDoc "cgit");

          package = mkPackageOptionMD pkgs "cgit" {};

          nginx.virtualHost = mkOption {
            description = mdDoc "VirtualHost to serve cgit on, defaults to the attribute name.";
            type = types.str;
            default = config._module.args.name;
            example = "git.example.com";
          };

          nginx.location = mkOption {
            description = mdDoc "Location to serve cgit under.";
            type = types.str;
            default = "/";
            example = "/git/";
          };

          repos = mkOption {
            description = mdDoc "cgit repository settings, see cgitrc(5)";
            type = with types; attrsOf (attrsOf settingType);
            default = {};
            example = {
              blah = {
                path = "/var/lib/git/example";
                desc = "An example repository";
              };
            };
          };

          scanPath = mkOption {
            description = mdDoc "A path which will be scanned for repositories.";
            type = types.nullOr types.path;
            default = null;
            example = "/var/lib/git";
          };

          settings = mkOption {
            description = mdDoc "cgit configuration, see cgitrc(5)";
            type = types.attrsOf settingType;
            default = {};
            example = literalExpression ''
              {
                enable-follow-links = true;
                source-filter = "''${pkgs.cgit}/lib/cgit/filters/syntax-highlighting.py";
              }
            '';
          };

          extraConfig = mkOption {
            description = mdDoc "These lines go to the end of cgitrc verbatim.";
            type = types.lines;
            default = "";
          };
        };
      }));
    };
  };

  config = mkIf (any (cfg: cfg.enable) (attrValues cfgs)) {
    assertions = mapAttrsToList (vhost: cfg: {
      assertion = !cfg.enable || (cfg.scanPath == null) != (cfg.repos == {});
      message = "Exactly one of services.cgit.${vhost}.scanPath or services.cgit.${vhost}.repos must be set.";
    }) cfgs;

    services.fcgiwrap.enable = true;

    services.nginx.enable = true;

    services.nginx.virtualHosts = mkMerge (mapAttrsToList (_: cfg: {
      ${cfg.nginx.virtualHost} = {
        locations = (
          genAttrs'
            [ "cgit.css" "cgit.png" "favicon.ico" "robots.txt" ]
            (name: nameValuePair "= ${stripLocation cfg}/${name}" {
              extraConfig = ''
                alias ${cfg.package}/cgit/${name};
              '';
            })
        ) // {
          "~ ${regexLocation cfg}/.+/(info/refs|git-upload-pack)" = {
            fastcgiParams = rec {
              SCRIPT_FILENAME = "${pkgs.git}/libexec/git-core/git-http-backend";
              GIT_HTTP_EXPORT_ALL = "1";
              GIT_PROJECT_ROOT = mkCgitReposDir cfg;
              HOME = GIT_PROJECT_ROOT;
            };
            extraConfig = mkFastcgiPass cfg;
          };
          "${stripLocation cfg}/" = {
            fastcgiParams = {
              SCRIPT_FILENAME = "${cfg.package}/cgit/cgit.cgi";
              QUERY_STRING = "$args";
              HTTP_HOST = "$server_name";
              CGIT_CONFIG = mkCgitrc cfg;
            };
            extraConfig = mkFastcgiPass cfg;
          };
        };
      };
    }) cfgs);
  };
}
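A minimal usage sketch for the cgit module above (hypothetical host configuration, not part of this commit). One instance is keyed by its virtual host; the module asserts that exactly one of scanPath or repos is set, so this sketch uses explicit repos:

  services.cgit."git.example.com" = {
    enable = true;
    nginx.location = "/git/";
    repos.myrepo = {
      path = "/var/lib/git/myrepo";   # hypothetical repository path
      desc = "An example repository";
    };
  };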
@@ -3,8 +3,11 @@
 with lib;

 let
+
   cfg = config.services.networkd-dispatcher;
+
 in {
+
   options = {
     services.networkd-dispatcher = {

@@ -14,14 +17,49 @@ in {
         for usage.
       '');

-      scriptDir = mkOption {
-        type = types.path;
-        default = "/var/lib/networkd-dispatcher";
-        description = mdDoc ''
-          This directory is used for keeping various scripts read and run by
-          networkd-dispatcher. See [https://gitlab.com/craftyguy/networkd-dispatcher](upstream instructions)
-          for directory structure and script usage.
+      rules = mkOption {
+        default = {};
+        example = lib.literalExpression ''
+          { "restart-tor" = {
+              onState = ["routable" "off"];
+              script = '''
+                #!''${pkgs.runtimeShell}
+                if [[ $IFACE == "wlan0" && $AdministrativeState == "configured" ]]; then
+                  echo "Restarting Tor ..."
+                  systemctl restart tor
+                fi
+                exit 0
+              ''';
+            };
+          };
         '';
+        description = lib.mdDoc ''
+          Declarative configuration of networkd-dispatcher rules. See
+          [https://gitlab.com/craftyguy/networkd-dispatcher](upstream instructions)
+          for an introduction and example scripts.
+        '';
+        type = types.attrsOf (types.submodule {
+          options = {
+            onState = mkOption {
+              type = types.listOf (types.enum [
+                "routable" "dormant" "no-carrier" "off" "carrier" "degraded"
+                "configuring" "configured"
+              ]);
+              default = null;
+              description = lib.mdDoc ''
+                List of names of the systemd-networkd operational states which
+                should trigger the script. See <https://www.freedesktop.org/software/systemd/man/networkctl.html>
+                for a description of the specific state type.
+              '';
+            };
+            script = mkOption {
+              type = types.lines;
+              description = lib.mdDoc ''
+                Shell commands executed on specified operational states.
+              '';
+            };
+          };
+        });
       };

     };

@@ -30,34 +68,31 @@ in {
   config = mkIf cfg.enable {

     systemd = {

       packages = [ pkgs.networkd-dispatcher ];
       services.networkd-dispatcher = {
         wantedBy = [ "multi-user.target" ];
         # Override existing ExecStart definition
-        serviceConfig.ExecStart = [
+        serviceConfig.ExecStart = let
+          scriptDir = pkgs.symlinkJoin {
+            name = "networkd-dispatcher-script-dir";
+            paths = lib.mapAttrsToList (name: cfg:
+              (map(state:
+                pkgs.writeTextFile {
+                  inherit name;
+                  text = cfg.script;
+                  destination = "/${state}.d/${name}";
+                  executable = true;
+                }
+              ) cfg.onState)
+            ) cfg.rules;
+          };
+        in [
           ""
-          "${pkgs.networkd-dispatcher}/bin/networkd-dispatcher -v --script-dir ${cfg.scriptDir} $networkd_dispatcher_args"
+          "${pkgs.networkd-dispatcher}/bin/networkd-dispatcher -v --script-dir ${scriptDir} $networkd_dispatcher_args"
         ];
       };

-      # Directory structure required according to upstream instructions
-      # https://gitlab.com/craftyguy/networkd-dispatcher
-      tmpfiles.rules = [
-        "d '${cfg.scriptDir}' 0750 root root - -"
-        "d '${cfg.scriptDir}/routable.d' 0750 root root - -"
-        "d '${cfg.scriptDir}/dormant.d' 0750 root root - -"
-        "d '${cfg.scriptDir}/no-carrier.d' 0750 root root - -"
-        "d '${cfg.scriptDir}/off.d' 0750 root root - -"
-        "d '${cfg.scriptDir}/carrier.d' 0750 root root - -"
-        "d '${cfg.scriptDir}/degraded.d' 0750 root root - -"
-        "d '${cfg.scriptDir}/configuring.d' 0750 root root - -"
-        "d '${cfg.scriptDir}/configured.d' 0750 root root - -"
-      ];
-
     };

   };
 }
@@ -203,7 +203,7 @@ in
         PrivateMounts = true;
         # System Call Filtering
         SystemCallArchitectures = "native";
-        SystemCallFilter = [ "~@cpu-emulation @debug @keyring @mount @obsolete @privileged @resources" "@clock" "@setuid" "capset" "chown" ] ++ lib.optional pkgs.stdenv.hostPlatform.isAarch64 "fchownat";
+        SystemCallFilter = [ "~@cpu-emulation @debug @keyring @mount @obsolete @privileged @resources" "@clock" "@setuid" "capset" "@chown" ];
       };
     };
   };
@@ -25,7 +25,7 @@ in

     compressionLevel = mkOption {
       type = types.nullOr types.int;
-      description = lib.mdDoc "The compression level for XZ compression (between 0 and 9)";
+      description = lib.mdDoc "The compression level for ZSTD compression (between 0 and 16)";
       default = null;
     };

217 third_party/nixpkgs/nixos/modules/services/web-apps/coder.nix vendored Normal file
@@ -0,0 +1,217 @@
{ config, lib, options, pkgs, ... }:

with lib;

let
  cfg = config.services.coder;
  name = "coder";
in {
  options = {
    services.coder = {
      enable = mkEnableOption (lib.mdDoc "Coder service");

      user = mkOption {
        type = types.str;
        default = "coder";
        description = lib.mdDoc ''
          User under which the coder service runs.

          ::: {.note}
          If left as the default value this user will automatically be created
          on system activation, otherwise it needs to be configured manually.
          :::
        '';
      };

      group = mkOption {
        type = types.str;
        default = "coder";
        description = lib.mdDoc ''
          Group under which the coder service runs.

          ::: {.note}
          If left as the default value this group will automatically be created
          on system activation, otherwise it needs to be configured manually.
          :::
        '';
      };

      package = mkOption {
        type = types.package;
        default = pkgs.coder;
        description = lib.mdDoc ''
          Package to use for the service.
        '';
        defaultText = literalExpression "pkgs.coder";
      };

      homeDir = mkOption {
        type = types.str;
        description = lib.mdDoc ''
          Home directory for coder user.
        '';
        default = "/var/lib/coder";
      };

      listenAddress = mkOption {
        type = types.str;
        description = lib.mdDoc ''
          Listen address.
        '';
        default = "127.0.0.1:3000";
      };

      accessUrl = mkOption {
        type = types.nullOr types.str;
        description = lib.mdDoc ''
          Access URL should be a external IP address or domain with DNS records pointing to Coder.
        '';
        default = null;
        example = "https://coder.example.com";
      };

      wildcardAccessUrl = mkOption {
        type = types.nullOr types.str;
        description = lib.mdDoc ''
          If you are providing TLS certificates directly to the Coder server, you must use a single certificate for the root and wildcard domains.
        '';
        default = null;
        example = "*.coder.example.com";
      };

      database = {
        createLocally = mkOption {
          type = types.bool;
          default = true;
          description = lib.mdDoc ''
            Create the database and database user locally.
          '';
        };

        host = mkOption {
          type = types.str;
          default = "/run/postgresql";
          description = lib.mdDoc ''
            Hostname hosting the database.
          '';
        };

        database = mkOption {
          type = types.str;
          default = "coder";
          description = lib.mdDoc ''
            Name of database.
          '';
        };

        username = mkOption {
          type = types.str;
          default = "coder";
          description = lib.mdDoc ''
            Username for accessing the database.
          '';
        };

        password = mkOption {
          type = types.nullOr types.str;
          default = null;
          description = lib.mdDoc ''
            Password for accessing the database.
          '';
        };

        sslmode = mkOption {
          type = types.nullOr types.str;
          default = "disable";
          description = lib.mdDoc ''
            Password for accessing the database.
          '';
        };
      };

      tlsCert = mkOption {
        type = types.nullOr types.path;
        description = lib.mdDoc ''
          The path to the TLS certificate.
        '';
        default = null;
      };

      tlsKey = mkOption {
        type = types.nullOr types.path;
        description = lib.mdDoc ''
          The path to the TLS key.
        '';
        default = null;
      };
    };
  };

  config = mkIf cfg.enable {
    assertions = [
      { assertion = cfg.database.createLocally -> cfg.database.username == name;
        message = "services.coder.database.username must be set to ${user} if services.coder.database.createLocally is set true";
      }
    ];

    systemd.services.coder = {
      description = "Coder - Self-hosted developer workspaces on your infra";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];

      environment = {
        CODER_ACCESS_URL = cfg.accessUrl;
        CODER_WILDCARD_ACCESS_URL = cfg.wildcardAccessUrl;
        CODER_PG_CONNECTION_URL = "user=${cfg.database.username} ${optionalString (cfg.database.password != null) "password=${cfg.database.password}"} database=${cfg.database.database} host=${cfg.database.host} ${optionalString (cfg.database.sslmode != null) "sslmode=${cfg.database.sslmode}"}";
        CODER_ADDRESS = cfg.listenAddress;
        CODER_TLS_ENABLE = optionalString (cfg.tlsCert != null) "1";
        CODER_TLS_CERT_FILE = cfg.tlsCert;
        CODER_TLS_KEY_FILE = cfg.tlsKey;
      };

      serviceConfig = {
        ProtectSystem = "full";
        PrivateTmp = "yes";
        PrivateDevices = "yes";
        SecureBits = "keep-caps";
        AmbientCapabilities = "CAP_IPC_LOCK CAP_NET_BIND_SERVICE";
        CacheDirectory = "coder";
        CapabilityBoundingSet = "CAP_SYSLOG CAP_IPC_LOCK CAP_NET_BIND_SERVICE";
        KillSignal = "SIGINT";
        KillMode = "mixed";
        NoNewPrivileges = "yes";
        Restart = "on-failure";
        ExecStart = "${cfg.package}/bin/coder server";
        User = cfg.user;
        Group = cfg.group;
      };
    };

    services.postgresql = lib.mkIf cfg.database.createLocally {
      enable = true;
      ensureDatabases = [
        cfg.database.database
      ];
      ensureUsers = [{
        name = cfg.database.username;
        ensurePermissions = {
          "DATABASE \"${cfg.database.database}\"" = "ALL PRIVILEGES";
        };
      }
      ];
    };

    users.groups = optionalAttrs (cfg.group == name) {
      "${cfg.group}" = {};
    };
    users.users = optionalAttrs (cfg.user == name) {
      ${name} = {
        description = "Coder service user";
        group = cfg.group;
        home = cfg.homeDir;
        createHome = true;
        isSystemUser = true;
      };
    };
  };
}
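A minimal usage sketch for the Coder module above (hypothetical host configuration, not part of this commit); with the default database.createLocally = true a local PostgreSQL database and user are provisioned automatically:

  services.coder = {
    enable = true;
    accessUrl = "https://coder.example.com";  # hypothetical external URL
    listenAddress = "127.0.0.1:3000";         # module default
  };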
@@ -173,7 +173,7 @@ in
   systemd.services.phosh = {
     wantedBy = [ "graphical.target" ];
     serviceConfig = {
-      ExecStart = "${cfg.package}/bin/phosh";
+      ExecStart = "${cfg.package}/bin/phosh-session";
       User = cfg.user;
       Group = cfg.group;
       PAMName = "login";
@@ -448,6 +448,7 @@ in
       kio-extras
     ];
     optionalPackages = [
+      ark
       elisa
       gwenview
       okular
23 third_party/nixpkgs/nixos/modules/services/x11/window-managers/nimdow.nix vendored Normal file
@@ -0,0 +1,23 @@
{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.services.xserver.windowManager.nimdow;
in
{
  options = {
    services.xserver.windowManager.nimdow.enable = mkEnableOption (lib.mdDoc "nimdow");
  };

  config = mkIf cfg.enable {
    services.xserver.windowManager.session = singleton {
      name = "nimdow";
      start = ''
        ${pkgs.nimdow}/bin/nimdow &
        waitPID=$!
      '';
    };
    environment.systemPackages = [ pkgs.nimdow ];
  };
}
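A minimal usage sketch for the nimdow window-manager module above (hypothetical host configuration, not part of this commit):

  services.xserver.enable = true;
  services.xserver.windowManager.nimdow.enable = true;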
@@ -592,7 +592,8 @@ in
           || dmConf.sddm.enable
           || dmConf.xpra.enable
           || dmConf.sx.enable
-          || dmConf.startx.enable);
+          || dmConf.startx.enable
+          || config.services.greetd.enable);
       in mkIf (default) (mkDefault true);

     # so that the service won't be enabled when only startx is used
@@ -130,6 +130,13 @@ let
     pkgs.replaceDependency { inherit oldDependency newDependency drv; }
   ) baseSystemAssertWarn config.system.replaceRuntimeDependencies;

+  systemWithBuildDeps = system.overrideAttrs (o: {
+    systemBuildClosure = pkgs.closureInfo { rootPaths = [ system.drvPath ]; };
+    buildCommand = o.buildCommand + ''
+      ln -sn $systemBuildClosure $out/build-closure
+    '';
+  });
+
 in

 {

@@ -306,6 +313,27 @@ in
       '';
     };

+    system.includeBuildDependencies = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to include the build closure of the whole system in
+        its runtime closure. This can be useful for making changes
+        fully offline, as it includes all sources, patches, and
+        intermediate outputs required to build all the derivations
+        that the system depends on.
+
+        Note that this includes _all_ the derivations, down from the
+        included applications to their sources, the compilers used to
+        build them, and even the bootstrap compiler used to compile
+        the compilers. This increases the size of the system and the
+        time needed to download its dependencies drastically: a
+        minimal configuration with no extra services enabled grows
+        from ~670MiB in size to 13.5GiB, and takes proportionally
+        longer to download.
+      '';
+    };
+
   };


@@ -336,7 +364,7 @@ in
     ]; };
   };

-  system.build.toplevel = system;
+  system.build.toplevel = if config.system.includeBuildDependencies then systemWithBuildDeps else system;

 };

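A usage sketch for the new option above (hypothetical host configuration, not part of this commit); as the option description warns, expect a drastically larger closure and download:

  # Pull the whole build closure into the system closure for fully offline rebuilds.
  system.includeBuildDependencies = true;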
@@ -134,11 +134,11 @@ let
       mask = ''\xff\xff\xff\xff'';
     };
     x86_64-windows = {
-      magicOrExtension = ".exe";
+      magicOrExtension = "exe";
       recognitionType = "extension";
     };
     i686-windows = {
-      magicOrExtension = ".exe";
+      magicOrExtension = "exe";
       recognitionType = "extension";
     };
   };
@@ -450,8 +450,9 @@ sub addEntry {

     # Include second initrd with secrets
     if (-e -x "$path/append-initrd-secrets") {
-        my $initrdName = basename($initrd);
-        my $initrdSecretsPath = "$bootPath/kernels/$initrdName-secrets";
+        # Name the initrd secrets after the system from which they're derived.
+        my $systemName = basename(Cwd::abs_path("$path"));
+        my $initrdSecretsPath = "$bootPath/kernels/$systemName-secrets";

         mkpath(dirname($initrdSecretsPath), 0, 0755);
         my $oldUmask = umask;

@@ -470,7 +471,7 @@ sub addEntry {
         if (-e $initrdSecretsPathTemp && ! -z _) {
             rename $initrdSecretsPathTemp, $initrdSecretsPath or die "failed to move initrd secrets into place: $!\n";
             $copied{$initrdSecretsPath} = 1;
-            $initrd .= " " . ($grubBoot->path eq "/" ? "" : $grubBoot->path) . "/kernels/$initrdName-secrets";
+            $initrd .= " " . ($grubBoot->path eq "/" ? "" : $grubBoot->path) . "/kernels/$systemName-secrets";
         } else {
             unlink $initrdSecretsPathTemp;
             rmdir dirname($initrdSecretsPathTemp);
@@ -85,18 +85,18 @@ def copy_from_profile(profile: Optional[str], generation: int, specialisation: O
     return efi_file_path


-def describe_generation(generation_dir: str) -> str:
+def describe_generation(profile: Optional[str], generation: int, specialisation: Optional[str]) -> str:
     try:
-        with open("%s/nixos-version" % generation_dir) as f:
+        with open(profile_path(profile, generation, specialisation, "nixos-version")) as f:
             nixos_version = f.read()
     except IOError:
         nixos_version = "Unknown"

-    kernel_dir = os.path.dirname(os.path.realpath("%s/kernel" % generation_dir))
+    kernel_dir = os.path.dirname(profile_path(profile, generation, specialisation, "kernel"))
     module_dir = glob.glob("%s/lib/modules/*" % kernel_dir)[0]
     kernel_version = os.path.basename(module_dir)

-    build_time = int(os.path.getctime(generation_dir))
+    build_time = int(os.path.getctime(system_dir(profile, generation, specialisation)))
     build_date = datetime.datetime.fromtimestamp(build_time).strftime('%F')

     description = "@distroName@ {}, Linux Kernel {}, Built on {}".format(

@@ -131,11 +131,10 @@ def write_entry(profile: Optional[str], generation: int, specialisation: Optiona
               "or renamed a file in `boot.initrd.secrets`", file=sys.stderr)
     entry_file = "@efiSysMountPoint@/loader/entries/%s" % (
         generation_conf_filename(profile, generation, specialisation))
-    generation_dir = os.readlink(system_dir(profile, generation, specialisation))
     tmp_path = "%s.tmp" % (entry_file)
-    kernel_params = "init=%s/init " % generation_dir
+    kernel_params = "init=%s " % profile_path(profile, generation, specialisation, "init")

-    with open("%s/kernel-params" % (generation_dir)) as params_file:
+    with open(profile_path(profile, generation, specialisation, "kernel-params")) as params_file:
         kernel_params = kernel_params + params_file.read()
     with open(tmp_path, 'w') as f:
         f.write(BOOT_ENTRY.format(title=title,

@@ -143,7 +142,7 @@ def write_entry(profile: Optional[str], generation: int, specialisation: Optiona
                     kernel=kernel,
                     initrd=initrd,
                     kernel_params=kernel_params,
-                    description=describe_generation(generation_dir)))
+                    description=describe_generation(profile, generation, specialisation)))
         if machine_id is not None:
             f.write("machine-id %s\n" % machine_id)
     os.rename(tmp_path, entry_file)

@@ -296,7 +295,7 @@ def main() -> None:
         remove_old_entries(gens)
         for gen in gens:
             try:
-                is_default = os.readlink(system_dir(*gen)) == args.default_config
+                is_default = os.path.dirname(profile_path(*gen, "init")) == args.default_config
                 write_entry(*gen, machine_id, current=is_default)
                 for specialisation in get_specialisations(*gen):
                     write_entry(*specialisation, machine_id, current=is_default)
@@ -614,7 +614,7 @@ in

     # Avoid potentially degraded system state due to
     # "Userspace Out-Of-Memory (OOM) Killer was skipped because of a failed condition check (ConditionControlGroupController=v2)."
-    systemd.services.systemd-oomd.enable = mkIf (!cfg.enableUnifiedCgroupHierarchy) false;
+    systemd.oomd.enable = mkIf (!cfg.enableUnifiedCgroupHierarchy) false;

     services.logrotate.settings = {
       "/var/log/btmp" = mapAttrs (_: mkDefault) {
@@ -158,6 +158,16 @@ in {
       '';
     };

+    managerEnvironment = mkOption {
+      type = with types; attrsOf (nullOr (oneOf [ str path package ]));
+      default = {};
+      example = { SYSTEMD_LOG_LEVEL = "debug"; };
+      description = lib.mdDoc ''
+        Environment variables of PID 1. These variables are
+        *not* passed to started units.
+      '';
+    };
+
     contents = mkOption {
       description = lib.mdDoc "Set of files that have to be linked into the initrd";
       example = literalExpression ''

@@ -355,8 +365,11 @@ in {
       less = "${pkgs.less}/bin/less";
       mount = "${cfg.package.util-linux}/bin/mount";
       umount = "${cfg.package.util-linux}/bin/umount";
+      fsck = "${cfg.package.util-linux}/bin/fsck";
     };

+    managerEnvironment.PATH = "/bin:/sbin";
+
     contents = {
       "/init".source = "${cfg.package}/lib/systemd/systemd";
       "/etc/systemd/system".source = stage1Units;

@@ -365,6 +378,7 @@ in {
         [Manager]
         DefaultEnvironment=PATH=/bin:/sbin ${optionalString (isBool cfg.emergencyAccess && cfg.emergencyAccess) "SYSTEMD_SULOGIN_FORCE=1"}
         ${cfg.extraConfig}
+        ManagerEnvironment=${lib.concatStringsSep " " (lib.mapAttrsToList (n: v: "${n}=${lib.escapeShellArg v}") cfg.managerEnvironment)}
       '';

       "/lib/modules".source = "${modulesClosure}/lib/modules";

@@ -444,21 +458,6 @@ in {
         (v: let n = escapeSystemdPath v.where;
             in nameValuePair "${n}.automount" (automountToUnit n v)) cfg.automounts);

-      # The unit in /run/systemd/generator shadows the unit in
-      # /etc/systemd/system, but will still apply drop-ins from
-      # /etc/systemd/system/foo.service.d/
-      #
-      # We need IgnoreOnIsolate, otherwise the Requires dependency of
-      # a mount unit on its makefs unit causes it to be unmounted when
-      # we isolate for switch-root. Use a dummy package so that
-      # generateUnits will generate drop-ins instead of unit files.
-      packages = [(pkgs.runCommand "dummy" {} ''
-        mkdir -p $out/etc/systemd/system
-        touch $out/etc/systemd/system/systemd-{makefs,growfs}@.service
-      '')];
-      services."systemd-makefs@" = lib.mkIf needMakefs { unitConfig.IgnoreOnIsolate = true; };
-      services."systemd-growfs@" = lib.mkIf needGrowfs { unitConfig.IgnoreOnIsolate = true; };
-
       # make sure all the /dev nodes are set up
       services.systemd-tmpfiles-setup-dev.wantedBy = ["sysinit.target"];

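A usage sketch for the new managerEnvironment option above (hypothetical host configuration, not part of this commit), passing an environment variable to PID 1 in the initrd as in the option's own example:

  boot.initrd.systemd.managerEnvironment.SYSTEMD_LOG_LEVEL = "debug";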
@@ -140,7 +140,10 @@ let
         else if config.fsType == "reiserfs" then "-q"
         else null;
     in {
-      options = mkIf config.autoResize [ "x-nixos.autoresize" ];
+      options = mkMerge [
+        (mkIf config.autoResize [ "x-nixos.autoresize" ])
+        (mkIf (utils.fsNeededForBoot config) [ "x-initrd.mount" ])
+      ];
       formatOptions = mkIf (defaultFormatOptions != null) (mkDefault defaultFormatOptions);
     };

@@ -155,27 +158,54 @@ let

   makeFstabEntries =
     let
-      fsToSkipCheck = [ "none" "bindfs" "btrfs" "zfs" "tmpfs" "nfs" "nfs4" "vboxsf" "glusterfs" "apfs" "9p" "cifs" "prl_fs" "vmhgfs" ];
+      fsToSkipCheck = [
+        "none"
+        "auto"
+        "overlay"
+        "iso9660"
+        "bindfs"
+        "udf"
+        "btrfs"
+        "zfs"
+        "tmpfs"
+        "bcachefs"
+        "nfs"
+        "nfs4"
+        "nilfs2"
+        "vboxsf"
+        "squashfs"
+        "glusterfs"
+        "apfs"
+        "9p"
+        "cifs"
+        "prl_fs"
+        "vmhgfs"
+      ] ++ lib.optionals (!config.boot.initrd.checkJournalingFS) [
+        "ext3"
+        "ext4"
+        "reiserfs"
+        "xfs"
+        "jfs"
+        "f2fs"
+      ];
       isBindMount = fs: builtins.elem "bind" fs.options;
       skipCheck = fs: fs.noCheck || fs.device == "none" || builtins.elem fs.fsType fsToSkipCheck || isBindMount fs;
       # https://wiki.archlinux.org/index.php/fstab#Filepath_spaces
       escape = string: builtins.replaceStrings [ " " "\t" ] [ "\\040" "\\011" ] string;
-    in fstabFileSystems: { rootPrefix ? "", excludeChecks ? false, extraOpts ? (fs: []) }: concatMapStrings (fs:
+    in fstabFileSystems: { rootPrefix ? "", extraOpts ? (fs: []) }: concatMapStrings (fs:
       (optionalString (isBindMount fs) (escape rootPrefix))
       + (if fs.device != null then escape fs.device
          else if fs.label != null then "/dev/disk/by-label/${escape fs.label}"
          else throw "No device specified for mount point ‘${fs.mountPoint}’.")
-      + " " + escape (rootPrefix + fs.mountPoint)
+      + " " + escape fs.mountPoint
       + " " + fs.fsType
       + " " + escape (builtins.concatStringsSep "," (fs.options ++ (extraOpts fs)))
-      + " " + (optionalString (!excludeChecks)
-        ("0 " + (if skipCheck fs then "0" else if fs.mountPoint == "/" then "1" else "2")))
+      + " 0 " + (if skipCheck fs then "0" else if fs.mountPoint == "/" then "1" else "2")
       + "\n"
     ) fstabFileSystems;

   initrdFstab = pkgs.writeText "initrd-fstab" (makeFstabEntries (filter utils.fsNeededForBoot fileSystems) {
     rootPrefix = "/sysroot";
-    excludeChecks = true;
     extraOpts = fs:
       (optional fs.autoResize "x-systemd.growfs")
       ++ (optional fs.autoFormat "x-systemd.makefs");

@@ -328,7 +358,9 @@ in
       )}
     '';

-    boot.initrd.systemd.contents."/etc/fstab".source = initrdFstab;
+    boot.initrd.systemd.storePaths = [initrdFstab];
+    boot.initrd.systemd.managerEnvironment.SYSTEMD_SYSROOT_FSTAB = initrdFstab;
+    boot.initrd.systemd.services.initrd-parse-etc.environment.SYSTEMD_SYSROOT_FSTAB = initrdFstab;

     # Provide a target that pulls in all filesystems.
     systemd.targets.fs =
@@ -11,7 +11,7 @@ in
 {
   config = mkIf (any (fs: fs == "vfat") config.boot.supportedFilesystems) {

-    system.fsPackages = [ pkgs.dosfstools ];
+    system.fsPackages = [ pkgs.dosfstools pkgs.mtools ];

     boot.initrd.kernelModules = mkIf inInitrd [ "vfat" "nls_cp437" "nls_iso8859-1" ];

@@ -55,10 +55,9 @@ done
 echo "getting EC2 instance metadata..."

 get_imds() {
-    # Intentionally no --fail here, so that we proceed even if e.g. a
-    # 404 was returned (but we still fail if we can't reach the IMDS
-    # server).
-    curl --silent --show-error --header "X-aws-ec2-metadata-token: $IMDS_TOKEN" "$@"
+    # --fail to avoid populating missing files with 404 HTML response body
+    # || true to allow the script to continue even when encountering a 404
+    curl --silent --show-error --fail --header "X-aws-ec2-metadata-token: $IMDS_TOKEN" "$@" || true
 }

 get_imds -o "$metaDir/ami-manifest-path" http://169.254.169.254/1.0/meta-data/ami-manifest-path
@@ -152,9 +152,11 @@ let

       ${lib.optionalString cfg.useBootLoader
       ''
-        # Create a writable copy/snapshot of the boot disk.
-        # A writable boot disk can be booted from automatically.
-        ${qemu}/bin/qemu-img create -f qcow2 -F qcow2 -b ${bootDisk}/disk.img "$TMPDIR/disk.img"
+        if ${if !cfg.persistBootDevice then "true" else "! test -e $TMPDIR/disk.img"}; then
+          # Create a writable copy/snapshot of the boot disk.
+          # A writable boot disk can be booted from automatically.
+          ${qemu}/bin/qemu-img create -f qcow2 -F qcow2 -b ${bootDisk}/disk.img "$TMPDIR/disk.img"
+        fi

         NIX_EFI_VARS=$(readlink -f "''${NIX_EFI_VARS:-${cfg.efiVars}}")

@@ -370,6 +372,17 @@ in
         '';
       };

+    virtualisation.persistBootDevice =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          lib.mdDoc ''
+            If useBootLoader is specified, whether to recreate the boot device
+            on each instantiaton or allow it to persist.
+          '';
+      };
+
     virtualisation.emptyDiskImages =
       mkOption {
         type = types.listOf types.ints.positive;

@@ -853,6 +866,8 @@ in
       # * The disks are attached in `virtualisation.qemu.drives`.
       #   Their order makes them appear as devices `a`, `b`, etc.
       # * `fileSystems."/boot"` is adjusted to be on device `b`.
+      # * The disk.img is recreated each time the VM is booted unless
+      #   virtualisation.persistBootDevice is set.

       # If `useBootLoader`, GRUB goes to the second disk, see
       # note [Disk layout with `useBootLoader`].

@@ -895,7 +910,7 @@ in

           ${optionalString cfg.writableStore ''
             echo "mounting overlay filesystem on /nix/store..."
-            mkdir -p 0755 $targetRoot/nix/.rw-store/store $targetRoot/nix/.rw-store/work $targetRoot/nix/store
+            mkdir -p -m 0755 $targetRoot/nix/.rw-store/store $targetRoot/nix/.rw-store/work $targetRoot/nix/store
             mount -t overlay overlay $targetRoot/nix/store \
               -o lowerdir=$targetRoot/nix/.ro-store,upperdir=$targetRoot/nix/.rw-store/store,workdir=$targetRoot/nix/.rw-store/work || fail
           ''}

@@ -1086,18 +1101,20 @@ in
         what = "overlay";
         type = "overlay";
         options = "lowerdir=/sysroot/nix/.ro-store,upperdir=/sysroot/nix/.rw-store/store,workdir=/sysroot/nix/.rw-store/work";
-        wantedBy = ["local-fs.target"];
-        before = ["local-fs.target"];
-        requires = ["sysroot-nix-.ro\\x2dstore.mount" "sysroot-nix-.rw\\x2dstore.mount" "rw-store.service"];
-        after = ["sysroot-nix-.ro\\x2dstore.mount" "sysroot-nix-.rw\\x2dstore.mount" "rw-store.service"];
-        unitConfig.IgnoreOnIsolate = true;
+        wantedBy = ["initrd-fs.target"];
+        before = ["initrd-fs.target"];
+        requires = ["rw-store.service"];
+        after = ["rw-store.service"];
+        unitConfig.RequiresMountsFor = "/sysroot/nix/.ro-store";
       }];
       services.rw-store = {
-        after = ["sysroot-nix-.rw\\x2dstore.mount"];
-        unitConfig.DefaultDependencies = false;
+        unitConfig = {
+          DefaultDependencies = false;
+          RequiresMountsFor = "/sysroot/nix/.rw-store";
+        };
         serviceConfig = {
           Type = "oneshot";
-          ExecStart = "/bin/mkdir -p 0755 /sysroot/nix/.rw-store/store /sysroot/nix/.rw-store/work /sysroot/nix/store";
+          ExecStart = "/bin/mkdir -p -m 0755 /sysroot/nix/.rw-store/store /sysroot/nix/.rw-store/work /sysroot/nix/store";
         };
       };
     };
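A usage sketch for the new persistBootDevice option above (hypothetical test-VM configuration, not part of this commit), keeping the writable boot disk image across VM restarts instead of recreating it each run:

  virtualisation.useBootLoader = true;
  virtualisation.persistBootDevice = true;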
@@ -108,6 +108,7 @@ in {
   breitbandmessung = handleTest ./breitbandmessung.nix {};
   brscan5 = handleTest ./brscan5.nix {};
   btrbk = handleTest ./btrbk.nix {};
+  btrbk-doas = handleTest ./btrbk-doas.nix {};
   btrbk-no-timer = handleTest ./btrbk-no-timer.nix {};
   btrbk-section-order = handleTest ./btrbk-section-order.nix {};
   buildbot = handleTest ./buildbot.nix {};

@@ -125,6 +126,7 @@ in {
   ceph-single-node-bluestore = handleTestOn ["x86_64-linux"] ./ceph-single-node-bluestore.nix {};
   certmgr = handleTest ./certmgr.nix {};
   cfssl = handleTestOn ["aarch64-linux" "x86_64-linux"] ./cfssl.nix {};
+  cgit = handleTest ./cgit.nix {};
   charliecloud = handleTest ./charliecloud.nix {};
   chromium = (handleTestOn ["aarch64-linux" "x86_64-linux"] ./chromium.nix {}).stable or {};
   chrony-ptp = handleTestOn ["aarch64-linux" "x86_64-linux"] ./chrony-ptp.nix {};

@@ -137,6 +139,7 @@ in {
   cntr = handleTestOn ["aarch64-linux" "x86_64-linux"] ./cntr.nix {};
   cockpit = handleTest ./cockpit.nix {};
   cockroachdb = handleTestOn ["x86_64-linux"] ./cockroachdb.nix {};
+  coder = handleTest ./coder.nix {};
   collectd = handleTest ./collectd.nix {};
   connman = handleTest ./connman.nix {};
   consul = handleTest ./consul.nix {};

@@ -235,9 +238,11 @@ in {
   freshrss-pgsql = handleTest ./freshrss-pgsql.nix {};
   frr = handleTest ./frr.nix {};
   fsck = handleTest ./fsck.nix {};
+  fsck-systemd-stage-1 = handleTest ./fsck.nix { systemdStage1 = true; };
   ft2-clone = handleTest ./ft2-clone.nix {};
   mimir = handleTest ./mimir.nix {};
   garage = handleTest ./garage {};
+  gemstash = handleTest ./gemstash.nix {};
   gerrit = handleTest ./gerrit.nix {};
   geth = handleTest ./geth.nix {};
   ghostunnel = handleTest ./ghostunnel.nix {};

@@ -308,6 +313,7 @@ in {
   initrd-network-ssh = handleTest ./initrd-network-ssh {};
   initrdNetwork = handleTest ./initrd-network.nix {};
   initrd-secrets = handleTest ./initrd-secrets.nix {};
+  initrd-secrets-changing = handleTest ./initrd-secrets-changing.nix {};
   input-remapper = handleTest ./input-remapper.nix {};
   inspircd = handleTest ./inspircd.nix {};
   installer = handleTest ./installer.nix {};

@@ -580,6 +586,7 @@ in {
   radarr = handleTest ./radarr.nix {};
   radicale = handleTest ./radicale.nix {};
   rasdaemon = handleTest ./rasdaemon.nix {};
+  readarr = handleTest ./readarr.nix {};
   redis = handleTest ./redis.nix {};
   redmine = handleTest ./redmine.nix {};
   restartByActivationScript = handleTest ./restart-by-activation-script.nix {};

@@ -687,6 +694,7 @@ in {
   terminal-emulators = handleTest ./terminal-emulators.nix {};
   tiddlywiki = handleTest ./tiddlywiki.nix {};
   tigervnc = handleTest ./tigervnc.nix {};
+  timescaledb = handleTest ./timescaledb.nix {};
   timezone = handleTest ./timezone.nix {};
   tinc = handleTest ./tinc {};
   tinydns = handleTest ./tinydns.nix {};
114
third_party/nixpkgs/nixos/tests/btrbk-doas.nix
vendored
Normal file
114
third_party/nixpkgs/nixos/tests/btrbk-doas.nix
vendored
Normal file
|
@ -0,0 +1,114 @@
|
||||||
|
import ./make-test-python.nix ({ pkgs, ... }:
|
||||||
|
|
||||||
|
let
|
||||||
|
privateKey = ''
|
||||||
|
-----BEGIN OPENSSH PRIVATE KEY-----
|
||||||
|
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
|
||||||
|
QyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrwAAAJB+cF5HfnBe
|
||||||
|
RwAAAAtzc2gtZWQyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrw
|
||||||
|
AAAEBN75NsJZSpt63faCuaD75Unko0JjlSDxMhYHAPJk2/xXHxQHThDpD9/AMWNqQer3Tg
|
||||||
|
9gXMb2lTZMn0pelo8xyvAAAADXJzY2h1ZXR6QGt1cnQ=
|
||||||
|
-----END OPENSSH PRIVATE KEY-----
|
||||||
|
'';
|
||||||
|
publicKey = ''
|
||||||
|
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHxQHThDpD9/AMWNqQer3Tg9gXMb2lTZMn0pelo8xyv
|
||||||
|
'';
|
||||||
|
in
|
||||||
|
{
|
||||||
|
name = "btrbk-doas";
|
||||||
|
meta = with pkgs.lib; {
|
||||||
|
maintainers = with maintainers; [ symphorien tu-maurice ];
|
||||||
|
};
|
||||||
|
|
||||||
|
nodes = {
|
||||||
|
archive = { ... }: {
|
||||||
|
security.sudo.enable = false;
|
||||||
|
security.doas.enable = true;
|
||||||
|
environment.systemPackages = with pkgs; [ btrfs-progs ];
|
||||||
|
# note: this makes the privateKey world readable.
|
||||||
|
# don't do it with real ssh keys.
|
||||||
|
environment.etc."btrbk_key".text = privateKey;
|
||||||
|
services.btrbk = {
|
||||||
|
extraPackages = [ pkgs.lz4 ];
|
||||||
|
instances = {
|
||||||
|
remote = {
|
||||||
|
onCalendar = "minutely";
|
||||||
|
settings = {
|
||||||
|
ssh_identity = "/etc/btrbk_key";
|
||||||
|
ssh_user = "btrbk";
|
||||||
|
stream_compress = "lz4";
|
||||||
|
volume = {
|
||||||
|
"ssh://main/mnt" = {
|
||||||
|
target = "/mnt";
|
||||||
|
snapshot_dir = "btrbk/remote";
|
||||||
|
subvolume = "to_backup";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
main = { ... }: {
|
||||||
|
security.sudo.enable = false;
|
||||||
|
security.doas.enable = true;
|
||||||
|
environment.systemPackages = with pkgs; [ btrfs-progs ];
|
||||||
|
services.openssh = {
|
||||||
|
enable = true;
|
||||||
|
passwordAuthentication = false;
|
||||||
|
kbdInteractiveAuthentication = false;
|
||||||
|
};
|
||||||
|
services.btrbk = {
|
||||||
|
extraPackages = [ pkgs.lz4 ];
|
||||||
|
sshAccess = [
|
||||||
|
{
|
||||||
|
key = publicKey;
|
||||||
|
roles = [ "source" "send" "info" "delete" ];
|
||||||
|
}
|
||||||
|
];
|
||||||
|
instances = {
|
||||||
|
local = {
|
||||||
|
onCalendar = "minutely";
|
||||||
|
settings = {
|
||||||
|
volume = {
|
||||||
|
"/mnt" = {
|
||||||
|
snapshot_dir = "btrbk/local";
|
||||||
|
subvolume = "to_backup";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
testScript = ''
|
||||||
|
start_all()
|
||||||
|
|
||||||
|
# create btrfs partition at /mnt
|
||||||
|
for machine in (archive, main):
|
||||||
|
machine.succeed("dd if=/dev/zero of=/data_fs bs=120M count=1")
|
||||||
|
machine.succeed("mkfs.btrfs /data_fs")
|
||||||
|
machine.succeed("mkdir /mnt")
|
||||||
|
machine.succeed("mount /data_fs /mnt")
|
||||||
|
|
||||||
|
# what to backup and where
|
||||||
|
main.succeed("btrfs subvolume create /mnt/to_backup")
|
||||||
|
main.succeed("mkdir -p /mnt/btrbk/{local,remote}")
|
||||||
|
|
||||||
|
# check that local snapshots work
|
||||||
|
with subtest("local"):
|
||||||
|
main.succeed("echo foo > /mnt/to_backup/bar")
|
||||||
|
main.wait_until_succeeds("cat /mnt/btrbk/local/*/bar | grep foo")
|
||||||
|
main.succeed("echo bar > /mnt/to_backup/bar")
|
||||||
|
main.succeed("cat /mnt/btrbk/local/*/bar | grep foo")
|
||||||
|
|
||||||
|
# check that btrfs send/receive works and ssh access works
|
||||||
|
with subtest("remote"):
|
||||||
|
archive.wait_until_succeeds("cat /mnt/*/bar | grep bar")
|
||||||
|
main.succeed("echo baz > /mnt/to_backup/bar")
|
||||||
|
archive.succeed("cat /mnt/*/bar | grep bar")
|
||||||
|
'';
|
||||||
|
})
|
73
third_party/nixpkgs/nixos/tests/cgit.nix
vendored
Normal file
73
third_party/nixpkgs/nixos/tests/cgit.nix
vendored
Normal file
|
@ -0,0 +1,73 @@
|
||||||
|
import ./make-test-python.nix ({ pkgs, ... }:
|
||||||
|
let
|
||||||
|
robotsTxt = pkgs.writeText "cgit-robots.txt" ''
|
||||||
|
User-agent: *
|
||||||
|
Disallow: /
|
||||||
|
'';
|
||||||
|
in {
|
||||||
|
name = "cgit";
|
||||||
|
meta = with pkgs.lib.maintainers; {
|
||||||
|
maintainers = [ schnusch ];
|
||||||
|
};
|
||||||
|
|
||||||
|
nodes = {
|
||||||
|
server = { ... }: {
|
||||||
|
services.cgit."localhost" = {
|
||||||
|
enable = true;
|
||||||
|
package = pkgs.cgit.overrideAttrs ({ postInstall, ... }: {
|
||||||
|
postInstall = ''
|
||||||
|
${postInstall}
|
||||||
|
cp ${robotsTxt} "$out/cgit/robots.txt"
|
||||||
|
'';
|
||||||
|
});
|
||||||
|
nginx.location = "/(c)git/";
|
||||||
|
repos = {
|
||||||
|
some-repo = {
|
||||||
|
path = "/srv/git/some-repo";
|
||||||
|
desc = "some-repo description";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
environment.systemPackages = [ pkgs.git ];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
testScript = { nodes, ... }: ''
|
||||||
|
start_all()
|
||||||
|
|
||||||
|
server.wait_for_unit("nginx.service")
|
||||||
|
server.wait_for_unit("network.target")
|
||||||
|
server.wait_for_open_port(80)
|
||||||
|
|
||||||
|
server.succeed("curl -fsS http://localhost/%28c%29git/cgit.css")
|
||||||
|
|
||||||
|
server.succeed("curl -fsS http://localhost/%28c%29git/robots.txt | diff -u - ${robotsTxt}")
|
||||||
|
|
||||||
|
server.succeed(
|
||||||
|
"curl -fsS http://localhost/%28c%29git/ | grep -F 'some-repo description'"
|
||||||
|
)
|
||||||
|
|
||||||
|
server.fail("curl -fsS http://localhost/robots.txt")
|
||||||
|
|
||||||
|
server.succeed("${pkgs.writeShellScript "setup-cgit-test-repo" ''
|
||||||
|
set -e
|
||||||
|
git init --bare -b master /srv/git/some-repo
|
||||||
|
git init -b master reference
|
||||||
|
cd reference
|
||||||
|
git remote add origin /srv/git/some-repo
|
||||||
|
date > date.txt
|
||||||
|
git add date.txt
|
||||||
|
git -c user.name=test -c user.email=test@localhost commit -m 'add date'
|
||||||
|
git push -u origin master
|
||||||
|
''}")
|
||||||
|
|
||||||
|
server.succeed(
|
||||||
|
"curl -fsS 'http://localhost/%28c%29git/some-repo/plain/date.txt?id=master' | diff -u reference/date.txt -"
|
||||||
|
)
|
||||||
|
|
||||||
|
server.succeed(
|
||||||
|
"git clone http://localhost/%28c%29git/some-repo && diff -u reference/date.txt some-repo/date.txt"
|
||||||
|
)
|
||||||
|
'';
|
||||||
|
})
|
24 third_party/nixpkgs/nixos/tests/coder.nix vendored Normal file
@@ -0,0 +1,24 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "coder";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ shyim ghuntley ];
+  };
+
+  nodes.machine =
+    { pkgs, ... }:
+    {
+      services.coder = {
+        enable = true;
+        accessUrl = "http://localhost:3000";
+      };
+    };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("postgresql.service")
+    machine.wait_for_unit("coder.service")
+    machine.wait_for_open_port(3000)
+
+    machine.succeed("curl --fail http://localhost:3000")
+  '';
+})
54
third_party/nixpkgs/nixos/tests/docker-tools.nix
vendored
54
third_party/nixpkgs/nixos/tests/docker-tools.nix
vendored
|
@ -1,6 +1,52 @@
|
||||||
# this test creates a simple GNU image with docker tools and sees if it executes
|
# this test creates a simple GNU image with docker tools and sees if it executes
|
||||||
|
|
||||||
import ./make-test-python.nix ({ pkgs, ... }: {
|
import ./make-test-python.nix ({ pkgs, ... }:
|
||||||
|
let
|
||||||
|
# nixpkgs#214434: dockerTools.buildImage fails to unpack base images
|
||||||
|
# containing duplicate layers when those duplicate tarballs
|
||||||
|
# appear under the manifest's 'Layers'. Docker can generate images
|
||||||
|
# like this even though dockerTools does not.
|
||||||
|
repeatedLayerTestImage =
|
||||||
|
let
|
||||||
|
# Rootfs diffs for layers 1 and 2 are identical (and empty)
|
||||||
|
layer1 = pkgs.dockerTools.buildImage { name = "empty"; };
|
||||||
|
layer2 = layer1.overrideAttrs (_: { fromImage = layer1; });
|
||||||
|
repeatedRootfsDiffs = pkgs.runCommandNoCC "image-with-links.tar" {
|
||||||
|
nativeBuildInputs = [pkgs.jq];
|
||||||
|
} ''
|
||||||
|
mkdir contents
|
||||||
|
tar -xf "${layer2}" -C contents
|
||||||
|
cd contents
|
||||||
|
first_rootfs=$(jq -r '.[0].Layers[0]' manifest.json)
|
||||||
|
second_rootfs=$(jq -r '.[0].Layers[1]' manifest.json)
|
||||||
|
target_rootfs=$(sha256sum "$first_rootfs" | cut -d' ' -f 1).tar
|
||||||
|
|
||||||
|
# Replace duplicated rootfs diffs with symlinks to one tarball
|
||||||
|
chmod -R ug+w .
|
||||||
|
mv "$first_rootfs" "$target_rootfs"
|
||||||
|
rm "$second_rootfs"
|
||||||
|
ln -s "../$target_rootfs" "$first_rootfs"
|
||||||
|
ln -s "../$target_rootfs" "$second_rootfs"
|
||||||
|
|
||||||
|
# Update manifest's layers to use the symlinks' target
|
||||||
|
cat manifest.json | \
|
||||||
|
jq ".[0].Layers[0] = \"$target_rootfs\"" |
|
||||||
|
jq ".[0].Layers[1] = \"$target_rootfs\"" > manifest.json.new
|
||||||
|
mv manifest.json.new manifest.json
|
||||||
|
|
||||||
|
tar --sort=name --hard-dereference -cf $out .
|
||||||
|
'';
|
||||||
|
in pkgs.dockerTools.buildImage {
|
||||||
|
fromImage = repeatedRootfsDiffs;
|
||||||
|
name = "repeated-layer-test";
|
||||||
|
tag = "latest";
|
||||||
|
copyToRoot = pkgs.bash;
|
||||||
|
# A runAsRoot script is required to force previous layers to be unpacked
|
||||||
|
runAsRoot = ''
|
||||||
|
echo 'runAsRoot has run.'
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
in {
|
||||||
name = "docker-tools";
|
name = "docker-tools";
|
||||||
meta = with pkgs.lib.maintainers; {
|
meta = with pkgs.lib.maintainers; {
|
||||||
maintainers = [ lnl7 roberth ];
|
maintainers = [ lnl7 roberth ];
|
||||||
|
@ -221,6 +267,12 @@ import ./make-test-python.nix ({ pkgs, ... }: {
|
||||||
"docker run --rm ${examples.layersUnpackOrder.imageName} cat /layer-order"
|
"docker run --rm ${examples.layersUnpackOrder.imageName} cat /layer-order"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
with subtest("Ensure repeated base layers handled by buildImage"):
|
||||||
|
docker.succeed(
|
||||||
|
"docker load --input='${repeatedLayerTestImage}'",
|
||||||
|
"docker run --rm ${repeatedLayerTestImage.imageName} /bin/bash -c 'exit 0'"
|
||||||
|
)
|
||||||
|
|
||||||
with subtest("Ensure environment variables are correctly inherited"):
|
with subtest("Ensure environment variables are correctly inherited"):
|
||||||
docker.succeed(
|
docker.succeed(
|
||||||
"docker load --input='${examples.environmentVariables}'"
|
"docker load --input='${examples.environmentVariables}'"
|
||||||
|
|
12 third_party/nixpkgs/nixos/tests/fsck.nix vendored
@@ -1,3 +1,9 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+, systemdStage1 ? false
+}:
+
 import ./make-test-python.nix {
   name = "fsck";

@@ -11,13 +17,17 @@ import ./make-test-python.nix {
         autoFormat = true;
       };
     };
+
+    boot.initrd.systemd.enable = systemdStage1;
   };

   testScript = ''
     machine.wait_for_unit("default.target")

     with subtest("root fs is fsckd"):
-        machine.succeed("journalctl -b | grep 'fsck.ext4.*/dev/vda'")
+        machine.succeed("journalctl -b | grep '${if systemdStage1
+          then "fsck.*vda.*clean"
+          else "fsck.ext4.*/dev/vda"}'")

     with subtest("mnt fs is fsckd"):
         machine.succeed("journalctl -b | grep 'fsck.*/dev/vdb.*clean'")

@@ -49,5 +49,5 @@ in
   })
   {}
   [
-    "0_8_0"
+    "0_8"
   ]
51 third_party/nixpkgs/nixos/tests/gemstash.nix vendored Normal file
@@ -0,0 +1,51 @@
+{ system ? builtins.currentSystem, config ? { }
+, pkgs ? import ../.. { inherit system config; } }:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let common_meta = { maintainers = [ maintainers.viraptor ]; };
+in
+{
+  gemstash_works = makeTest {
+    name = "gemstash-works";
+    meta = common_meta;
+
+    nodes.machine = { config, pkgs, ... }: {
+      services.gemstash = {
+        enable = true;
+      };
+    };
+
+    # gemstash responds to http requests
+    testScript = ''
+      machine.wait_for_unit("gemstash.service")
+      machine.wait_for_file("/var/lib/gemstash")
+      machine.wait_for_open_port(9292)
+      machine.succeed("curl http://localhost:9292")
+    '';
+  };
+
+  gemstash_custom_port = makeTest {
+    name = "gemstash-custom-port";
+    meta = common_meta;
+
+    nodes.machine = { config, pkgs, ... }: {
+      services.gemstash = {
+        enable = true;
+        openFirewall = true;
+        settings = {
+          bind = "tcp://0.0.0.0:12345";
+        };
+      };
+    };
+
+    # gemstash responds to http requests
+    testScript = ''
+      machine.wait_for_unit("gemstash.service")
+      machine.wait_for_file("/var/lib/gemstash")
+      machine.wait_for_open_port(12345)
+      machine.succeed("curl http://localhost:12345")
+    '';
+  };
+}
12 third_party/nixpkgs/nixos/tests/gitlab.nix vendored
@@ -69,6 +69,10 @@ in {
         databasePasswordFile = pkgs.writeText "dbPassword" "xo0daiF4";
         initialRootPasswordFile = pkgs.writeText "rootPassword" initialRootPassword;
         smtp.enable = true;
+        pages = {
+          enable = true;
+          settings.pages-domain = "localhost";
+        };
         extraConfig = {
           incoming_email = {
             enabled = true;
@@ -79,11 +83,6 @@ in {
             host = "localhost";
             port = 143;
           };
-          # https://github.com/NixOS/nixpkgs/issues/132295
-          # pages = {
-          #   enabled = true;
-          #   host = "localhost";
-          # };
         };
         secrets = {
           secretFile = pkgs.writeText "secret" "Aig5zaic";
@@ -171,10 +170,9 @@ in {
     waitForServices = ''
       gitlab.wait_for_unit("gitaly.service")
       gitlab.wait_for_unit("gitlab-workhorse.service")
-      # https://github.com/NixOS/nixpkgs/issues/132295
-      # gitlab.wait_for_unit("gitlab-pages.service")
      gitlab.wait_for_unit("gitlab-mailroom.service")
      gitlab.wait_for_unit("gitlab.service")
+      gitlab.wait_for_unit("gitlab-pages.service")
      gitlab.wait_for_unit("gitlab-sidekiq.service")
      gitlab.wait_for_file("${nodes.gitlab.config.services.gitlab.statePath}/tmp/sockets/gitlab.socket")
      gitlab.wait_until_succeeds("curl -sSf http://gitlab/users/sign_in")
25 third_party/nixpkgs/nixos/tests/hadoop/hbase.nix vendored
@@ -53,6 +53,24 @@ with pkgs.lib;
        };
      };
    };
+    thrift = { ... }:{
+      services.hadoop = {
+        inherit coreSite;
+        hbase = {
+          inherit zookeeperQuorum;
+          thrift = defOpts;
+        };
+      };
+    };
+    rest = { ... }:{
+      services.hadoop = {
+        inherit coreSite;
+        hbase = {
+          inherit zookeeperQuorum;
+          rest = defOpts;
+        };
+      };
+    };
  };

  testScript = ''
@@ -80,5 +98,12 @@ with pkgs.lib;
    assert "1 active master, 0 backup masters, 1 servers" in master.succeed("echo status | HADOOP_USER_NAME=hbase hbase shell -n")
    regionserver.wait_until_succeeds("echo \"create 't1','f1'\" | HADOOP_USER_NAME=hbase hbase shell -n")
    assert "NAME => 'f1'" in regionserver.succeed("echo \"describe 't1'\" | HADOOP_USER_NAME=hbase hbase shell -n")
+
+    rest.wait_for_open_port(8080)
+    assert "${hbase.version}" in regionserver.succeed("curl http://rest:8080/version/cluster")
+
+    thrift.wait_for_open_port(9090)
  '';
+
+  meta.maintainers = with maintainers; [ illustris ];
})
58
third_party/nixpkgs/nixos/tests/initrd-secrets-changing.nix
vendored
Normal file
58
third_party/nixpkgs/nixos/tests/initrd-secrets-changing.nix
vendored
Normal file
|
@ -0,0 +1,58 @@
|
||||||
|
{ system ? builtins.currentSystem
|
||||||
|
, config ? {}
|
||||||
|
, pkgs ? import ../.. { inherit system config; }
|
||||||
|
, lib ? pkgs.lib
|
||||||
|
, testing ? import ../lib/testing-python.nix { inherit system pkgs; }
|
||||||
|
}:
|
||||||
|
|
||||||
|
let
|
||||||
|
secret1InStore = pkgs.writeText "topsecret" "iamasecret1";
|
||||||
|
secret2InStore = pkgs.writeText "topsecret" "iamasecret2";
|
||||||
|
in
|
||||||
|
|
||||||
|
testing.makeTest {
|
||||||
|
name = "initrd-secrets-changing";
|
||||||
|
|
||||||
|
nodes.machine = { ... }: {
|
||||||
|
virtualisation.useBootLoader = true;
|
||||||
|
virtualisation.persistBootDevice = true;
|
||||||
|
|
||||||
|
boot.loader.grub.device = "/dev/vda";
|
||||||
|
|
||||||
|
boot.initrd.secrets = {
|
||||||
|
"/test" = secret1InStore;
|
||||||
|
"/run/keys/test" = secret1InStore;
|
||||||
|
};
|
||||||
|
boot.initrd.postMountCommands = "cp /test /mnt-root/secret-from-initramfs";
|
||||||
|
|
||||||
|
specialisation.secrets2System.configuration = {
|
||||||
|
boot.initrd.secrets = lib.mkForce {
|
||||||
|
"/test" = secret2InStore;
|
||||||
|
"/run/keys/test" = secret2InStore;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
testScript = ''
|
||||||
|
start_all()
|
||||||
|
|
||||||
|
machine.wait_for_unit("multi-user.target")
|
||||||
|
print(machine.succeed("cat /run/keys/test"))
|
||||||
|
machine.succeed(
|
||||||
|
"cmp ${secret1InStore} /secret-from-initramfs",
|
||||||
|
"cmp ${secret1InStore} /run/keys/test",
|
||||||
|
)
|
||||||
|
# Select the second boot entry corresponding to the specialisation secrets2System.
|
||||||
|
machine.succeed("grub-reboot 1")
|
||||||
|
machine.shutdown()
|
||||||
|
|
||||||
|
with subtest("Check that the specialisation's secrets are distinct despite identical kernels"):
|
||||||
|
machine.wait_for_unit("multi-user.target")
|
||||||
|
print(machine.succeed("cat /run/keys/test"))
|
||||||
|
machine.succeed(
|
||||||
|
"cmp ${secret2InStore} /secret-from-initramfs",
|
||||||
|
"cmp ${secret2InStore} /run/keys/test",
|
||||||
|
)
|
||||||
|
machine.shutdown()
|
||||||
|
'';
|
||||||
|
}
|
|
@@ -77,9 +77,9 @@ let
  let iface = if grubVersion == 1 then "ide" else "virtio";
      isEfi = bootLoader == "systemd-boot" || (bootLoader == "grub" && grubUseEfi);
      bios = if pkgs.stdenv.isAarch64 then "QEMU_EFI.fd" else "OVMF.fd";
-  in if !isEfi && !pkgs.stdenv.hostPlatform.isx86 then
-    throw "Non-EFI boot methods are only supported on i686 / x86_64"
-  else ''
+  in if !isEfi && !pkgs.stdenv.hostPlatform.isx86 then ''
+    machine.succeed("true")
+  '' else ''
    def assemble_qemu_flags():
        flags = "-cpu max"
        ${if (system == "x86_64-linux" || system == "i686-linux")
120
third_party/nixpkgs/nixos/tests/kea.nix
vendored
120
third_party/nixpkgs/nixos/tests/kea.nix
vendored
|
@ -1,3 +1,10 @@
|
||||||
|
# This test verifies DHCPv4 interaction between a client and a router.
|
||||||
|
# For successful DHCP allocations a dynamic update request is sent
|
||||||
|
# towards a nameserver to allocate a name in the lan.nixos.test zone.
|
||||||
|
# We then verify whether client and router can ping each other, and
|
||||||
|
# that the nameserver can resolve the clients fqdn to the correct IP
|
||||||
|
# address.
|
||||||
|
|
||||||
import ./make-test-python.nix ({ pkgs, lib, ...}: {
|
import ./make-test-python.nix ({ pkgs, lib, ...}: {
|
||||||
meta.maintainers = with lib.maintainers; [ hexa ];
|
meta.maintainers = with lib.maintainers; [ hexa ];
|
||||||
|
|
||||||
|
@ -8,17 +15,17 @@ import ./make-test-python.nix ({ pkgs, lib, ...}: {
|
||||||
virtualisation.vlans = [ 1 ];
|
virtualisation.vlans = [ 1 ];
|
||||||
|
|
||||||
networking = {
|
networking = {
|
||||||
useNetworkd = true;
|
|
||||||
useDHCP = false;
|
useDHCP = false;
|
||||||
firewall.allowedUDPPorts = [ 67 ];
|
firewall.allowedUDPPorts = [ 67 ];
|
||||||
};
|
};
|
||||||
|
|
||||||
systemd.network = {
|
systemd.network = {
|
||||||
|
enable = true;
|
||||||
networks = {
|
networks = {
|
||||||
"01-eth1" = {
|
"01-eth1" = {
|
||||||
name = "eth1";
|
name = "eth1";
|
||||||
networkConfig = {
|
networkConfig = {
|
||||||
Address = "10.0.0.1/30";
|
Address = "10.0.0.1/29";
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
@ -45,13 +52,115 @@ import ./make-test-python.nix ({ pkgs, lib, ...}: {
|
||||||
};
|
};
|
||||||
|
|
||||||
subnet4 = [ {
|
subnet4 = [ {
|
||||||
subnet = "10.0.0.0/30";
|
subnet = "10.0.0.0/29";
|
||||||
pools = [ {
|
pools = [ {
|
||||||
pool = "10.0.0.2 - 10.0.0.2";
|
pool = "10.0.0.3 - 10.0.0.3";
|
||||||
} ];
|
} ];
|
||||||
} ];
|
} ];
|
||||||
|
|
||||||
|
# Enable communication between dhcp4 and a local dhcp-ddns
|
||||||
|
# instance.
|
||||||
|
# https://kea.readthedocs.io/en/kea-2.2.0/arm/dhcp4-srv.html#ddns-for-dhcpv4
|
||||||
|
dhcp-ddns = {
|
||||||
|
enable-updates = true;
|
||||||
|
};
|
||||||
|
|
||||||
|
ddns-send-updates = true;
|
||||||
|
ddns-qualifying-suffix = "lan.nixos.test.";
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
services.kea.dhcp-ddns = {
|
||||||
|
enable = true;
|
||||||
|
settings = {
|
||||||
|
forward-ddns = {
|
||||||
|
# Configure updates of a forward zone named `lan.nixos.test`
|
||||||
|
# hosted at the nameserver at 10.0.0.2
|
||||||
|
# https://kea.readthedocs.io/en/kea-2.2.0/arm/ddns.html#adding-forward-dns-servers
|
||||||
|
ddns-domains = [ {
|
||||||
|
name = "lan.nixos.test.";
|
||||||
|
# Use a TSIG key in production!
|
||||||
|
key-name = "";
|
||||||
|
dns-servers = [ {
|
||||||
|
ip-address = "10.0.0.2";
|
||||||
|
port = 53;
|
||||||
|
} ];
|
||||||
|
} ];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
nameserver = { config, pkgs, ... }: {
|
||||||
|
virtualisation.vlans = [ 1 ];
|
||||||
|
|
||||||
|
networking = {
|
||||||
|
useDHCP = false;
|
||||||
|
firewall.allowedUDPPorts = [ 53 ];
|
||||||
|
};
|
||||||
|
|
||||||
|
systemd.network = {
|
||||||
|
enable = true;
|
||||||
|
networks = {
|
||||||
|
"01-eth1" = {
|
||||||
|
name = "eth1";
|
||||||
|
networkConfig = {
|
||||||
|
Address = "10.0.0.2/29";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
services.resolved.enable = false;
|
||||||
|
|
||||||
|
# Set up an authoritative nameserver, serving the `lan.nixos.test`
|
||||||
|
# zone and configure an ACL that allows dynamic updates from
|
||||||
|
# the router's ip address.
|
||||||
|
# This ACL is likely insufficient for production usage. Please
|
||||||
|
# use TSIG keys.
|
||||||
|
services.knot = let
|
||||||
|
zone = pkgs.writeTextDir "lan.nixos.test.zone" ''
|
||||||
|
@ SOA ns.nixos.test nox.nixos.test 0 86400 7200 3600000 172800
|
||||||
|
@ NS nameserver
|
||||||
|
nameserver A 10.0.0.3
|
||||||
|
router A 10.0.0.1
|
||||||
|
'';
|
||||||
|
zonesDir = pkgs.buildEnv {
|
||||||
|
name = "knot-zones";
|
||||||
|
paths = [ zone ];
|
||||||
|
};
|
||||||
|
in {
|
||||||
|
enable = true;
|
||||||
|
extraArgs = [
|
||||||
|
"-v"
|
||||||
|
];
|
||||||
|
extraConfig = ''
|
||||||
|
server:
|
||||||
|
listen: 0.0.0.0@53
|
||||||
|
|
||||||
|
log:
|
||||||
|
- target: syslog
|
||||||
|
any: debug
|
||||||
|
|
||||||
|
acl:
|
||||||
|
- id: dhcp_ddns
|
||||||
|
address: 10.0.0.1
|
||||||
|
action: update
|
||||||
|
|
||||||
|
template:
|
||||||
|
- id: default
|
||||||
|
storage: ${zonesDir}
|
||||||
|
zonefile-sync: -1
|
||||||
|
zonefile-load: difference-no-serial
|
||||||
|
journal-content: all
|
||||||
|
|
||||||
|
zone:
|
||||||
|
- domain: lan.nixos.test
|
||||||
|
file: lan.nixos.test.zone
|
||||||
|
acl: [dhcp_ddns]
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
};
|
};
|
||||||
|
|
||||||
client = { config, pkgs, ... }: {
|
client = { config, pkgs, ... }: {
|
||||||
|
@ -70,6 +179,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...}: {
|
||||||
router.wait_for_unit("kea-dhcp4-server.service")
|
router.wait_for_unit("kea-dhcp4-server.service")
|
||||||
client.wait_for_unit("systemd-networkd-wait-online.service")
|
client.wait_for_unit("systemd-networkd-wait-online.service")
|
||||||
client.wait_until_succeeds("ping -c 5 10.0.0.1")
|
client.wait_until_succeeds("ping -c 5 10.0.0.1")
|
||||||
router.wait_until_succeeds("ping -c 5 10.0.0.2")
|
router.wait_until_succeeds("ping -c 5 10.0.0.3")
|
||||||
|
nameserver.wait_until_succeeds("kdig +short client.lan.nixos.test @10.0.0.2 | grep -q 10.0.0.3")
|
||||||
'';
|
'';
|
||||||
})
|
})
|
||||||
|
|
48
third_party/nixpkgs/nixos/tests/knot.nix
vendored
48
third_party/nixpkgs/nixos/tests/knot.nix
vendored
|
@ -31,7 +31,7 @@ let
|
||||||
# DO NOT USE pkgs.writeText IN PRODUCTION. This put secrets in the nix store!
|
# DO NOT USE pkgs.writeText IN PRODUCTION. This put secrets in the nix store!
|
||||||
tsigFile = pkgs.writeText "tsig.conf" ''
|
tsigFile = pkgs.writeText "tsig.conf" ''
|
||||||
key:
|
key:
|
||||||
- id: slave_key
|
- id: xfr_key
|
||||||
algorithm: hmac-sha256
|
algorithm: hmac-sha256
|
||||||
secret: zOYgOgnzx3TGe5J5I/0kxd7gTcxXhLYMEq3Ek3fY37s=
|
secret: zOYgOgnzx3TGe5J5I/0kxd7gTcxXhLYMEq3Ek3fY37s=
|
||||||
'';
|
'';
|
||||||
|
@ -43,7 +43,7 @@ in {
|
||||||
|
|
||||||
|
|
||||||
nodes = {
|
nodes = {
|
||||||
master = { lib, ... }: {
|
primary = { lib, ... }: {
|
||||||
imports = [ common ];
|
imports = [ common ];
|
||||||
|
|
||||||
# trigger sched_setaffinity syscall
|
# trigger sched_setaffinity syscall
|
||||||
|
@ -64,22 +64,17 @@ in {
|
||||||
server:
|
server:
|
||||||
listen: 0.0.0.0@53
|
listen: 0.0.0.0@53
|
||||||
listen: ::@53
|
listen: ::@53
|
||||||
|
automatic-acl: true
|
||||||
acl:
|
|
||||||
- id: slave_acl
|
|
||||||
address: 192.168.0.2
|
|
||||||
key: slave_key
|
|
||||||
action: transfer
|
|
||||||
|
|
||||||
remote:
|
remote:
|
||||||
- id: slave
|
- id: secondary
|
||||||
address: 192.168.0.2@53
|
address: 192.168.0.2@53
|
||||||
|
key: xfr_key
|
||||||
|
|
||||||
template:
|
template:
|
||||||
- id: default
|
- id: default
|
||||||
storage: ${knotZonesEnv}
|
storage: ${knotZonesEnv}
|
||||||
notify: [slave]
|
notify: [secondary]
|
||||||
acl: [slave_acl]
|
|
||||||
dnssec-signing: on
|
dnssec-signing: on
|
||||||
# Input-only zone files
|
# Input-only zone files
|
||||||
# https://www.knot-dns.cz/docs/2.8/html/operation.html#example-3
|
# https://www.knot-dns.cz/docs/2.8/html/operation.html#example-3
|
||||||
|
@ -105,7 +100,7 @@ in {
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
|
||||||
slave = { lib, ... }: {
|
secondary = { lib, ... }: {
|
||||||
imports = [ common ];
|
imports = [ common ];
|
||||||
networking.interfaces.eth1 = {
|
networking.interfaces.eth1 = {
|
||||||
ipv4.addresses = lib.mkForce [
|
ipv4.addresses = lib.mkForce [
|
||||||
|
@ -122,21 +117,16 @@ in {
|
||||||
server:
|
server:
|
||||||
listen: 0.0.0.0@53
|
listen: 0.0.0.0@53
|
||||||
listen: ::@53
|
listen: ::@53
|
||||||
|
automatic-acl: true
|
||||||
acl:
|
|
||||||
- id: notify_from_master
|
|
||||||
address: 192.168.0.1
|
|
||||||
action: notify
|
|
||||||
|
|
||||||
remote:
|
remote:
|
||||||
- id: master
|
- id: primary
|
||||||
address: 192.168.0.1@53
|
address: 192.168.0.1@53
|
||||||
key: slave_key
|
key: xfr_key
|
||||||
|
|
||||||
template:
|
template:
|
||||||
- id: default
|
- id: default
|
||||||
master: master
|
master: primary
|
||||||
acl: [notify_from_master]
|
|
||||||
# zonefileless setup
|
# zonefileless setup
|
||||||
# https://www.knot-dns.cz/docs/2.8/html/operation.html#example-2
|
# https://www.knot-dns.cz/docs/2.8/html/operation.html#example-2
|
||||||
zonefile-sync: -1
|
zonefile-sync: -1
|
||||||
|
@ -174,19 +164,19 @@ in {
|
||||||
};
|
};
|
||||||
|
|
||||||
testScript = { nodes, ... }: let
|
testScript = { nodes, ... }: let
|
||||||
master4 = (lib.head nodes.master.config.networking.interfaces.eth1.ipv4.addresses).address;
|
primary4 = (lib.head nodes.primary.config.networking.interfaces.eth1.ipv4.addresses).address;
|
||||||
master6 = (lib.head nodes.master.config.networking.interfaces.eth1.ipv6.addresses).address;
|
primary6 = (lib.head nodes.primary.config.networking.interfaces.eth1.ipv6.addresses).address;
|
||||||
|
|
||||||
slave4 = (lib.head nodes.slave.config.networking.interfaces.eth1.ipv4.addresses).address;
|
secondary4 = (lib.head nodes.secondary.config.networking.interfaces.eth1.ipv4.addresses).address;
|
||||||
slave6 = (lib.head nodes.slave.config.networking.interfaces.eth1.ipv6.addresses).address;
|
secondary6 = (lib.head nodes.secondary.config.networking.interfaces.eth1.ipv6.addresses).address;
|
||||||
in ''
|
in ''
|
||||||
import re
|
import re
|
||||||
|
|
||||||
start_all()
|
start_all()
|
||||||
|
|
||||||
client.wait_for_unit("network.target")
|
client.wait_for_unit("network.target")
|
||||||
master.wait_for_unit("knot.service")
|
primary.wait_for_unit("knot.service")
|
||||||
slave.wait_for_unit("knot.service")
|
secondary.wait_for_unit("knot.service")
|
||||||
|
|
||||||
|
|
||||||
def test(host, query_type, query, pattern):
|
def test(host, query_type, query, pattern):
|
||||||
|
@ -195,7 +185,7 @@ in {
|
||||||
assert re.search(pattern, out), f'Did not match "{pattern}"'
|
assert re.search(pattern, out), f'Did not match "{pattern}"'
|
||||||
|
|
||||||
|
|
||||||
for host in ("${master4}", "${master6}", "${slave4}", "${slave6}"):
|
for host in ("${primary4}", "${primary6}", "${secondary4}", "${secondary6}"):
|
||||||
with subtest(f"Interrogate {host}"):
|
with subtest(f"Interrogate {host}"):
|
||||||
test(host, "SOA", "example.com", r"start of authority.*noc\.example\.com\.")
|
test(host, "SOA", "example.com", r"start of authority.*noc\.example\.com\.")
|
||||||
test(host, "A", "example.com", r"has no [^ ]+ record")
|
test(host, "A", "example.com", r"has no [^ ]+ record")
|
||||||
|
@ -211,6 +201,6 @@ in {
|
||||||
test(host, "RRSIG", "www.example.com", r"RR set signature is")
|
test(host, "RRSIG", "www.example.com", r"RR set signature is")
|
||||||
test(host, "DNSKEY", "example.com", r"DNSSEC key is")
|
test(host, "DNSKEY", "example.com", r"DNSSEC key is")
|
||||||
|
|
||||||
master.log(master.succeed("systemd-analyze security knot.service | grep -v '✓'"))
|
primary.log(primary.succeed("systemd-analyze security knot.service | grep -v '✓'"))
|
||||||
'';
|
'';
|
||||||
})
|
})
|
||||||
|
|
|
@@ -107,7 +107,10 @@ import ../make-test-python.nix (
      client = { pkgs, ... }: {
        environment.systemPackages = [
          (pkgs.writers.writePython3Bin "create_management_room_and_invite_mjolnir"
-            { libraries = [ pkgs.python3Packages.matrix-nio ]; } ''
+            { libraries = with pkgs.python3Packages; [
+                matrix-nio
+              ] ++ matrix-nio.optional-dependencies.e2e;
+            } ''
            import asyncio

            from nio import (
@@ -30,12 +30,10 @@ let
      virtualisation.additionalPaths = [
        pkgs.hello
        pkgs.figlet
-
-        # This includes build dependencies all the way down. Not efficient,
-        # but we do need build deps to an *arbitrary* depth, which is hard to
-        # determine.
-        (allDrvOutputs nodes.server.config.system.build.toplevel)
      ];
+
+      # TODO: make this efficient, https://github.com/NixOS/nixpkgs/issues/180529
+      system.includeBuildDependencies = true;
    };
    server = { lib, ... }: {
      imports = [ ./legacy/base-configuration.nix ];
2 third_party/nixpkgs/nixos/tests/openldap.nix vendored
@@ -118,7 +118,7 @@ in {
    };
  };
  testScript = { nodes, ... }: let
-    specializations = "${nodes.machine.config.system.build.toplevel}/specialisation";
+    specializations = "${nodes.machine.system.build.toplevel}/specialisation";
    changeRootPw = ''
      dn: olcDatabase={1}mdb,cn=config
      changetype: modify
18 third_party/nixpkgs/nixos/tests/readarr.nix vendored Normal file
@@ -0,0 +1,18 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+with lib;
+
+{
+  name = "readarr";
+  meta.maintainers = with maintainers; [ jocelynthode ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    { services.readarr.enable = true; };
+
+  testScript = ''
+    machine.wait_for_unit("readarr.service")
+    machine.wait_for_open_port(8787)
+    machine.succeed("curl --fail http://localhost:8787/")
+  '';
+})
93
third_party/nixpkgs/nixos/tests/timescaledb.nix
vendored
Normal file
93
third_party/nixpkgs/nixos/tests/timescaledb.nix
vendored
Normal file
|
@ -0,0 +1,93 @@
|
||||||
|
# mostly copied from ./postgresql.nix as it seemed unapproriate to
|
||||||
|
# test additional extensions for postgresql there.
|
||||||
|
|
||||||
|
{ system ? builtins.currentSystem
|
||||||
|
, config ? { }
|
||||||
|
, pkgs ? import ../.. { inherit system config; }
|
||||||
|
}:
|
||||||
|
|
||||||
|
with import ../lib/testing-python.nix { inherit system pkgs; };
|
||||||
|
with pkgs.lib;
|
||||||
|
|
||||||
|
let
|
||||||
|
postgresql-versions = import ../../pkgs/servers/sql/postgresql pkgs;
|
||||||
|
test-sql = pkgs.writeText "postgresql-test" ''
|
||||||
|
CREATE EXTENSION timescaledb;
|
||||||
|
CREATE EXTENSION timescaledb_toolkit;
|
||||||
|
|
||||||
|
CREATE TABLE sth (
|
||||||
|
time TIMESTAMPTZ NOT NULL,
|
||||||
|
value DOUBLE PRECISION
|
||||||
|
);
|
||||||
|
|
||||||
|
SELECT create_hypertable('sth', 'time');
|
||||||
|
|
||||||
|
INSERT INTO sth (time, value) VALUES
|
||||||
|
('2003-04-12 04:05:06 America/New_York', 1.0),
|
||||||
|
('2003-04-12 04:05:07 America/New_York', 2.0),
|
||||||
|
('2003-04-12 04:05:08 America/New_York', 3.0),
|
||||||
|
('2003-04-12 04:05:09 America/New_York', 4.0),
|
||||||
|
('2003-04-12 04:05:10 America/New_York', 5.0)
|
||||||
|
;
|
||||||
|
|
||||||
|
WITH t AS (
|
||||||
|
SELECT
|
||||||
|
time_bucket('1 day'::interval, time) AS dt,
|
||||||
|
stats_agg(value) AS stats
|
||||||
|
FROM sth
|
||||||
|
GROUP BY time_bucket('1 day'::interval, time)
|
||||||
|
)
|
||||||
|
SELECT
|
||||||
|
average(stats)
|
||||||
|
FROM t;
|
||||||
|
'';
|
||||||
|
make-postgresql-test = postgresql-name: postgresql-package: makeTest {
|
||||||
|
name = postgresql-name;
|
||||||
|
meta = with pkgs.lib.maintainers; {
|
||||||
|
maintainers = [ typetetris ];
|
||||||
|
};
|
||||||
|
|
||||||
|
nodes.machine = { ... }:
|
||||||
|
{
|
||||||
|
services.postgresql = {
|
||||||
|
enable = true;
|
||||||
|
package = postgresql-package;
|
||||||
|
extraPlugins = with postgresql-package.pkgs; [
|
||||||
|
timescaledb
|
||||||
|
timescaledb_toolkit
|
||||||
|
];
|
||||||
|
settings = { shared_preload_libraries = "timescaledb, timescaledb_toolkit"; };
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
testScript = ''
|
||||||
|
def check_count(statement, lines):
|
||||||
|
return 'test $(sudo -u postgres psql postgres -tAc "{}"|wc -l) -eq {}'.format(
|
||||||
|
statement, lines
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
machine.start()
|
||||||
|
machine.wait_for_unit("postgresql")
|
||||||
|
|
||||||
|
with subtest("Postgresql with extensions timescaledb and timescaledb_toolkit is available just after unit start"):
|
||||||
|
machine.succeed(
|
||||||
|
"sudo -u postgres psql -f ${test-sql}"
|
||||||
|
)
|
||||||
|
|
||||||
|
machine.fail(check_count("SELECT * FROM sth;", 3))
|
||||||
|
machine.succeed(check_count("SELECT * FROM sth;", 5))
|
||||||
|
machine.fail(check_count("SELECT * FROM sth;", 4))
|
||||||
|
|
||||||
|
machine.shutdown()
|
||||||
|
'';
|
||||||
|
|
||||||
|
};
|
||||||
|
applicablePostgresqlVersions = filterAttrs (_: value: versionAtLeast value.version "12") postgresql-versions;
|
||||||
|
in
|
||||||
|
mapAttrs'
|
||||||
|
(name: package: {
|
||||||
|
inherit name;
|
||||||
|
value = make-postgresql-test name package;
|
||||||
|
})
|
||||||
|
applicablePostgresqlVersions
|
|
@@ -24,11 +24,11 @@
 stdenv.mkDerivation rec {
   pname = "bitwig-studio";
-  version = "4.4.6";
+  version = "4.4.8";

   src = fetchurl {
     url = "https://downloads.bitwig.com/stable/${version}/${pname}-${version}.deb";
-    sha256 = "sha256-VcK74JrVH81sgNeh1FDvCO1jtgkVeLpx5IqlXuzH27A=";
+    sha256 = "sha256-qdqRvCmp6Q7lcTdOIEHeQKAAOLtJxs867gapopyeHuc=";
   };

   nativeBuildInputs = [ dpkg makeWrapper wrapGAppsHook ];
@@ -31,13 +31,13 @@ python3Packages.buildPythonApplication rec {
     pkg-config
     wrapGAppsHook4
     desktop-file-utils
+    gobject-introspection
   ];

   buildInputs = [
     glib
     gtk4
     libadwaita
-    gobject-introspection
     gst_all_1.gstreamer
     gst_all_1.gst-plugins-base
     gst_all_1.gst-plugins-good
@@ -48,9 +48,6 @@ python3Packages.buildPythonApplication rec {
     pygobject3
   ];

-  # Broken with gobject-introspection setup hook
-  # https://github.com/NixOS/nixpkgs/issues/56943
-  strictDeps = false;
   format = "other";

   postPatch = ''
@@ -1,25 +1,38 @@
-{ lib, stdenv, fetchFromGitHub, alsa-lib, file, fluidsynth, jack2,
-  liblo, libpulseaudio, libsndfile, pkg-config, python3Packages,
-  which, withFrontend ? true,
-  withQt ? true, qtbase ? null, wrapQtAppsHook ? null,
-  withGtk2 ? true, gtk2 ? null,
-  withGtk3 ? true, gtk3 ? null }:
+{ lib
+, stdenv
+, fetchFromGitHub
+, alsa-lib
+, file
+, fluidsynth
+, jack2
+, liblo
+, libpulseaudio
+, libsndfile
+, pkg-config
+, python3Packages
+, which
+, gtk2 ? null
+, gtk3 ? null
+, qtbase ? null
+, withFrontend ? true
+, withGtk2 ? true
+, withGtk3 ? true
+, withQt ? true
+, wrapQtAppsHook ? null
+}:

-assert withFrontend -> python3Packages ? pyqt5;
 assert withQt -> qtbase != null;
 assert withQt -> wrapQtAppsHook != null;
-assert withGtk2 -> gtk2 != null;
-assert withGtk3 -> gtk3 != null;

 stdenv.mkDerivation rec {
   pname = "carla";
-  version = "2.5.1";
+  version = "2.5.3";

   src = fetchFromGitHub {
     owner = "falkTX";
     repo = pname;
     rev = "v${version}";
-    sha256 = "sha256-SN+9Q5v0bv+kQcYLBJmSCd9WIGSeQuOZze8LVwF20EA=";
+    hash = "sha256-J0C3GLdlLMkm3LHl6l3OI2rA73A6z5MMcNJ1I1T0pbI=";
   };

   nativeBuildInputs = [
@@ -60,7 +73,6 @@ stdenv.mkDerivation rec {
     patchPythonScript "$f"
   done
   patchPythonScript "$out/share/carla/carla_settings.py"
-  patchPythonScript "$out/share/carla/carla_database.py"

   for program in $out/bin/*; do
     wrapQtApp "$program" \
@@ -24,11 +24,6 @@ python3Packages.buildPythonApplication rec {
   pname = "cozy";
   version = "1.2.1";

-  # Temporary fix
-  # See https://github.com/NixOS/nixpkgs/issues/57029
-  # and https://github.com/NixOS/nixpkgs/issues/56943
-  strictDeps = false;
-
   src = fetchFromGitHub {
     owner = "geigi";
     repo = pname;
@@ -37,13 +37,13 @@
 stdenv.mkDerivation rec {
   pname = "easyeffects";
-  version = "7.0.0";
+  version = "7.0.1";

   src = fetchFromGitHub {
     owner = "wwmm";
     repo = "easyeffects";
     rev = "v${version}";
-    sha256 = "sha256-SjhJj5kClPd8DT1vzbSdqJ9AJw0XiX9Q8/R8SDnxGPQ=";
+    sha256 = "sha256-PI29TJSYa/dARlSHe4mO4ejV+muhGFhwVvhA10jziTA=";
   };

   nativeBuildInputs = [
@@ -107,6 +107,5 @@ stdenv.mkDerivation rec {
     license = licenses.gpl3Plus;
     maintainers = with maintainers; [ jtojnar ];
     platforms = platforms.linux;
-    badPlatforms = [ "aarch64-linux" ];
   };
 }
@@ -10,13 +10,13 @@
 # gcc only supports objc on darwin
 buildGoModule.override { stdenv = clangStdenv; } rec {
   pname = "go-musicfox";
-  version = "3.7.2";
+  version = "3.7.3";

   src = fetchFromGitHub {
     owner = "anhoder";
     repo = pname;
     rev = "v${version}";
-    hash = "sha256-Wc9HFvBSLQA7jT+LJj+tyHzRbszhR2XD1/3C+SdrAGA=";
+    hash = "sha256-aM7IJGRRY2V2Rovj042ctg5254EUw1bTuoRCp9Za1FY=";
   };

   deleteVendor = true;
@@ -45,6 +45,6 @@ buildGoModule.override { stdenv = clangStdenv; } rec {
     homepage = "https://github.com/anhoder/go-musicfox";
     license = licenses.mit;
     mainProgram = "musicfox";
-    maintainers = with maintainers; [ zendo ];
+    maintainers = with maintainers; [ zendo Ruixi-rebirth ];
   };
 }
|
@ -27,17 +27,12 @@ python3Packages.buildPythonApplication rec {
|
||||||
intltool
|
intltool
|
||||||
wrapGAppsHook
|
wrapGAppsHook
|
||||||
glibcLocales
|
glibcLocales
|
||||||
|
gobject-introspection
|
||||||
];
|
];
|
||||||
|
|
||||||
# as of 2021-07, the gobject-introspection setup hook does not
|
|
||||||
# work with `strictDeps` enabled, thus for proper `wrapGAppsHook`
|
|
||||||
# it needs to be disabled explicitly. https://github.com/NixOS/nixpkgs/issues/56943
|
|
||||||
strictDeps = false;
|
|
||||||
|
|
||||||
buildInputs = [
|
buildInputs = [
|
||||||
python3
|
python3
|
||||||
gtk3
|
gtk3
|
||||||
gobject-introspection
|
|
||||||
gnome.adwaita-icon-theme
|
gnome.adwaita-icon-theme
|
||||||
];
|
];
|
||||||
|
|
||||||
|
|
|
@@ -5,25 +5,26 @@
 , stdenv
 , openssl
 , libiconv
+, sqlite
 , Security }:

 rustPlatform.buildRustPackage rec {
   pname = "listenbrainz-mpd";
-  version = "2.0.2";
+  version = "2.1.0";

   src = fetchFromGitea {
     domain = "codeberg.org";
     owner = "elomatreb";
     repo = "listenbrainz-mpd";
     rev = "v${version}";
-    hash = "sha256-DO7YUqaJZyVWjiAZ9WIVNTTvOU0qdsI2ct7aT/6O5dQ=";
+    hash = "sha256-AalZTlizaw93KlVffFDjGNoKkCHUFQTiElZgJo64shs=";
   };

-  cargoHash = "sha256-MiAalxe0drRHrST3maVvi8GM2y3d0z4Zl7R7Zx8VjEM=";
+  cargoHash = "sha256-n24P56ZrF8qEpM45uIFr7bJhlzuAexNr6siEsF219uA=";

   nativeBuildInputs = [ pkg-config ];

-  buildInputs = if stdenv.isDarwin then [ libiconv Security ] else [ openssl ];
+  buildInputs = [ sqlite ] ++ (if stdenv.isDarwin then [ libiconv Security ] else [ openssl ]);

   meta = with lib; {
     homepage = "https://codeberg.org/elomatreb/listenbrainz-mpd";
@@ -52,13 +52,13 @@
 mkDerivation rec {
   pname = "mixxx";
-  version = "2.3.3";
+  version = "2.3.4";

   src = fetchFromGitHub {
     owner = "mixxxdj";
     repo = "mixxx";
     rev = version;
-    sha256 = "sha256-NRtrEobdJMFgDXrEeb2t1zeVN8pQP7+pda2DSU/yNX8=";
+    sha256 = "sha256-1hOMU/Mdk1vT0GQipn/WX2fm9ddN0mPIq7kf2i2w3xQ=";
   };

   nativeBuildInputs = [ cmake pkg-config ];
@@ -116,7 +116,7 @@ mkDerivation rec {

   # mixxx installs udev rules to DATADIR instead of SYSCONFDIR
   # let's disable this and install udev rules manually via postInstall
-  # see https://github.com/mixxxdj/mixxx/blob/2.3.3/CMakeLists.txt#L1381-L1392
+  # see https://github.com/mixxxdj/mixxx/blob/2.3.4/CMakeLists.txt#L1381-L1392
   cmakeFlags = [
     "-DINSTALL_USER_UDEV_RULES=OFF"
   ];
@@ -70,9 +70,9 @@ stdenv.mkDerivation rec {
     Cocoa SystemConfiguration
   ] ++ lib.optionals coreaudioSupport [
     CoreAudio
-  ] ++ lib.optional sndioSupport [
+  ] ++ lib.optionals sndioSupport [
     sndio
-  ] ++ lib.optional pipewireSupport [
+  ] ++ lib.optionals pipewireSupport [
     pipewire
   ];

@@ -80,7 +80,7 @@ stdenv.mkDerivation rec {
     "-DDISABLE_STRIP=true"
   ];

-  postFixup = lib.optionals stdenv.isDarwin ''
+  postFixup = lib.optionalString stdenv.isDarwin ''
     install_name_tool -add_rpath $out/share/${pname} $out/share/${pname}/${pname}
     install_name_tool -add_rpath $out/share/${pname} $out/share/${pname}/${pname}d
   '';
@@ -16,13 +16,13 @@
 stdenv.mkDerivation rec {
   pname = "mympd";
-  version = "10.2.4";
+  version = "10.2.5";

   src = fetchFromGitHub {
     owner = "jcorporation";
     repo = "myMPD";
     rev = "v${version}";
-    sha256 = "sha256-12hCIAwrLQkwiU9t9nNPBdIiHfMidfErSWOA0FPfhBQ=";
+    sha256 = "sha256-ZxGMvbm9GKhhfCNZdeIYUh2FF4c3vXtvRdu24u3Zrtg=";
   };

   nativeBuildInputs = [
@@ -7,16 +7,16 @@
 rustPlatform.buildRustPackage rec {
   pname = "ncspot";
-  version = "0.12.0";
+  version = "0.13.0";

   src = fetchFromGitHub {
     owner = "hrkfdn";
     repo = "ncspot";
     rev = "v${version}";
-    sha256 = "sha256-kqGYBaXmGeGuGJ5fcc4OQzHISU8fVuQNGwiD8nyPa/0=";
+    hash = "sha256-YWA8chp33SkMdo+XT/7qikIkgwt8pozC9wMFpY8Dv8Q=";
   };

-  cargoSha256 = "sha256-gVXH2pFtyMfYkCqda9NrqOgczvmxiWHe0zArJfnnrgE=";
+  cargoHash = "sha256-DB3r6pPtustEQG8QXM6qT1hkd7msC//46bhVP/HMxnY=";

   nativeBuildInputs = [ pkg-config ];
@@ -3,13 +3,13 @@
 stdenv.mkDerivation rec {
   pname = "opustags";
-  version = "1.7.0";
+  version = "1.8.0";

   src = fetchFromGitHub {
     owner = "fmang";
     repo = "opustags";
     rev = version;
-    sha256 = "sha256-vGMRzw46X3DNRIvlI9XEKoDwiJsVL0v9Nfn8pbszRbw=";
+    sha256 = "sha256-qxtTJ4Hl2ccL+rhONeoOfV6ZyJaWaVDPMsXYJkXCWkY=";
   };

   buildInputs = [ libogg ];
Some files were not shown because too many files have changed in this diff.