Project import generated by Copybara.
GitOrigin-RevId: 2893f56de08021cffd9b6b6dfc70fd9ccd51eb60

parent 56bae7e5ef · commit 83627f9931
1153 changed files with 33764 additions and 11428 deletions
third_party/nixpkgs/.github/CODEOWNERS (vendored, 24 changes)

@@ -67,6 +67,9 @@
 /nixos/lib/make-disk-image.nix @raitobezarius
 
 # Nix, the package manager
+# @raitobezarius is not "code owner", but is listed here to be notified of changes
+# pertaining to the Nix package manager.
+# i.e. no authority over those files.
 pkgs/tools/package-management/nix/ @raitobezarius
 nixos/modules/installer/tools/nix-fallback-paths.nix @raitobezarius
 
@@ -93,7 +96,6 @@ nixos/modules/installer/tools/nix-fallback-paths.nix @raitobezarius
 /nixos/default.nix @infinisil
 /nixos/lib/from-env.nix @infinisil
 /nixos/lib/eval-config.nix @infinisil
-/nixos/modules/system @dasJ
 /nixos/modules/system/activation/bootspec.nix @grahamc @cole-h @raitobezarius
 /nixos/modules/system/activation/bootspec.cue @grahamc @cole-h @raitobezarius
 
@@ -137,9 +139,9 @@ nixos/modules/installer/tools/nix-fallback-paths.nix @raitobezarius
 /pkgs/top-level/haskell-packages.nix @sternenseemann @maralorn @ncfavier
 
 # Perl
-/pkgs/development/interpreters/perl @stigtsp @zakame @dasJ @marcusramberg
-/pkgs/top-level/perl-packages.nix @stigtsp @zakame @dasJ @marcusramberg
-/pkgs/development/perl-modules @stigtsp @zakame @dasJ @marcusramberg
+/pkgs/development/interpreters/perl @stigtsp @zakame @marcusramberg
+/pkgs/top-level/perl-packages.nix @stigtsp @zakame @marcusramberg
+/pkgs/development/perl-modules @stigtsp @zakame @marcusramberg
 
 # R
 /pkgs/applications/science/math/R @jbedo
@@ -152,6 +154,7 @@ nixos/modules/installer/tools/nix-fallback-paths.nix @raitobezarius
 
 # C compilers
 /pkgs/development/compilers/gcc
+/pkgs/development/compilers/llvm @RossComputerGuy
 /pkgs/development/compilers/emscripten @raitobezarius
 /doc/languages-frameworks/emscripten.section.md @raitobezarius
 
@@ -363,3 +366,16 @@ pkgs/by-name/lx/lxc* @adamcstephens
 pkgs/by-name/lx/lxd* @adamcstephens
 pkgs/os-specific/linux/lxc/ @adamcstephens
 
+# ExpidusOS, Flutter
+/pkgs/development/compilers/flutter @RossComputerGuy
+/pkgs/desktops/expidus @RossComputerGuy
+
+# GNU Tar & Zip
+/pkgs/tools/archivers/gnutar @RossComputerGuy
+/pkgs/tools/archivers/zip @RossComputerGuy
+
+# SELinux
+/pkgs/os-specific/linux/checkpolicy @RossComputerGuy
+/pkgs/os-specific/linux/libselinux @RossComputerGuy
+/pkgs/os-specific/linux/libsepol @RossComputerGuy
+
@@ -9,22 +9,40 @@ pkgs.makeSetupHook {
   name = "something-hook";
   propagatedBuildInputs = [ pkgs.commandsomething ];
   depsTargetTargetPropagated = [ pkgs.libsomething ];
-} ./script.sh
+} ./script.sh;
 ```
 
 ### setup hook that depends on the hello package and runs hello and @shell@ is substituted with path to bash {#sec-pkgs.makeSetupHook-usage-example}
 
 ```nix
-pkgs.makeSetupHook {
-  name = "run-hello-hook";
-  propagatedBuildInputs = [ pkgs.hello ];
-  substitutions = { shell = "${pkgs.bash}/bin/bash"; };
-  passthru.tests.greeting = callPackage ./test { };
-  meta.platforms = lib.platforms.linux;
-} (writeScript "run-hello-hook.sh" ''
-  #!@shell@
-  hello
-'')
+pkgs.makeSetupHook
+  {
+    name = "run-hello-hook";
+    # Put dependencies here if they have hooks or necessary dependencies propagated
+    # otherwise prefer direct paths to executables.
+    propagatedBuildInputs = [
+      pkgs.hello
+      pkgs.cowsay
+    ];
+    substitutions = {
+      shell = "${pkgs.bash}/bin/bash";
+      cowsay = "${pkgs.cowsay}/bin/cowsay";
+    };
+  }
+  (
+    writeScript "run-hello-hook.sh" ''
+      #!@shell@
+      # the direct path to the executable has to be here because
+      # this will be run when the file is sourced
+      # at which point '$PATH' has not yet been populated with inputs
+      @cowsay@ cow
+
+      _printHelloHook() {
+        hello
+      }
+      preConfigureHooks+=(_printHelloHook)
+    ''
+  );
 ```
 
 ## Attributes {#sec-pkgs.makeSetupHook-attributes}
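For orientation, a minimal sketch of how such a hook is consumed by a package; `runHelloHook` is an assumed attribute name for the value returned by the `pkgs.makeSetupHook` call above, not something defined in this diff.

```nix
{ stdenv, runHelloHook }:

stdenv.mkDerivation {
  pname = "uses-run-hello-hook";
  version = "0.1";
  src = ./.;
  # Listing the hook in nativeBuildInputs causes its script to be sourced during
  # the build, so _printHelloHook runs before the configure phase.
  nativeBuildInputs = [ runHelloHook ];
}
```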
@@ -101,7 +101,7 @@ See the [Dart documentation](#ssec-dart-applications) for more details on requir
 `flutter` in Nixpkgs always points to `flutterPackages.stable`, which is the latest packaged version. To avoid unforeseen breakage during upgrade, packages in Nixpkgs should use a specific flutter version, such as `flutter319` and `flutter322`, instead of using `flutter` directly.
 
 ```nix
-{ flutter, fetchFromGitHub }:
+{ flutter322, fetchFromGitHub }:
 
 flutter322.buildFlutterApplication {
   pname = "firmware-updater";
@@ -120,14 +120,6 @@ buildDunePackage rec {
 }
 ```
 
-Note about `minimalOCamlVersion`. A deprecated version of this argument was
-spelled `minimumOCamlVersion`; setting the old attribute wrongly modifies the
-derivation hash and is therefore inappropriate. As a technical dept, currently
-packaged libraries may still use the old spelling: maintainers are invited to
-fix this when updating packages. Massive renaming is strongly discouraged as it
-would be challenging to review, difficult to test, and will cause unnecessary
-rebuild.
-
 The build will automatically fail if two distinct versions of the same library
 are added to `buildInputs` (which usually happens transitively because of
 `propagatedBuildInputs`). Set `dontDetectOcamlConflicts` to true to disable this
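A hedged sketch of the spelling and escape hatch the surrounding text refers to, assuming a local dune project; the package metadata is illustrative.

```nix
{ buildDunePackage }:

buildDunePackage {
  pname = "example";
  version = "1.0";
  src = ./.;
  # current spelling; the deprecated `minimumOCamlVersion` changes the derivation hash
  minimalOCamlVersion = "4.14";
  # escape hatch described above; normally leave this unset
  dontDetectOcamlConflicts = true;
}
```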
@@ -214,7 +214,7 @@ Note: this is not possible anymore for Neovim.
 
 Nix expressions for Vim plugins are stored in [pkgs/applications/editors/vim/plugins](https://github.com/NixOS/nixpkgs/tree/master/pkgs/applications/editors/vim/plugins). For the vast majority of plugins, Nix expressions are automatically generated by running [`nix-shell -p vimPluginsUpdater --run vim-plugins-updater`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/updater.nix). This creates a [generated.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/generated.nix) file based on the plugins listed in [vim-plugin-names](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/vim-plugin-names).
 
-After running the updater, if nvim-treesitter received an update, also run [`nvim-treesitter/update.py`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/update.py) to update the tree sitter grammars for `nvim-treesitter`.
+When the vim updater detects an nvim-treesitter update, it also runs [`nvim-treesitter/update.py $(nix-build -A vimPlugins.nvim-treesitter)`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/update.py) to update the tree sitter grammars for `nvim-treesitter`.
 
 Some plugins require overrides in order to function properly. Overrides are placed in [overrides.nix](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/editors/vim/plugins/overrides.nix). Overrides are most often required when a plugin requires some dependencies, or extra steps are required during the build process. For example `deoplete-fish` requires both `deoplete-nvim` and `vim-fish`, and so the following override was added:
 
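The override itself is not included in this hunk; the sketch below shows the usual shape of such an overrides.nix entry, with `super` standing for the generated plugin set (attribute names taken from the prose, structure assumed).

```nix
self: super: {
  # add the two plugin dependencies named above to deoplete-fish
  deoplete-fish = super.deoplete-fish.overrideAttrs (old: {
    dependencies = with super; [ deoplete-nvim vim-fish ];
  });
}
```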
third_party/nixpkgs/lib/attrsets.nix (vendored, 2 changes)

@@ -11,7 +11,7 @@ let
 in
 
 rec {
-  inherit (builtins) attrNames listToAttrs hasAttr isAttrs getAttr removeAttrs;
+  inherit (builtins) attrNames listToAttrs hasAttr isAttrs getAttr removeAttrs intersectAttrs;
 
 
   /**
third_party/nixpkgs/lib/default.nix (vendored, 27 changes)

@@ -64,22 +64,21 @@ let
   # linux kernel configuration
   kernel = callLibs ./kernel.nix;
 
-  inherit (builtins) add addErrorContext attrNames concatLists
-    deepSeq elem elemAt filter genericClosure genList getAttr
-    hasAttr head isAttrs isBool isInt isList isPath isString length
-    lessThan listToAttrs pathExists readFile replaceStrings seq
-    stringLength sub substring tail trace;
+  # TODO: For consistency, all builtins should also be available from a sub-library;
+  # these are the only ones that are currently not
+  inherit (builtins) addErrorContext isPath trace;
   inherit (self.trivial) id const pipe concat or and xor bitAnd bitOr bitXor
     bitNot boolToString mergeAttrs flip mapNullable inNixShell isFloat min max
     importJSON importTOML warn warnIf warnIfNot throwIf throwIfNot checkListOfEnum
     info showWarnings nixpkgsVersion version isInOldestRelease
-    mod compare splitByAndCompare
+    mod compare splitByAndCompare seq deepSeq lessThan add sub
     functionArgs setFunctionArgs isFunction toFunction mirrorFunctionArgs
-    toHexString toBaseDigits inPureEvalMode;
+    toHexString toBaseDigits inPureEvalMode isBool isInt pathExists
+    genericClosure readFile;
   inherit (self.fixedPoints) fix fix' converge extends composeExtensions
     composeManyExtensions makeExtensible makeExtensibleWithCustomName;
   inherit (self.attrsets) attrByPath hasAttrByPath setAttrByPath
-    getAttrFromPath attrVals attrValues getAttrs catAttrs filterAttrs
+    getAttrFromPath attrVals attrNames attrValues getAttrs catAttrs filterAttrs
     filterAttrsRecursive foldlAttrs foldAttrs collect nameValuePair mapAttrs
     mapAttrs' mapAttrsToList attrsToList concatMapAttrs mapAttrsRecursive
     mapAttrsRecursiveCond genAttrs isDerivation toDerivation optionalAttrs
@@ -87,14 +86,16 @@ let
     recursiveUpdate matchAttrs mergeAttrsList overrideExisting showAttrPath getOutput
     getBin getLib getDev getMan chooseDevOutputs zipWithNames zip
     recurseIntoAttrs dontRecurseIntoAttrs cartesianProduct cartesianProductOfSets
-    mapCartesianProduct updateManyAttrsByPath;
-  inherit (self.lists) singleton forEach foldr fold foldl foldl' imap0 imap1
-    ifilter0 concatMap flatten remove findSingle findFirst any all count
+    mapCartesianProduct updateManyAttrsByPath listToAttrs hasAttr getAttr isAttrs intersectAttrs removeAttrs;
+  inherit (self.lists) singleton forEach map foldr fold foldl foldl' imap0 imap1
+    filter ifilter0 concatMap flatten remove findSingle findFirst any all count
     optional optionals toList range replicate partition zipListsWith zipLists
     reverseList listDfs toposort sort sortOn naturalSort compareLists take
     drop sublist last init crossLists unique allUnique intersectLists
-    subtractLists mutuallyExclusive groupBy groupBy';
+    subtractLists mutuallyExclusive groupBy groupBy' concatLists genList
+    length head tail elem elemAt isList;
   inherit (self.strings) concatStrings concatMapStrings concatImapStrings
+    stringLength substring isString replaceStrings
     intersperse concatStringsSep concatMapStringsSep
     concatImapStringsSep concatLines makeSearchPath makeSearchPathOutput
     makeLibraryPath makeIncludePath makeBinPath optionalString
@@ -105,7 +106,7 @@ let
     escapeRegex escapeURL escapeXML replaceChars lowerChars
     upperChars toLower toUpper addContextFrom splitString
     removePrefix removeSuffix versionOlder versionAtLeast
-    getName getVersion
+    getName getVersion match split
     cmakeOptionType cmakeBool cmakeFeature
     mesonOption mesonBool mesonEnable
     nameFromURL enableFeature enableFeatureAs withFeature
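A small sketch of what the re-export shuffle above means in practice, assuming `lib` is nixpkgs' lib (e.g. `import <nixpkgs/lib>`): the builtins-backed helpers are reachable through the sub-libraries as well as at the top level.

```nix
let
  lib = import <nixpkgs/lib>;
in
{
  a = lib.trivial.seq 1 2;                                       # 2
  b = lib.lists.elem 2 [ 1 2 3 ];                                # true
  c = lib.attrsets.intersectAttrs { x = 1; } { x = 2; y = 3; };  # { x = 2; }
}
```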
third_party/nixpkgs/lib/licenses.nix (vendored, 21 changes)

@@ -530,6 +530,13 @@ in mkLicense lset) ({
     fullName = "Unspecified free software license";
   };
 
+  fsl11Mit = {
+    fullName = "Functional Source License, Version 1.1, MIT Future License";
+    url = "https://fsl.software/FSL-1.1-MIT.template.md";
+    free = false;
+    redistributable = true;
+  };
+
   ftl = {
     spdxId = "FTL";
     fullName = "Freetype Project License";
@@ -1278,11 +1285,21 @@ in mkLicense lset) ({
     fullName = "xinetd License";
   };
 
+  xskat = {
+    spdxId = "XSkat";
+    fullName = "XSkat License";
+  };
+
   zlib = {
     spdxId = "Zlib";
     fullName = "zlib License";
   };
 
+  zsh = {
+    url = "https://github.com/zsh-users/zsh/blob/master/LICENCE";
+    fulllName = "Zsh License";
+  };
+
   zpl20 = {
     spdxId = "ZPL-2.0";
     fullName = "Zope Public License 2.0";
@@ -1293,10 +1310,6 @@ in mkLicense lset) ({
     fullName = "Zope Public License 2.1";
   };
 
-  xskat = {
-    spdxId = "XSkat";
-    fullName = "XSkat License";
-  };
 } // {
   # TODO: remove legacy aliases
   apsl10 = {
third_party/nixpkgs/lib/systems/examples.nix (vendored, 12 changes)

@@ -60,23 +60,23 @@ rec {
   armv7a-android-prebuilt = {
     config = "armv7a-unknown-linux-androideabi";
     rust.rustcTarget = "armv7-linux-androideabi";
-    sdkVer = "28";
-    ndkVer = "24";
+    sdkVer = "33";
+    ndkVer = "26";
     useAndroidPrebuilt = true;
   } // platforms.armv7a-android;
 
   aarch64-android-prebuilt = {
     config = "aarch64-unknown-linux-android";
     rust.rustcTarget = "aarch64-linux-android";
-    sdkVer = "28";
-    ndkVer = "24";
+    sdkVer = "33";
+    ndkVer = "26";
     useAndroidPrebuilt = true;
   };
 
   aarch64-android = {
     config = "aarch64-unknown-linux-android";
-    sdkVer = "30";
-    ndkVer = "24";
+    sdkVer = "33";
+    ndkVer = "26";
     libc = "bionic";
     useAndroidPrebuilt = false;
     useLLVM = true;
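A hedged illustration of where these platform entries surface: `pkgsCross` attributes in nixpkgs are generated from `lib.systems.examples`, so a cross build like the one below would pick up the bumped SDK/NDK defaults.

```nix
let
  pkgs = import <nixpkgs> { };
in
# cross-compile GNU hello for the aarch64-android-prebuilt platform defined above
pkgs.pkgsCross.aarch64-android-prebuilt.hello
```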
third_party/nixpkgs/maintainers/maintainer-list.nix (vendored, 145 changes)

@@ -1551,12 +1551,6 @@
     githubId = 56009;
     name = "Arcadio Rubio García";
   };
-  arcayr = {
-    email = "nix@arcayr.online";
-    github = "arcayr";
-    githubId = 11192354;
-    name = "Elliot Speck";
-  };
   archer-65 = {
     email = "mario.liguori.056@gmail.com";
     github = "archer-65";
@@ -1983,6 +1977,12 @@
     githubId = 12958979;
     name = "Mika Naylor";
   };
+  autra = {
+    email = "augustin.trancart@gmail.com";
+    github = "autra";
+    githubId = 1576598;
+    name = "Augustin Trancart";
+  };
   autrimpo = {
     email = "michal@koutensky.net";
     github = "autrimpo";
@@ -3962,6 +3962,12 @@
     githubId = 40290417;
     name = "Seb Blair";
   };
+  CompileTime = {
+    email = "socialcoms@posteo.de";
+    github = "Compile-Time";
+    githubId = 18414241;
+    name = "Andreas Erdes";
+  };
   confus = {
     email = "con-f-use@gmx.net";
     github = "con-f-use";
@@ -6506,6 +6512,18 @@
     githubId = 8182846;
     name = "Francesco Gazzetta";
   };
+  fidgetingbits = {
+    name = "fidgetingbits";
+    email = "nixpkgs.xe7au@passmail.net";
+    matrix = "@fidgetingbits:matrix.org";
+    github = "fidgetingbits";
+    githubId = 13679876;
+    keys = [
+      { fingerprint = "U+vNNrQxJRj3NPu9EoD0LFZssRbk6LBg4YPN5nFvQvs"; }
+      { fingerprint = "lX5ewVcaQLxuzqI92gujs3jFNki4d8qF+PATexMijoQ"; }
+      { fingerprint = "elY15tXap1tddxbBVoUoAioe1u0RDWti5rc9cauSmwo"; }
+    ];
+  };
   figsoda = {
     email = "figsoda@pm.me";
     matrix = "@figsoda:matrix.org";
@@ -6812,6 +6830,14 @@
     matrix = "@frogamic:beeper.com";
     keys = [ { fingerprint = "779A 7CA8 D51C C53A 9C51 43F7 AAE0 70F0 67EC 00A5"; } ];
   };
+  frontear = {
+    name = "Ali Rizvi";
+    email = "perm-iterate-0b@icloud.com";
+    matrix = "@frontear:matrix.org";
+    github = "Frontear";
+    githubId = 31909298;
+    keys = [ { fingerprint = "C170 11B7 C0AA BB3F 7415 022C BCB5 CEFD E222 82F5"; } ];
+  };
   frontsideair = {
     email = "photonia@gmail.com";
     github = "frontsideair";
@@ -7373,12 +7399,6 @@
     github = "gmacon";
     githubId = 238853;
   };
-  gmemstr = {
-    email = "git@gmem.ca";
-    github = "gmemstr";
-    githubId = 1878840;
-    name = "Gabriel Simmer";
-  };
   gnxlxnxx = {
     email = "gnxlxnxx@web.de";
     github = "gnxlxnxx";
@@ -8399,6 +8419,12 @@
     githubId = 7403236;
     name = "Markus J. Ankenbrand";
   };
+  iivusly = {
+    email = "iivusly@icloud.com";
+    github = "iivusly";
+    githubId = 52052910;
+    name = "iivusly";
+  };
   ikervagyok = {
     email = "ikervagyok@gmail.com";
     github = "ikervagyok";
@@ -9822,6 +9848,11 @@
     githubId = 1918771;
     name = "Joe Doyle";
   };
+  jpds = {
+    github = "jpds";
+    githubId = 29158971;
+    name = "Jonathan Davies";
+  };
   jpentland = {
     email = "joe.pentland@gmail.com";
     github = "jpentland";
@@ -9957,6 +9988,12 @@
     githubId = 5802758;
     name = "Joshua Trees";
   };
+  jtszalay = {
+    email = "jamestszalay@gmail.com";
+    github = "jtszalay";
+    githubId = 589502;
+    name = "James Szalay";
+  };
   juancmuller = {
     email = "nix@juancmuller.com";
     githubId = 208500;
@@ -10574,13 +10611,6 @@
     name = "Kat Inskip";
     keys = [ { fingerprint = "9CC6 44B5 69CD A59B C874 C4C9 E8DD E3ED 1C90 F3A0"; } ];
   };
-  kiwi = {
-    email = "envy1988@gmail.com";
-    github = "Kiwi";
-    githubId = 35715;
-    name = "Robert Djubek";
-    keys = [ { fingerprint = "8992 44FC D291 5CA2 0A97 802C 156C 88A5 B0A0 4B2A"; } ];
-  };
   kjeremy = {
     email = "kjeremy@gmail.com";
     name = "Jeremy Kolb";
@@ -10593,6 +10623,11 @@
     github = "kkharji";
     githubId = 65782666;
   };
+  kkoniuszy = {
+    name = "Kacper Koniuszy";
+    github = "kkoniuszy";
+    githubId = 120419423;
+  };
   klden = {
     name = "Kenzyme Le";
     email = "kl@kenzymele.com";
@@ -11297,12 +11332,6 @@
     github = "LogicalOverflow";
     githubId = 5919957;
   };
-  lheckemann = {
-    email = "git@sphalerite.org";
-    github = "lheckemann";
-    githubId = 341954;
-    name = "Linus Heckemann";
-  };
   lhvwb = {
     email = "nathaniel.baxter@gmail.com";
     github = "nathanielbaxter";
@@ -11635,6 +11664,13 @@
     githubId = 10626;
     name = "Andreas Wagner";
   };
+  lpchaim = {
+    email = "lpchaim@gmail.comm";
+    matrix = "@lpchaim:matrix.org";
+    github = "lpchaim";
+    githubId = 4030336;
+    name = "Lucas Chaim";
+  };
   lpostula = {
     email = "lois@postu.la";
     github = "loispostula";
@@ -13137,6 +13173,12 @@
     githubId = 3269878;
     name = "Miguel Madrid Mencía";
   };
+  mimvoid = {
+    github = "mimvoid";
+    githubId = 153698678;
+    email = "mimvoid@proton.me";
+    name = "mimvoid";
+  };
   mindavi = {
     email = "rol3517@gmail.com";
     github = "Mindavi";
@@ -13413,14 +13455,6 @@
     githubId = 754512;
     name = "Mogria";
   };
-  mohe2015 = {
-    name = "Moritz Hedtke";
-    email = "Moritz.Hedtke@t-online.de";
-    matrix = "@moritz.hedtke:matrix.org";
-    github = "mohe2015";
-    githubId = 13287984;
-    keys = [ { fingerprint = "1248 D3E1 1D11 4A85 75C9 8934 6794 D45A 488C 2EDE"; } ];
-  };
   momeemt = {
     name = "Mutsuha Asada";
     email = "me@momee.mt";
@@ -15196,6 +15230,12 @@
     githubId = 79252025;
     name = "Nicolas Benes";
   };
+  panky = {
+    email = "dev@pankajraghav.com";
+    github = "Panky-codes";
+    githubId = 33182938;
+    name = "Pankaj";
+  };
   paperdigits = {
     email = "mica@silentumbrella.com";
     github = "paperdigits";
@@ -15798,6 +15838,12 @@
     githubId = 4303706;
     keys = [ { fingerprint = "B00F E582 FD3F 0732 EA48 3937 F558 14E4 D687 4375"; } ];
   };
+  pladypus = {
+    name = "Peter Loftus";
+    email = "loftusp5976+nixpkgs@gmail.com";
+    github = "pladypus";
+    githubId = 56337621;
+  };
   plchldr = {
     email = "mail@oddco.de";
     github = "plchldr";
@@ -17688,13 +17734,6 @@
     githubId = 226872;
     name = "Samuel Ainsworth";
   };
-  samueldr = {
-    email = "samuel@dionne-riel.com";
-    matrix = "@samueldr:matrix.org";
-    github = "samueldr";
-    githubId = 132835;
-    name = "Samuel Dionne-Riel";
-  };
   samuelefacenda = {
     name = "Samuele Facenda";
     email = "samuele.facenda@gmail.com";
@@ -18798,14 +18837,6 @@
     githubId = 53029739;
     name = "Joshua Ortiz";
   };
-  Sorixelle = {
-    email = "ruby+nixpkgs@srxl.me";
-    matrix = "@ruby:isincredibly.gay";
-    name = "Ruby Iris Juric";
-    github = "Sorixelle";
-    githubId = 38685302;
-    keys = [ { fingerprint = "2D76 76C7 A28E 16FC 75C7 268D 1B55 6ED8 4B0E 303A"; } ];
-  };
   sorki = {
     email = "srk@48.io";
     github = "sorki";
@@ -19535,6 +19566,12 @@
     githubId = 870673;
     name = "Takuo Yonezawa";
   };
+  TakWolf = {
+    email = "takwolf@foxmail.com";
+    github = "TakWolf";
+    githubId = 6064962;
+    name = "TakWolf";
+  };
   talkara = {
     email = "taito.horiuchi@relexsolutions.com";
     github = "talkara";
@@ -20025,12 +20062,6 @@
     githubId = 1391883;
     name = "Tom Hall";
   };
-  thubrecht = {
-    email = "tom@hubrecht.ovh";
-    github = "Tom-Hubrecht";
-    githubId = 26650391;
-    name = "Tom Hubrecht";
-  };
   Thunderbottom = {
     email = "chinmaydpai@gmail.com";
     github = "Thunderbottom";
@@ -21561,12 +21592,6 @@
     githubId = 11740700;
     name = "Andrei Pampu";
   };
-  wolfangaukang = {
-    email = "clone.gleeful135+nixpkgs@anonaddy.me";
-    github = "WolfangAukang";
-    githubId = 8378365;
-    name = "P. R. d. O.";
-  };
   wolfgangwalther = {
     name = "Wolfgang Walther";
     email = "walther@technowledgy.de";
@@ -677,12 +677,6 @@ with lib.maintainers;
     shortName = "Mercury Employees";
   };
 
-  mobile = {
-    members = [ samueldr ];
-    scope = "Maintain Mobile NixOS.";
-    shortName = "Mobile";
-  };
-
   nix = {
     members = [
       eelco
@@ -453,7 +453,7 @@ In addition to numerous new and upgraded packages, this release has the followin
 
 The new names are as follows:
 - `bindAddress`: [`services.keycloak.settings.http-host`](#opt-services.keycloak.settings.http-host)
-- `forceBackendUrlToFrontendUrl`: [`services.keycloak.settings.hostname-strict-backchannel`](#opt-services.keycloak.settings.hostname-strict-backchannel)
+- `forceBackendUrlToFrontendUrl`: `services.keycloak.settings.hostname-strict-backchannel`
 - `httpPort`: [`services.keycloak.settings.http-port`](#opt-services.keycloak.settings.http-port)
 - `httpsPort`: [`services.keycloak.settings.https-port`](#opt-services.keycloak.settings.https-port)
 
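A sketch of the renamed settings in use, with placeholder values; the option names are the ones listed in the note above.

```nix
{
  services.keycloak.settings = {
    http-host = "127.0.0.1";
    http-port = 8080;
    https-port = 8443;
    hostname-strict-backchannel = false;
  };
}
```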
@@ -62,7 +62,7 @@ In addition to numerous new and upgraded packages, this release has the followin
 <!-- Please keep entries alphabetically sorted. -->
 
 - [Anki Sync Server](https://docs.ankiweb.net/sync-server.html), the official sync server built into recent versions of Anki. Available as [services.anki-sync-server](#opt-services.anki-sync-server.enable).
-  The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been marked deprecated and will be dropped after 24.05 due to lack of maintenance of the anki-sync-server software.
+  The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been marked deprecated and will be dropped after 24.05 due to lack of maintenance of the ankisyncd software.
 
 - [ALVR](https://github.com/alvr-org/alvr), a VR desktop streamer. Available as [programs.alvr](#opt-programs.alvr.enable).
 
@@ -23,13 +23,35 @@
 
 ## Backward Incompatibilities {#sec-release-24.11-incompatibilities}
 
+- `transmission` package has been aliased with a `trace` warning to `transmission_3`. Since [Transmission 4 has been released last year](https://github.com/transmission/transmission/releases/tag/4.0.0), and Transmission 3 will eventually go away, it was decided perform this warning alias to make people aware of the new version. The `services.transmission.package` defaults to `transmission_3` as well because the upgrade can cause data loss in certain specific usage patterns (examples: [#5153](https://github.com/transmission/transmission/issues/5153), [#6796](https://github.com/transmission/transmission/issues/6796)). Please make sure to back up to your data directory per your usage:
+  - `transmission-gtk`: `~/.config/transmission`
+  - `transmission-daemon` using NixOS module: `${config.services.transmission.home}/.config/transmission-daemon` (defaults to `/var/lib/transmission/.config/transmission-daemon`)
+
 - `androidenv.androidPkgs_9_0` has been removed, and replaced with `androidenv.androidPkgs` for a more complete Android SDK including support for Android 9 and later.
 
+- `wstunnel` has had a major version upgrade that entailed rewriting the program in Rust.
+  The module was updated to accommodate for breaking changes.
+  Breaking changes to the module API were minimised as much as possible,
+  but some were nonetheless inevitable due to changes in the upstream CLI.
+  Certain options were moved from separate CLI arguments into the forward specifications,
+  and those options were also removed from the module's API,
+  please consult the wstunnel man page for more detail.
+  Also be aware that if you have set additional options in `services.wstunnel.{clients,servers}.<name>.extraArgs`,
+  that those might have been removed or modified upstream.
+
+- `clang-tools_<version>` packages have been moved into `llvmPackages_<version>` (i.e. `clang-tools_18` is now `llvmPackages_18.clang-tools`).
+  - For convenience, the top-level `clang-tools` attribute remains and is now bound to `llvmPackages.clang-tools`.
+  - Top-level `clang_tools_<version>` attributes are now aliases; these will be removed in a future release.
+
 - `nginx` package no longer includes `gd` and `geoip` dependencies. For enabling it, override `nginx` package with the optionals `withImageFilter` and `withGeoIP`.
 
 - `openssh` and `openssh_hpn` are now compiled without Kerberos 5 / GSSAPI support in an effort to reduce the attack surface of the components for the majority of users. Users needing this support can
   use the new `opensshWithKerberos` and `openssh_hpnWithKerberos` flavors (e.g. `programs.ssh.package = pkgs.openssh_gssapi`).
 
+- `security.ipa.ipaHostname` now defaults to the value of `networking.fqdn` if
+  it is set, instead of the previous hardcoded default of
+  `${networking.hostName}.${security.ipa.domain}`.
+
 - `nvimpager` was updated to version 0.13.0, which changes the order of user and
   nvimpager settings: user commands in `-c` and `--cmd` now override the
   respective default settings because they are executed later.
@@ -53,6 +75,10 @@
   before changing the package to `pkgs.stalwart-mail` in
   [`services.stalwart-mail.package`](#opt-services.stalwart-mail.package).
 
+- `androidndkPkgs` has been updated to `androidndkPkgs_26`.
+
+- Android NDK version 26 and SDK version 33 are now the default versions used for cross compilation to android.
+
 - `haskell.lib.compose.justStaticExecutables` now disallows references to GHC in the
   output by default, to alert users to closure size issues caused by
   [#164630](https://github.com/NixOS/nixpkgs/issues/164630). See ["Packaging
@@ -61,10 +87,19 @@
   for information on working around `output '...' is not allowed to refer to
   the following paths` errors caused by this change.
 
+- The `stalwart-mail` service now runs under the `stalwart-mail` system user
+  instead of a dynamically created one via `DynamicUser`, to avoid automatic
+  ownership changes on its large file store each time the service was started.
+  This change requires to manually move the state directory from
+  `/var/lib/private/stalwart-mail` to `/var/lib/stalwart-mail` and to
+  change the ownership of the directory and its content to `stalwart-mail`.
+
 - The `stalwart-mail` module now uses RocksDB as the default storage backend
   for `stateVersion` ≥ 24.11. (It was previously using SQLite for structured
   data and the filesystem for blobs).
 
+- `libe57format` has been updated to `>= 3.0.0`, which contains some backward-incompatible API changes. See the [release note](https://github.com/asmaloney/libE57Format/releases/tag/v3.0.0) for more details.
+
 - `zx` was updated to v8, which introduces several breaking changes.
   See the [v8 changelog](https://github.com/google/zx/releases/tag/8.0.0) for more information.
 
@@ -77,6 +112,9 @@
   services.portunus.ldap.package = pkgs.openldap.override { libxcrypt = pkgs.libxcrypt-legacy; };
   ```
 
+- `keycloak` was updated to version 25, which introduces new hostname related options.
+  See [Upgrading Guide](https://www.keycloak.org/docs/25.0.1/upgrading/#migrating-to-25-0-0) for instructions.
+
 - The `tracy` package no longer works on X11, since it's moved to Wayland
   support, which is the intended default behavior by Tracy maintainers.
   X11 users have to switch to the new package `tracy-x11`.
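For the clang-tools move called out above, a before/after sketch using the attribute paths from the note itself; `pkgs` is assumed to be an imported nixpkgs.

```nix
let
  pkgs = import <nixpkgs> { };
in
{
  # old top-level spelling, kept as an alias that will eventually be removed
  old = pkgs.clang-tools_18;
  # new canonical location described in the release note
  new = pkgs.llvmPackages_18.clang-tools;
}
```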
@@ -603,10 +603,11 @@ let format' = format; in let
       ${lib.optionalString installBootLoader ''
         # In this throwaway resource, we only have /dev/vda, but the actual VM may refer to another disk for bootloader, e.g. /dev/vdb
         # Use this option to create a symlink from vda to any arbitrary device you want.
-        ${optionalString (config.boot.loader.grub.enable && config.boot.loader.grub.device != "/dev/vda") ''
-          mkdir -p $(dirname ${config.boot.loader.grub.device})
-          ln -s /dev/vda ${config.boot.loader.grub.device}
-        ''}
+        ${optionalString (config.boot.loader.grub.enable) (lib.concatMapStringsSep " " (device:
+          lib.optionalString (device != "/dev/vda") ''
+            mkdir -p "$(dirname ${device})"
+            ln -s /dev/vda ${device}
+          '') config.boot.loader.grub.devices)}
 
         # Set up core system link, bootloader (sd-boot, GRUB, uboot, etc.), etc.
 
@@ -275,7 +275,6 @@ in
             chattr +C "$DEVICE" 2>/dev/null || true
 
             dd if=/dev/zero of="$DEVICE" bs=1M count=${toString sw.size}
-            chmod 0600 ${sw.device}
             ${optionalString (!sw.randomEncryption.enable) "mkswap ${sw.realDevice}"}
           fi
         ''}
@@ -292,9 +291,12 @@ in
 
         unitConfig.RequiresMountsFor = [ "${dirOf sw.device}" ];
         unitConfig.DefaultDependencies = false; # needed to prevent a cycle
-        serviceConfig.Type = "oneshot";
-        serviceConfig.RemainAfterExit = sw.randomEncryption.enable;
-        serviceConfig.ExecStop = optionalString sw.randomEncryption.enable "${pkgs.cryptsetup}/bin/cryptsetup luksClose ${sw.deviceName}";
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = sw.randomEncryption.enable;
+          UMask = "0177";
+          ExecStop = optionalString sw.randomEncryption.enable "${pkgs.cryptsetup}/bin/cryptsetup luksClose ${sw.deviceName}";
+        };
         restartIfChanged = false;
       };
 
@@ -1,7 +1,7 @@
 {
-  x86_64-linux = "/nix/store/azvn85cras6xv4z5j85fiy406f24r1q0-nix-2.18.1";
-  i686-linux = "/nix/store/9bnwy7f9h0kzdzmcnjjsjg0aak5waj40-nix-2.18.1";
-  aarch64-linux = "/nix/store/hh65xwqm9s040s3cgn9vzcmrxj0sf5ij-nix-2.18.1";
-  x86_64-darwin = "/nix/store/6zi5fqzn9n17wrk8r41rhdw4j7jqqsi3-nix-2.18.1";
-  aarch64-darwin = "/nix/store/0pbq6wzr2f1jgpn5212knyxpwmkjgjah-nix-2.18.1";
+  x86_64-linux = "/nix/store/yrsmzlw2lgbknzwic1gy1gmv3l2w1ax8-nix-2.18.3";
+  i686-linux = "/nix/store/ds9381l9mlwfaclvqnkzn3jl4qb8m3y1-nix-2.18.3";
+  aarch64-linux = "/nix/store/hw1zny3f8520zyskmp1qaybv1ir5ilxh-nix-2.18.3";
+  x86_64-darwin = "/nix/store/z08yc4sl1fr65q53wz6pw30h67qafaln-nix-2.18.3";
+  aarch64-darwin = "/nix/store/p57m7m0wrz8sqxiwinzpwzqzak82zn75-nix-2.18.3";
 }
@@ -880,6 +880,7 @@
   ./services/monitoring/osquery.nix
   ./services/monitoring/parsedmarc.nix
   ./services/monitoring/prometheus/alertmanager-irc-relay.nix
+  ./services/monitoring/prometheus/alertmanager-webhook-logger.nix
   ./services/monitoring/prometheus/alertmanager.nix
   ./services/monitoring/prometheus/default.nix
   ./services/monitoring/prometheus/exporters.nix
@@ -63,7 +63,7 @@ in
 
   systemd = lib.mkIf cfg.systemd.setPath.enable {
     user.extraConfig = ''
-      DefaultEnvironment="PATH=$PATH:/run/current-system/sw/bin:/etc/profiles/per-user/%u/bin:/run/wrappers/bin"
+      DefaultEnvironment="PATH=/run/wrappers/bin:/etc/profiles/per-user/%u/bin:/nix/var/nix/profiles/default/bin:/run/current-system/sw/bin:$PATH"
     '';
   };
 }
@@ -23,7 +23,7 @@ in
     };
 
     package = lib.mkPackageOption pkgs "xonsh" {
-      example = "xonsh.override { extraPackages = ps: [ ps.requests ]; }";
+      example = "xonsh.wrapper.override { extraPackages = ps: [ ps.requests ]; }";
     };
 
     config = lib.mkOption {
@@ -61,17 +61,14 @@ in
         aliases['ls'] = _ls_alias
         del _ls_alias
 
-
       ${cfg.config}
     '';
 
     environment.systemPackages = [ cfg.package ];
 
-    environment.shells =
-      [ "/run/current-system/sw/bin/xonsh"
-        "${cfg.package}/bin/xonsh"
+    environment.shells = [
+      "/run/current-system/sw/bin/xonsh"
+      "${lib.getExe cfg.package}"
     ];
-
   };
-
 }
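A hedged configuration sketch that mirrors the updated `example` string shown for the package option above; option paths are assumed to live under `programs.xonsh`.

```nix
{ pkgs, ... }:
{
  programs.xonsh = {
    enable = true;
    # same override form as the new option example
    package = pkgs.xonsh.wrapper.override { extraPackages = ps: [ ps.requests ]; };
  };
}
```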
@@ -85,6 +85,18 @@ in {
       description = "Whether to cache credentials.";
     };
 
+    ipaHostname = mkOption {
+      type = types.str;
+      example = "myworkstation.example.com";
+      default = if config.networking.domain != null then config.networking.fqdn
+                else "${config.networking.hostName}.${cfg.domain}";
+      defaultText = literalExpression ''
+        if config.networking.domain != null then config.networking.fqdn
+        else "''${networking.hostName}.''${security.ipa.domain}"
+      '';
+      description = "Fully-qualified hostname used to identify this host in the IPA domain.";
+    };
+
     ifpAllowedUids = mkOption {
       type = types.listOf types.str;
       default = ["root"];
@@ -218,7 +230,7 @@ in {
 
       ipa_domain = ${cfg.domain}
      ipa_server = _srv_, ${cfg.server}
-      ipa_hostname = ${config.networking.hostName}.${cfg.domain}
+      ipa_hostname = ${cfg.ipaHostname}
 
       cache_credentials = ${pyBool cfg.cacheCredentials}
       krb5_store_password_if_offline = ${pyBool cfg.offlinePasswords}
@@ -232,7 +244,6 @@ in {
       ldap_user_extra_attrs = mail:mail, sn:sn, givenname:givenname, telephoneNumber:telephoneNumber, lock:nsaccountlock
 
       [sssd]
-      debug_level = 65510
       services = nss, sudo, pam, ssh, ifp
       domains = ${cfg.domain}
 
@@ -244,7 +255,6 @@ in {
       pam_verbosity = 3
 
       [sudo]
-      debug_level = 65510
 
       [autofs]
 
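A minimal sketch of overriding the new option from a host configuration; the other `security.ipa` values are placeholders assumed from the module's existing options.

```nix
{
  security.ipa = {
    enable = true;
    domain = "example.com";
    server = "ipa.example.com";
    # overrides the default derived from networking.fqdn
    ipaHostname = "workstation01.example.com";
  };
}
```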
@@ -42,5 +42,5 @@ in with lib; {
       };
     };
   };
-  meta.maintainers = [ maintainers.lheckemann ];
+  meta.maintainers = [ ];
 }
@@ -261,7 +261,7 @@ in {
       name = "service-account";
       CN = "system:service-account-signer";
       action = ''
-        systemctl reload \
+        systemctl restart \
           kube-apiserver.service \
           kube-controller-manager.service
       '';
@@ -237,6 +237,8 @@ in {
       serviceConfig = {
         User = cfg.user;
         StateDirectory = mkIf (hasPrefix "/var/lib/jenkins" cfg.home) "jenkins";
+        # For (possible) socket use
+        RuntimeDirectory = "jenkins";
       };
     };
   };
@@ -264,9 +264,12 @@ in {
         enable = true;
         package = kdePackages.kwallet-pam;
       };
-      kde.kwallet = {
-        enable = true;
-        package = kdePackages.kwallet-pam;
+      kde = {
+        allowNullPassword = true;
+        kwallet = {
+          enable = true;
+          package = kdePackages.kwallet-pam;
+        };
       };
       kde-fingerprint = lib.mkIf config.services.fprintd.enable { fprintAuth = true; };
       kde-smartcard = lib.mkIf config.security.pam.p11.enable { p11Auth = true; };
@ -1,45 +1,52 @@
 # GNOME Keyring daemon.

-{ config, pkgs, lib, ... }:
+{
+  config,
+  pkgs,
+  lib,
+  ...
+}:
+let
+  cfg = config.services.gnome.gnome-keyring;
+in
 {

   meta = {
     maintainers = lib.teams.gnome.members;
   };

-  ###### interface
-
   options = {

     services.gnome.gnome-keyring = {

-      enable = lib.mkOption {
-        type = lib.types.bool;
-        default = false;
-        description = ''
-          Whether to enable GNOME Keyring daemon, a service designed to
-          take care of the user's security credentials,
-          such as user names and passwords.
-        '';
-      };
+      enable = lib.mkEnableOption ''
+        GNOME Keyring daemon, a service designed to
+        take care of the user's security credentials,
+        such as user names and passwords
+      '';

     };

   };

-  ###### implementation
-
-  config = lib.mkIf config.services.gnome.gnome-keyring.enable {
+  config = lib.mkIf cfg.enable {

     environment.systemPackages = [ pkgs.gnome.gnome-keyring ];

-    services.dbus.packages = [ pkgs.gnome.gnome-keyring pkgs.gcr ];
+    services.dbus.packages = [
+      pkgs.gnome.gnome-keyring
+      pkgs.gcr
+    ];

     xdg.portal.extraPortals = [ pkgs.gnome.gnome-keyring ];

-    security.pam.services.login.enableGnomeKeyring = true;
+    security.pam.services = lib.mkMerge [
+      {
+        login.enableGnomeKeyring = true;
+      }
+      (lib.mkIf config.services.xserver.displayManager.gdm.enable {
+        gdm-password.enableGnomeKeyring = true;
+        gdm-autologin.enableGnomeKeyring = true;
+      })
+      (lib.mkIf (config.services.xserver.displayManager.gdm.enable && config.services.fprintd.enable) {
+        gdm-fingerprint.enableGnomeKeyring = true;
+      })
+    ];

     security.wrappers.gnome-keyring-daemon = {
       owner = "root";
@ -47,7 +54,5 @@
       capabilities = "cap_ipc_lock=ep";
       source = "${pkgs.gnome.gnome-keyring}/bin/gnome-keyring-daemon";
     };
-
   };
-
 }
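A minimal sketch of enabling the rewritten module from a NixOS configuration; the gdm option is only relevant because the mkMerge block above wires keyring unlocking into the GDM PAM services when it is enabled:

    services.gnome.gnome-keyring.enable = true;
    services.xserver.displayManager.gdm.enable = true;  # optional, activates the gdm-* PAM entries above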
@ -28,7 +28,7 @@ in {

   boot.initrd.kernelModules = lib.optionals cfg.initrd.enable [ "amdgpu" ];

-  hardware.opengl = lib.mkIf cfg.opencl.enable {
+  hardware.graphics = lib.mkIf cfg.opencl.enable {
     enable = lib.mkDefault true;
     extraPackages = [
       pkgs.rocmPackages.clr
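The rename above moves the OpenCL bits from hardware.opengl to hardware.graphics; a rough sketch of a host configuration using the new attribute path, with the package name taken from the hunk:

    hardware.graphics = {
      enable = true;
      extraPackages = [ pkgs.rocmPackages.clr ];
    };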
@ -518,8 +518,9 @@ in {
       # recreate symlinks for desired components
       declare -a components=(${escapeShellArgs cfg.customComponents})
       for component in "''${components[@]}"; do
-        path="$(dirname $(find "$component" -name "manifest.json"))"
-        ln -fns "$path" "${cfg.configDir}/custom_components/"
+        readarray -t manifests < <(find "$component" -name manifest.json)
+        readarray -t paths < <(dirname "''${manifests[@]}")
+        ln -fns "''${paths[@]}" "${cfg.configDir}/custom_components/"
       done
     '';
   in
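The readarray change above links every directory that contains a manifest.json, so custom components shipping more than one integration are handled. A hedged sketch of the option this script serves, with a hypothetical component package name:

    services.home-assistant = {
      enable = true;
      customComponents = [
        pkgs.home-assistant-custom-components.prometheus_sensor  # illustrative; any derivation with a manifest.json works
      ];
    };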
@ -646,7 +646,7 @@ in {
   };

   meta = {
-    maintainers = with lib.maintainers; [ lheckemann qyliss ];
+    maintainers = with lib.maintainers; [ qyliss ];
     doc = ./mailman.md;
   };

@ -37,8 +37,6 @@ in {
       ansi = mkDefault false; # no colour markers to journald
       enable = mkDefault true;
     };
-    queue.path = mkDefault "${dataDir}/queue";
-    report.path = mkDefault "${dataDir}/reports";
     store = if useLegacyStorage then {
       # structured data in SQLite, blobs on filesystem
       db.type = mkDefault "sqlite";

@ -62,6 +60,9 @@
     resolver.public-suffix = lib.mkDefault [
       "file://${pkgs.publicsuffix-list}/share/publicsuffix/public_suffix_list.dat"
     ];
+    config.resource = {
+      spam-filter = lib.mkDefault "file://${cfg.package}/etc/stalwart/spamfilter.toml";
+    };
   };

   # This service stores a potentially large amount of data.

@ -83,9 +84,9 @@ in {
     after = [ "local-fs.target" "network.target" ];

     preStart = if useLegacyStorage then ''
-      mkdir -p ${dataDir}/{queue,reports,data/blobs}
+      mkdir -p ${dataDir}/data/blobs
     '' else ''
-      mkdir -p ${dataDir}/{queue,reports,db}
+      mkdir -p ${dataDir}/db
     '';

     serviceConfig = {
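The new config.resource block seeds the spam filter from the packaged default. A hedged sketch of overriding it, assuming these defaults live under services.stalwart-mail.settings; the file path is a placeholder:

    services.stalwart-mail = {
      enable = true;
      settings.config.resource.spam-filter = "file:///etc/stalwart/my-spamfilter.toml";  # placeholder path
    };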
@ -98,9 +98,25 @@ in
         - otherwise defaults to `false`
       - `false`: disable GPU, only use CPU
       - `"rocm"`: supported by most modern AMD GPUs
+        - may require overriding gpu type with `services.ollama.rocmOverrideGfx`
+          if rocm doesn't detect your AMD gpu
       - `"cuda"`: supported by most modern NVIDIA GPUs
     '';
   };
+  rocmOverrideGfx = lib.mkOption {
+    type = types.nullOr types.str;
+    default = null;
+    example = "10.3.0";
+    description = ''
+      Override what rocm will detect your gpu model as.
+      For example, make rocm treat your RX 5700 XT (or any other model)
+      as an RX 6900 XT using a value of `"10.3.0"` (gfx 1030).
+
+      This sets the value of `HSA_OVERRIDE_GFX_VERSION`. See [ollama's docs](
+      https://github.com/ollama/ollama/blob/main/docs/gpu.md#amd-radeon
+      ) for details.
+    '';
+  };
   environmentVariables = lib.mkOption {
     type = types.attrsOf types.str;
     default = { };

@ -136,6 +152,7 @@ in
       HOME = cfg.home;
       OLLAMA_MODELS = cfg.models;
       OLLAMA_HOST = "${cfg.host}:${toString cfg.port}";
+      HSA_OVERRIDE_GFX_VERSION = lib.mkIf (cfg.rocmOverrideGfx != null) cfg.rocmOverrideGfx;
     };
     serviceConfig = {
       ExecStart = "${lib.getExe ollamaPackage} serve";
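A sketch of using the new rocmOverrideGfx option together with ROCm acceleration, values taken from the option's own example text:

    services.ollama = {
      enable = true;
      acceleration = "rocm";
      rocmOverrideGfx = "10.3.0";  # maps to HSA_OVERRIDE_GFX_VERSION, per the hunk above
    };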
@ -117,5 +117,5 @@ in
     };
   };

-  meta.maintainers = with lib.maintainers; [ drupol ];
+  meta.maintainers = with lib.maintainers; [ ];
 }
@ -1,16 +1,32 @@
|
||||||
{ config, pkgs, lib, ... }:
|
{
|
||||||
|
config,
|
||||||
|
pkgs,
|
||||||
|
lib,
|
||||||
|
...
|
||||||
|
}:
|
||||||
|
|
||||||
with lib;
|
with lib;
|
||||||
|
|
||||||
let
|
let
|
||||||
cfg = config.services.snapper;
|
cfg = config.services.snapper;
|
||||||
|
|
||||||
mkValue = v:
|
mkValue =
|
||||||
if isList v then "\"${concatMapStringsSep " " (escape [ "\\" " " ]) v}\""
|
v:
|
||||||
else if v == true then "yes"
|
if isList v then
|
||||||
else if v == false then "no"
|
"\"${
|
||||||
else if isString v then "\"${v}\""
|
concatMapStringsSep " " (escape [
|
||||||
else builtins.toJSON v;
|
"\\"
|
||||||
|
" "
|
||||||
|
]) v
|
||||||
|
}\""
|
||||||
|
else if v == true then
|
||||||
|
"yes"
|
||||||
|
else if v == false then
|
||||||
|
"no"
|
||||||
|
else if isString v then
|
||||||
|
"\"${v}\""
|
||||||
|
else
|
||||||
|
builtins.toJSON v;
|
||||||
|
|
||||||
mkKeyValue = k: v: "${k}=${mkValue v}";
|
mkKeyValue = k: v: "${k}=${mkValue v}";
|
||||||
|
|
||||||
|
@ -43,7 +59,7 @@ let

     ALLOW_GROUPS = mkOption {
       type = types.listOf safeStr;
-      default = [];
+      default = [ ];
       description = ''
         List of groups allowed to operate with the config.

@ -53,7 +69,7 @@ let

     ALLOW_USERS = mkOption {
       type = types.listOf safeStr;
-      default = [];
+      default = [ ];
       example = [ "alice" ];
       description = ''
         List of users allowed to operate with the config. "root" is always
@ -78,6 +94,54 @@
         Defines whether hourly snapshots should be created.
       '';
     };
+
+    TIMELINE_LIMIT_HOURLY = mkOption {
+      type = types.str;
+      default = "10";
+      description = ''
+        Limits for timeline cleanup.
+      '';
+    };
+
+    TIMELINE_LIMIT_DAILY = mkOption {
+      type = types.str;
+      default = "10";
+      description = ''
+        Limits for timeline cleanup.
+      '';
+    };
+
+    TIMELINE_LIMIT_WEEKLY = mkOption {
+      type = types.str;
+      default = "0";
+      description = ''
+        Limits for timeline cleanup.
+      '';
+    };
+
+    TIMELINE_LIMIT_MONTHLY = mkOption {
+      type = types.str;
+      default = "10";
+      description = ''
+        Limits for timeline cleanup.
+      '';
+    };
+
+    TIMELINE_LIMIT_QUARTERLY = mkOption {
+      type = types.str;
+      default = "0";
+      description = ''
+        Limits for timeline cleanup.
+      '';
+    };
+
+    TIMELINE_LIMIT_YEARLY = mkOption {
+      type = types.str;
+      default = "10";
+      description = ''
+        Limits for timeline cleanup.
+      '';
+    };

   };
 in
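A sketch of a snapper config exercising the new TIMELINE_LIMIT_* options; SUBVOLUME and the TIMELINE create/cleanup switches are assumed to pass through the module's freeform settings as before:

    services.snapper.configs.home = {
      SUBVOLUME = "/home";
      ALLOW_USERS = [ "alice" ];
      TIMELINE_CREATE = true;
      TIMELINE_CLEANUP = true;
      TIMELINE_LIMIT_HOURLY = "10";
      TIMELINE_LIMIT_DAILY = "7";
      TIMELINE_LIMIT_WEEKLY = "0";
    };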
@ -152,112 +216,129 @@ in
|
||||||
is valid here, even if NixOS doesn't document it.
|
is valid here, even if NixOS doesn't document it.
|
||||||
'';
|
'';
|
||||||
|
|
||||||
type = types.attrsOf (types.submodule {
|
type = types.attrsOf (
|
||||||
freeformType = types.attrsOf (types.oneOf [ (types.listOf safeStr) types.bool safeStr types.number ]);
|
types.submodule {
|
||||||
|
freeformType = types.attrsOf (
|
||||||
|
types.oneOf [
|
||||||
|
(types.listOf safeStr)
|
||||||
|
types.bool
|
||||||
|
safeStr
|
||||||
|
types.number
|
||||||
|
]
|
||||||
|
);
|
||||||
|
|
||||||
options = configOptions;
|
options = configOptions;
|
||||||
});
|
}
|
||||||
|
);
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
config = mkIf (cfg.configs != {}) (let
|
config = mkIf (cfg.configs != { }) (
|
||||||
documentation = [ "man:snapper(8)" "man:snapper-configs(5)" ];
|
let
|
||||||
in {
|
documentation = [
|
||||||
|
"man:snapper(8)"
|
||||||
|
"man:snapper-configs(5)"
|
||||||
|
];
|
||||||
|
in
|
||||||
|
{
|
||||||
|
environment = {
|
||||||
|
|
||||||
environment = {
|
systemPackages = [ pkgs.snapper ];
|
||||||
|
|
||||||
systemPackages = [ pkgs.snapper ];
|
# Note: snapper/config-templates/default is only needed for create-config
|
||||||
|
# which is not the NixOS way to configure.
|
||||||
|
etc =
|
||||||
|
{
|
||||||
|
|
||||||
# Note: snapper/config-templates/default is only needed for create-config
|
"sysconfig/snapper".text = ''
|
||||||
# which is not the NixOS way to configure.
|
SNAPPER_CONFIGS="${lib.concatStringsSep " " (builtins.attrNames cfg.configs)}"
|
||||||
etc = {
|
'';
|
||||||
|
}
|
||||||
"sysconfig/snapper".text = ''
|
// (mapAttrs' (
|
||||||
SNAPPER_CONFIGS="${lib.concatStringsSep " " (builtins.attrNames cfg.configs)}"
|
name: subvolume:
|
||||||
'';
|
nameValuePair "snapper/configs/${name}" ({
|
||||||
|
text = lib.generators.toKeyValue { inherit mkKeyValue; } (
|
||||||
}
|
filterAttrs (k: v: v != defaultOf k) subvolume
|
||||||
// (mapAttrs' (name: subvolume: nameValuePair "snapper/configs/${name}" ({
|
);
|
||||||
text = lib.generators.toKeyValue { inherit mkKeyValue; } (filterAttrs (k: v: v != defaultOf k) subvolume);
|
})
|
||||||
})) cfg.configs)
|
) cfg.configs)
|
||||||
// (lib.optionalAttrs (cfg.filters != null) {
|
// (lib.optionalAttrs (cfg.filters != null) { "snapper/filters/default.txt".text = cfg.filters; });
|
||||||
"snapper/filters/default.txt".text = cfg.filters;
|
|
||||||
});
|
|
||||||
|
|
||||||
};
|
|
||||||
|
|
||||||
services.dbus.packages = [ pkgs.snapper ];
|
|
||||||
|
|
||||||
systemd.services.snapperd = {
|
|
||||||
description = "DBus interface for snapper";
|
|
||||||
inherit documentation;
|
|
||||||
serviceConfig = {
|
|
||||||
Type = "dbus";
|
|
||||||
BusName = "org.opensuse.Snapper";
|
|
||||||
ExecStart = "${pkgs.snapper}/bin/snapperd";
|
|
||||||
CapabilityBoundingSet = "CAP_DAC_OVERRIDE CAP_FOWNER CAP_CHOWN CAP_FSETID CAP_SETFCAP CAP_SYS_ADMIN CAP_SYS_MODULE CAP_IPC_LOCK CAP_SYS_NICE";
|
|
||||||
LockPersonality = true;
|
|
||||||
NoNewPrivileges = false;
|
|
||||||
PrivateNetwork = true;
|
|
||||||
ProtectHostname = true;
|
|
||||||
RestrictAddressFamilies = "AF_UNIX";
|
|
||||||
RestrictRealtime = true;
|
|
||||||
};
|
};
|
||||||
};
|
|
||||||
|
|
||||||
systemd.services.snapper-timeline = {
|
services.dbus.packages = [ pkgs.snapper ];
|
||||||
description = "Timeline of Snapper Snapshots";
|
|
||||||
inherit documentation;
|
|
||||||
requires = [ "local-fs.target" ];
|
|
||||||
serviceConfig.ExecStart = "${pkgs.snapper}/lib/snapper/systemd-helper --timeline";
|
|
||||||
};
|
|
||||||
|
|
||||||
systemd.timers.snapper-timeline = {
|
systemd.services.snapperd = {
|
||||||
wantedBy = [ "timers.target" ];
|
description = "DBus interface for snapper";
|
||||||
timerConfig = {
|
inherit documentation;
|
||||||
Persistent = cfg.persistentTimer;
|
serviceConfig = {
|
||||||
OnCalendar = cfg.snapshotInterval;
|
Type = "dbus";
|
||||||
|
BusName = "org.opensuse.Snapper";
|
||||||
|
ExecStart = "${pkgs.snapper}/bin/snapperd";
|
||||||
|
CapabilityBoundingSet = "CAP_DAC_OVERRIDE CAP_FOWNER CAP_CHOWN CAP_FSETID CAP_SETFCAP CAP_SYS_ADMIN CAP_SYS_MODULE CAP_IPC_LOCK CAP_SYS_NICE";
|
||||||
|
LockPersonality = true;
|
||||||
|
NoNewPrivileges = false;
|
||||||
|
PrivateNetwork = true;
|
||||||
|
ProtectHostname = true;
|
||||||
|
RestrictAddressFamilies = "AF_UNIX";
|
||||||
|
RestrictRealtime = true;
|
||||||
|
};
|
||||||
};
|
};
|
||||||
};
|
|
||||||
|
|
||||||
systemd.services.snapper-cleanup = {
|
systemd.services.snapper-timeline = {
|
||||||
description = "Cleanup of Snapper Snapshots";
|
description = "Timeline of Snapper Snapshots";
|
||||||
inherit documentation;
|
inherit documentation;
|
||||||
serviceConfig.ExecStart = "${pkgs.snapper}/lib/snapper/systemd-helper --cleanup";
|
requires = [ "local-fs.target" ];
|
||||||
};
|
serviceConfig.ExecStart = "${pkgs.snapper}/lib/snapper/systemd-helper --timeline";
|
||||||
|
};
|
||||||
|
|
||||||
systemd.timers.snapper-cleanup = {
|
systemd.timers.snapper-timeline = {
|
||||||
description = "Cleanup of Snapper Snapshots";
|
wantedBy = [ "timers.target" ];
|
||||||
inherit documentation;
|
timerConfig = {
|
||||||
wantedBy = [ "timers.target" ];
|
Persistent = cfg.persistentTimer;
|
||||||
requires = [ "local-fs.target" ];
|
OnCalendar = cfg.snapshotInterval;
|
||||||
timerConfig.OnBootSec = "10m";
|
};
|
||||||
timerConfig.OnUnitActiveSec = cfg.cleanupInterval;
|
};
|
||||||
};
|
|
||||||
|
|
||||||
systemd.services.snapper-boot = lib.mkIf cfg.snapshotRootOnBoot {
|
systemd.services.snapper-cleanup = {
|
||||||
description = "Take snapper snapshot of root on boot";
|
description = "Cleanup of Snapper Snapshots";
|
||||||
inherit documentation;
|
inherit documentation;
|
||||||
serviceConfig.ExecStart = "${pkgs.snapper}/bin/snapper --config root create --cleanup-algorithm number --description boot";
|
serviceConfig.ExecStart = "${pkgs.snapper}/lib/snapper/systemd-helper --cleanup";
|
||||||
serviceConfig.Type = "oneshot";
|
};
|
||||||
requires = [ "local-fs.target" ];
|
|
||||||
wantedBy = [ "multi-user.target" ];
|
|
||||||
unitConfig.ConditionPathExists = "/etc/snapper/configs/root";
|
|
||||||
};
|
|
||||||
|
|
||||||
assertions =
|
systemd.timers.snapper-cleanup = {
|
||||||
concatMap
|
description = "Cleanup of Snapper Snapshots";
|
||||||
(name:
|
inherit documentation;
|
||||||
let
|
wantedBy = [ "timers.target" ];
|
||||||
sub = cfg.configs.${name};
|
requires = [ "local-fs.target" ];
|
||||||
in
|
timerConfig.OnBootSec = "10m";
|
||||||
[ { assertion = !(sub ? extraConfig);
|
timerConfig.OnUnitActiveSec = cfg.cleanupInterval;
|
||||||
message = ''
|
};
|
||||||
The option definition `services.snapper.configs.${name}.extraConfig' no longer has any effect; please remove it.
|
|
||||||
The contents of this option should be migrated to attributes on `services.snapper.configs.${name}'.
|
systemd.services.snapper-boot = lib.mkIf cfg.snapshotRootOnBoot {
|
||||||
'';
|
description = "Take snapper snapshot of root on boot";
|
||||||
}
|
inherit documentation;
|
||||||
] ++
|
serviceConfig.ExecStart = "${pkgs.snapper}/bin/snapper --config root create --cleanup-algorithm number --description boot";
|
||||||
|
serviceConfig.Type = "oneshot";
|
||||||
|
requires = [ "local-fs.target" ];
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
unitConfig.ConditionPathExists = "/etc/snapper/configs/root";
|
||||||
|
};
|
||||||
|
|
||||||
|
assertions = concatMap (
|
||||||
|
name:
|
||||||
|
let
|
||||||
|
sub = cfg.configs.${name};
|
||||||
|
in
|
||||||
|
[
|
||||||
|
{
|
||||||
|
assertion = !(sub ? extraConfig);
|
||||||
|
message = ''
|
||||||
|
The option definition `services.snapper.configs.${name}.extraConfig' no longer has any effect; please remove it.
|
||||||
|
The contents of this option should be migrated to attributes on `services.snapper.configs.${name}'.
|
||||||
|
'';
|
||||||
|
}
|
||||||
|
]
|
||||||
|
++
|
||||||
map
|
map
|
||||||
(attr: {
|
(attr: {
|
||||||
assertion = !(hasAttr attr sub);
|
assertion = !(hasAttr attr sub);
|
||||||
|
@ -265,8 +346,11 @@ in
|
||||||
The option definition `services.snapper.configs.${name}.${attr}' has been renamed to `services.snapper.configs.${name}.${toUpper attr}'.
|
The option definition `services.snapper.configs.${name}.${attr}' has been renamed to `services.snapper.configs.${name}.${toUpper attr}'.
|
||||||
'';
|
'';
|
||||||
})
|
})
|
||||||
[ "fstype" "subvolume" ]
|
[
|
||||||
)
|
"fstype"
|
||||||
(attrNames cfg.configs);
|
"subvolume"
|
||||||
});
|
]
|
||||||
|
) (attrNames cfg.configs);
|
||||||
|
}
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
|
@ -324,7 +324,8 @@ in
       };
       preStart =
         let
-          version = pkgs.sourcehut.${srvsrht}.version;
+          package = pkgs.sourcehut.${srvsrht};
+          version = package.version;
           stateDir = "/var/lib/sourcehut/${srvsrht}";
         in
         mkBefore ''

@ -336,14 +337,14 @@ in
           if test ! -e ${stateDir}/db; then
             # Setup the initial database.
             # Note that it stamps the alembic head afterward
-            ${cfg.python}/bin/${srvsrht}-initdb
+            ${package}/bin/${srvsrht}-initdb
             echo ${version} >${stateDir}/db
           fi

           ${optionalString cfg.settings.${iniKey}.migrate-on-upgrade ''
             if [ "$(cat ${stateDir}/db)" != "${version}" ]; then
               # Manage schema migrations using alembic
-              ${cfg.python}/bin/${srvsrht}-migrate -a upgrade head
+              ${package}/bin/${srvsrht}-migrate -a upgrade head
               echo ${version} >${stateDir}/db
             fi
           ''}

@ -389,7 +390,7 @@ in
         after = [ "network.target" "${srvsrht}.service" ];
         serviceConfig = {
           Type = "oneshot";
-          ExecStart = "${cfg.python}/bin/${timerName}";
+          ExecStart = "${pkgs.sourcehut.${srvsrht}}/bin/${timerName}";
         };
       }
       (timer.service or { })
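The migration branch above only runs when migrate-on-upgrade is set in the service's ini section. A hedged sketch of enabling it; the section name is illustrative, since the real key is whatever iniKey resolves to for the daemon in question:

    services.sourcehut = {
      enable = true;
      settings."git.sr.ht".migrate-on-upgrade = true;  # section name is a placeholder for iniKey
    };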
@ -22,7 +22,7 @@ let
       ${lib.toShellVars env}
       eval "$(${config.systemd.package}/bin/systemctl show -pUID,GID,MainPID tandoor-recipes.service)"
       exec ${pkgs.util-linux}/bin/nsenter \
-        -t $MainPID -m -S $UID -G $GID \
+        -t $MainPID -m -S $UID -G $GID --wdns=${env.MEDIA_ROOT} \
         ${pkg}/bin/tandoor-recipes "$@"
     '';
   in

@ -88,7 +88,7 @@ in
         Group = "tandoor_recipes";
         DynamicUser = true;
         StateDirectory = "tandoor-recipes";
-        WorkingDirectory = "/var/lib/tandoor-recipes";
+        WorkingDirectory = env.MEDIA_ROOT;
         RuntimeDirectory = "tandoor-recipes";

         BindReadOnlyPaths = [
@ -60,7 +60,7 @@ in {
           "-templates ${cfg.templateDir}"
         ];
       in {
-        ExecStart = "${pkgs.grafana_reporter}/bin/grafana-reporter ${args}";
+        ExecStart = "${pkgs.grafana-reporter}/bin/grafana-reporter ${args}";
       };
     };
   };
@ -13,6 +13,9 @@ let
     ln -s /run/wrappers/bin/slabinfo.plugin $out/libexec/netdata/plugins.d/slabinfo.plugin
     ln -s /run/wrappers/bin/freeipmi.plugin $out/libexec/netdata/plugins.d/freeipmi.plugin
     ln -s /run/wrappers/bin/systemd-journal.plugin $out/libexec/netdata/plugins.d/systemd-journal.plugin
+    ln -s /run/wrappers/bin/logs-management.plugin $out/libexec/netdata/plugins.d/logs-management.plugin
+    ln -s /run/wrappers/bin/network-viewer.plugin $out/libexec/netdata/plugins.d/network-viewer.plugin
+    ln -s /run/wrappers/bin/debugfs.plugin $out/libexec/netdata/plugins.d/debugfs.plugin
   '';

   plugins = [
@ -47,6 +50,7 @@ let

   defaultUser = "netdata";

+  isThereAnyWireGuardTunnels = config.networking.wireguard.enable || lib.any (c: lib.hasAttrByPath [ "netdevConfig" "Kind" ] c && c.netdevConfig.Kind == "wireguard") (builtins.attrValues config.systemd.network.netdevs);
 in {
   options = {
     services.netdata = {
@ -86,6 +90,14 @@ in {
           Whether to enable python-based plugins
         '';
       };
+      recommendedPythonPackages = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to enable a set of recommended Python plugins
+          by installing extra Python packages.
+        '';
+      };
       extraPackages = mkOption {
         type = types.functionTo (types.listOf types.package);
         default = ps: [];
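A sketch of turning the new knob on; it only has an effect together with the existing python.enable switch shown in the option tree above:

    services.netdata = {
      enable = true;
      python = {
        enable = true;
        recommendedPythonPackages = true;
      };
    };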
@ -198,13 +210,26 @@ in {
|
||||||
}
|
}
|
||||||
];
|
];
|
||||||
|
|
||||||
|
# Includes a set of recommended Python plugins in exchange of imperfect disk consumption.
|
||||||
|
services.netdata.python.extraPackages = lib.mkIf cfg.python.recommendedPythonPackages (ps: [
|
||||||
|
ps.requests
|
||||||
|
ps.pandas
|
||||||
|
ps.numpy
|
||||||
|
ps.psycopg2
|
||||||
|
ps.python-ldap
|
||||||
|
ps.netdata-pandas
|
||||||
|
ps.changefinder
|
||||||
|
]);
|
||||||
|
|
||||||
services.netdata.configDir.".opt-out-from-anonymous-statistics" = mkIf (!cfg.enableAnalyticsReporting) (pkgs.writeText ".opt-out-from-anonymous-statistics" "");
|
services.netdata.configDir.".opt-out-from-anonymous-statistics" = mkIf (!cfg.enableAnalyticsReporting) (pkgs.writeText ".opt-out-from-anonymous-statistics" "");
|
||||||
environment.etc."netdata/netdata.conf".source = configFile;
|
environment.etc."netdata/netdata.conf".source = configFile;
|
||||||
environment.etc."netdata/conf.d".source = configDirectory;
|
environment.etc."netdata/conf.d".source = configDirectory;
|
||||||
|
|
||||||
systemd.services.netdata = {
|
systemd.services.netdata = {
|
||||||
description = "Real time performance monitoring";
|
description = "Real time performance monitoring";
|
||||||
after = [ "network.target" ];
|
after = [ "network.target" "suid-sgid-wrappers.service" ];
|
||||||
|
# No wrapper means no "useful" netdata.
|
||||||
|
requires = [ "suid-sgid-wrappers.service" ];
|
||||||
wantedBy = [ "multi-user.target" ];
|
wantedBy = [ "multi-user.target" ];
|
||||||
path = (with pkgs; [
|
path = (with pkgs; [
|
||||||
curl
|
curl
|
||||||
|
@ -213,10 +238,16 @@ in {
|
||||||
which
|
which
|
||||||
procps
|
procps
|
||||||
bash
|
bash
|
||||||
|
nvme-cli # for go.d
|
||||||
|
iw # for charts.d
|
||||||
|
apcupsd # for charts.d
|
||||||
|
# TODO: firehol # for FireQoS -- this requires more NixOS module support.
|
||||||
util-linux # provides logger command; required for syslog health alarms
|
util-linux # provides logger command; required for syslog health alarms
|
||||||
])
|
])
|
||||||
++ lib.optional cfg.python.enable (pkgs.python3.withPackages cfg.python.extraPackages)
|
++ lib.optional cfg.python.enable (pkgs.python3.withPackages cfg.python.extraPackages)
|
||||||
++ lib.optional config.virtualisation.libvirtd.enable (config.virtualisation.libvirtd.package);
|
++ lib.optional config.virtualisation.libvirtd.enable config.virtualisation.libvirtd.package
|
||||||
|
++ lib.optional config.virtualisation.docker.enable config.virtualisation.docker.package
|
||||||
|
++ lib.optionals config.virtualisation.podman.enable [ pkgs.jq config.virtualisation.podman.package ];
|
||||||
environment = {
|
environment = {
|
||||||
PYTHONPATH = "${cfg.package}/libexec/netdata/python.d/python_modules";
|
PYTHONPATH = "${cfg.package}/libexec/netdata/python.d/python_modules";
|
||||||
NETDATA_PIPENAME = "/run/netdata/ipc";
|
NETDATA_PIPENAME = "/run/netdata/ipc";
|
||||||
|
@ -256,6 +287,8 @@ in {
|
||||||
# Configuration directory and mode
|
# Configuration directory and mode
|
||||||
ConfigurationDirectory = "netdata";
|
ConfigurationDirectory = "netdata";
|
||||||
ConfigurationDirectoryMode = "0755";
|
ConfigurationDirectoryMode = "0755";
|
||||||
|
# AmbientCapabilities
|
||||||
|
AmbientCapabilities = lib.optional isThereAnyWireGuardTunnels "CAP_NET_ADMIN";
|
||||||
# Capabilities
|
# Capabilities
|
||||||
CapabilityBoundingSet = [
|
CapabilityBoundingSet = [
|
||||||
"CAP_DAC_OVERRIDE" # is required for freeipmi and slabinfo plugins
|
"CAP_DAC_OVERRIDE" # is required for freeipmi and slabinfo plugins
|
||||||
|
@ -269,7 +302,7 @@ in {
|
||||||
"CAP_SYS_CHROOT" # is required for cgroups plugin
|
"CAP_SYS_CHROOT" # is required for cgroups plugin
|
||||||
"CAP_SETUID" # is required for cgroups and cgroups-network plugins
|
"CAP_SETUID" # is required for cgroups and cgroups-network plugins
|
||||||
"CAP_SYSLOG" # is required for systemd-journal plugin
|
"CAP_SYSLOG" # is required for systemd-journal plugin
|
||||||
];
|
] ++ lib.optional isThereAnyWireGuardTunnels "CAP_NET_ADMIN";
|
||||||
# Sandboxing
|
# Sandboxing
|
||||||
ProtectSystem = "full";
|
ProtectSystem = "full";
|
||||||
ProtectHome = "read-only";
|
ProtectHome = "read-only";
|
||||||
|
@ -308,6 +341,14 @@ in {
|
||||||
permissions = "u+rx,g+x,o-rwx";
|
permissions = "u+rx,g+x,o-rwx";
|
||||||
};
|
};
|
||||||
|
|
||||||
|
"debugfs.plugin" = {
|
||||||
|
source = "${cfg.package}/libexec/netdata/plugins.d/debugfs.plugin.org";
|
||||||
|
capabilities = "cap_dac_read_search+ep";
|
||||||
|
owner = cfg.user;
|
||||||
|
group = cfg.group;
|
||||||
|
permissions = "u+rx,g+x,o-rwx";
|
||||||
|
};
|
||||||
|
|
||||||
"cgroup-network" = {
|
"cgroup-network" = {
|
||||||
source = "${cfg.package}/libexec/netdata/plugins.d/cgroup-network.org";
|
source = "${cfg.package}/libexec/netdata/plugins.d/cgroup-network.org";
|
||||||
capabilities = "cap_setuid+ep";
|
capabilities = "cap_setuid+ep";
|
||||||
|
@ -332,6 +373,14 @@ in {
|
||||||
permissions = "u+rx,g+x,o-rwx";
|
permissions = "u+rx,g+x,o-rwx";
|
||||||
};
|
};
|
||||||
|
|
||||||
|
"logs-management.plugin" = {
|
||||||
|
source = "${cfg.package}/libexec/netdata/plugins.d/logs-management.plugin.org";
|
||||||
|
capabilities = "cap_dac_read_search,cap_syslog+ep";
|
||||||
|
owner = cfg.user;
|
||||||
|
group = cfg.group;
|
||||||
|
permissions = "u+rx,g+x,o-rwx";
|
||||||
|
};
|
||||||
|
|
||||||
"slabinfo.plugin" = {
|
"slabinfo.plugin" = {
|
||||||
source = "${cfg.package}/libexec/netdata/plugins.d/slabinfo.plugin.org";
|
source = "${cfg.package}/libexec/netdata/plugins.d/slabinfo.plugin.org";
|
||||||
capabilities = "cap_dac_override+ep";
|
capabilities = "cap_dac_override+ep";
|
||||||
|
@ -348,6 +397,14 @@ in {
|
||||||
group = cfg.group;
|
group = cfg.group;
|
||||||
permissions = "u+rx,g+x,o-rwx";
|
permissions = "u+rx,g+x,o-rwx";
|
||||||
};
|
};
|
||||||
|
} // optionalAttrs (cfg.package.withNetworkViewer) {
|
||||||
|
"network-viewer.plugin" = {
|
||||||
|
source = "${cfg.package}/libexec/netdata/plugins.d/network-viewer.plugin.org";
|
||||||
|
capabilities = "cap_sys_admin,cap_dac_read_search,cap_sys_ptrace+ep";
|
||||||
|
owner = cfg.user;
|
||||||
|
group = cfg.group;
|
||||||
|
permissions = "u+rx,g+x,o-rwx";
|
||||||
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
security.pam.loginLimits = [
|
security.pam.loginLimits = [
|
||||||
|
@ -359,6 +416,8 @@ in {
|
||||||
${defaultUser} = {
|
${defaultUser} = {
|
||||||
group = defaultUser;
|
group = defaultUser;
|
||||||
isSystemUser = true;
|
isSystemUser = true;
|
||||||
|
extraGroups = lib.optional config.virtualisation.docker.enable "docker"
|
||||||
|
++ lib.optional config.virtualisation.podman.enable "podman";
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
70
third_party/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager-webhook-logger.nix
vendored
Normal file
70
third_party/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager-webhook-logger.nix
vendored
Normal file
|
@ -0,0 +1,70 @@
|
||||||
|
{ config, lib, pkgs, ... }:
|
||||||
|
|
||||||
|
with lib;
|
||||||
|
|
||||||
|
let
|
||||||
|
cfg = config.services.prometheus.alertmanagerWebhookLogger;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
options.services.prometheus.alertmanagerWebhookLogger = {
|
||||||
|
enable = mkEnableOption "Alertmanager Webhook Logger";
|
||||||
|
|
||||||
|
package = mkPackageOption pkgs "alertmanager-webhook-logger" { };
|
||||||
|
|
||||||
|
extraFlags = mkOption {
|
||||||
|
type = types.listOf types.str;
|
||||||
|
default = [];
|
||||||
|
description = "Extra command line options to pass to alertmanager-webhook-logger.";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config = mkIf cfg.enable {
|
||||||
|
systemd.services.alertmanager-webhook-logger = {
|
||||||
|
description = "Alertmanager Webhook Logger";
|
||||||
|
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
after = [ "network-online.target" ];
|
||||||
|
wants = [ "network-online.target" ];
|
||||||
|
|
||||||
|
serviceConfig = {
|
||||||
|
ExecStart = ''
|
||||||
|
${cfg.package}/bin/alertmanager-webhook-logger \
|
||||||
|
${escapeShellArgs cfg.extraFlags}
|
||||||
|
'';
|
||||||
|
|
||||||
|
DynamicUser = true;
|
||||||
|
NoNewPrivileges = true;
|
||||||
|
|
||||||
|
ProtectProc = "invisible";
|
||||||
|
ProtectSystem = "strict";
|
||||||
|
ProtectHome = "tmpfs";
|
||||||
|
|
||||||
|
PrivateTmp = true;
|
||||||
|
PrivateDevices = true;
|
||||||
|
PrivateIPC = true;
|
||||||
|
|
||||||
|
ProtectHostname = true;
|
||||||
|
ProtectClock = true;
|
||||||
|
ProtectKernelTunables = true;
|
||||||
|
ProtectKernelModules = true;
|
||||||
|
ProtectKernelLogs = true;
|
||||||
|
ProtectControlGroups = true;
|
||||||
|
|
||||||
|
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
|
||||||
|
RestrictRealtime = true;
|
||||||
|
RestrictSUIDSGID = true;
|
||||||
|
|
||||||
|
SystemCallFilter = [
|
||||||
|
"@system-service"
|
||||||
|
"~@cpu-emulation"
|
||||||
|
"~@privileged"
|
||||||
|
"~@reboot"
|
||||||
|
"~@setuid"
|
||||||
|
"~@swap"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
meta.maintainers = [ maintainers.jpds ];
|
||||||
|
}
|
|
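A minimal sketch of enabling the new alertmanager-webhook-logger module added above; wiring an Alertmanager webhook receiver to the logger's listen address is left out here because that address is not part of this hunk:

    services.prometheus.alertmanagerWebhookLogger.enable = true;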
@ -4,7 +4,7 @@ let
   inherit (lib.attrsets) optionalAttrs;
   inherit (lib.generators) toINIWithGlobalSection;
   inherit (lib.lists) optional;
-  inherit (lib.modules) mkIf;
+  inherit (lib.modules) mkIf mkRemovedOptionModule;
   inherit (lib.options) literalExpression mkEnableOption mkOption;
   inherit (lib.strings) escape;
   inherit (lib.types) attrsOf bool int lines oneOf str submodule;

@ -27,6 +27,13 @@
 in
 {

+  imports = [
+    (mkRemovedOptionModule [ "services" "davfs2" "extraConfig" ] ''
+      The option extraConfig got removed, please migrate to
+      services.davfs2.settings instead.
+    '')
+  ];
+
   options.services.davfs2 = {
     enable = mkEnableOption "davfs2";

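A hedged sketch of the migration the removal notice above asks for, moving a davfs2 setting from the removed extraConfig string to the structured settings option. The exact nesting under settings (global section vs. named sections) is not part of this hunk, so the shape and the kernel_fs value below are assumptions:

    services.davfs2 = {
      enable = true;
      # previously: extraConfig = ''kernel_fs coda'';
      settings.globalSection.kernel_fs = "coda";  # assumed global-section nesting
    };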
@ -50,10 +50,7 @@ in
       };

       serviceConfig = {
         ExecStart = "${pkgs.antennas}/bin/antennas";
-
-        # Antennas expects all resources like html and config to be relative to it's working directory
-        WorkingDirectory = "${pkgs.antennas}/libexec/antennas/deps/antennas/";

         # Hardening
         CapabilityBoundingSet = [ "" ];
@ -37,7 +37,6 @@ in
 {
   meta.maintainers = with maintainers; [
     misuzu
-    thubrecht
   ];
   meta.doc = ./netbird.md;

@ -16,7 +16,7 @@ in

 {
   meta = {
-    maintainers = with lib.maintainers; [thubrecht patrickdag];
+    maintainers = with lib.maintainers; [patrickdag];
     doc = ./server.md;
   };

@ -1,83 +1,94 @@
|
||||||
{ config, lib, options, pkgs, utils, ... }:
|
{ config
|
||||||
with lib;
|
, lib
|
||||||
|
, pkgs
|
||||||
|
, ...
|
||||||
|
}:
|
||||||
|
|
||||||
let
|
let
|
||||||
cfg = config.services.wstunnel;
|
cfg = config.services.wstunnel;
|
||||||
attrsToArgs = attrs: utils.escapeSystemdExecArgs (
|
|
||||||
mapAttrsToList
|
|
||||||
(name: value: if value == true then "--${name}" else "--${name}=${value}")
|
|
||||||
attrs
|
|
||||||
);
|
|
||||||
|
|
||||||
hostPortToString = { host, port }: "${host}:${builtins.toString port}";
|
hostPortToString = { host, port }: "${host}:${toString port}";
|
||||||
|
|
||||||
hostPortSubmodule = {
|
hostPortSubmodule = {
|
||||||
options = {
|
options = {
|
||||||
host = mkOption {
|
host = lib.mkOption {
|
||||||
description = "The hostname.";
|
description = "The hostname.";
|
||||||
type = types.str;
|
type = lib.types.str;
|
||||||
};
|
};
|
||||||
port = mkOption {
|
port = lib.mkOption {
|
||||||
description = "The port.";
|
description = "The port.";
|
||||||
type = types.port;
|
type = lib.types.port;
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
commonOptions = {
|
commonOptions = {
|
||||||
enable = mkOption {
|
enable = lib.mkEnableOption "this `wstunnel` instance." // {
|
||||||
description = "Whether to enable this `wstunnel` instance.";
|
|
||||||
type = types.bool;
|
|
||||||
default = true;
|
default = true;
|
||||||
};
|
};
|
||||||
|
|
||||||
package = mkPackageOption pkgs "wstunnel" {};
|
package = lib.mkPackageOption pkgs "wstunnel" { };
|
||||||
|
|
||||||
autoStart = mkOption {
|
autoStart =
|
||||||
description = "Whether this tunnel server should be started automatically.";
|
lib.mkEnableOption "starting this wstunnel instance automatically." // {
|
||||||
type = types.bool;
|
default = true;
|
||||||
default = true;
|
};
|
||||||
};
|
|
||||||
|
|
||||||
extraArgs = mkOption {
|
extraArgs = lib.mkOption {
|
||||||
description = "Extra command line arguments to pass to `wstunnel`. Attributes of the form `argName = true;` will be translated to `--argName`, and `argName = \"value\"` to `--argName=value`.";
|
description = ''
|
||||||
type = with types; attrsOf (either str bool);
|
Extra command line arguments to pass to `wstunnel`.
|
||||||
default = {};
|
Attributes of the form `argName = true;` will be translated to `--argName`,
|
||||||
|
and `argName = \"value\"` to `--argName value`.
|
||||||
|
'';
|
||||||
|
type = with lib.types; attrsOf (either str bool);
|
||||||
|
default = { };
|
||||||
example = {
|
example = {
|
||||||
"someNewOption" = true;
|
"someNewOption" = true;
|
||||||
"someNewOptionWithValue" = "someValue";
|
"someNewOptionWithValue" = "someValue";
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
loggingLevel = mkOption {
|
loggingLevel = lib.mkOption {
|
||||||
description = ''
|
description = ''
|
||||||
Passed to --log-lvl
|
Passed to --log-lvl
|
||||||
|
|
||||||
Control the log verbosity. i.e: TRACE, DEBUG, INFO, WARN, ERROR, OFF
|
Control the log verbosity. i.e: TRACE, DEBUG, INFO, WARN, ERROR, OFF
|
||||||
For more details, checkout [EnvFilter](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#example-syntax)
|
For more details, checkout [EnvFilter](https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#example-syntax)
|
||||||
'';
|
'';
|
||||||
type = types.nullOr types.str;
|
type = lib.types.nullOr lib.types.str;
|
||||||
example = "INFO";
|
example = "INFO";
|
||||||
default = null;
|
default = null;
|
||||||
};
|
};
|
||||||
|
|
||||||
environmentFile = mkOption {
|
environmentFile = lib.mkOption {
|
||||||
description = "Environment file to be passed to the systemd service. Useful for passing secrets to the service to prevent them from being world-readable in the Nix store. Note however that the secrets are passed to `wstunnel` through the command line, which makes them locally readable for all users of the system at runtime.";
|
description = ''
|
||||||
type = types.nullOr types.path;
|
Environment file to be passed to the systemd service.
|
||||||
|
Useful for passing secrets to the service to prevent them from being
|
||||||
|
world-readable in the Nix store.
|
||||||
|
Note however that the secrets are passed to `wstunnel` through
|
||||||
|
the command line, which makes them locally readable for all users of
|
||||||
|
the system at runtime.
|
||||||
|
'';
|
||||||
|
type = lib.types.nullOr lib.types.path;
|
||||||
default = null;
|
default = null;
|
||||||
example = "/var/lib/secrets/wstunnelSecrets";
|
example = "/var/lib/secrets/wstunnelSecrets";
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
serverSubmodule = { config, ...}: {
|
serverSubmodule = { config, ... }: {
|
||||||
options = commonOptions // {
|
options = commonOptions // {
|
||||||
listen = mkOption {
|
listen = lib.mkOption {
|
||||||
description = "Address and port to listen on. Setting the port to a value below 1024 will also give the process the required `CAP_NET_BIND_SERVICE` capability.";
|
description = ''
|
||||||
type = types.submodule hostPortSubmodule;
|
Address and port to listen on.
|
||||||
|
Setting the port to a value below 1024 will also give the process
|
||||||
|
the required `CAP_NET_BIND_SERVICE` capability.
|
||||||
|
'';
|
||||||
|
type = lib.types.submodule hostPortSubmodule;
|
||||||
default = {
|
default = {
|
||||||
host = "0.0.0.0";
|
host = "0.0.0.0";
|
||||||
port = if config.enableHTTPS then 443 else 80;
|
port = if config.enableHTTPS then 443 else 80;
|
||||||
};
|
};
|
||||||
defaultText = literalExpression ''
|
defaultText = lib.literalExpression ''
|
||||||
{
|
{
|
||||||
host = "0.0.0.0";
|
host = "0.0.0.0";
|
||||||
port = if enableHTTPS then 443 else 80;
|
port = if enableHTTPS then 443 else 80;
|
||||||
|
@ -85,39 +96,50 @@ let
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
|
||||||
restrictTo = mkOption {
|
restrictTo = lib.mkOption {
|
||||||
description = "Accepted traffic will be forwarded only to this service. Set to `null` to allow forwarding to arbitrary addresses.";
|
description = ''
|
||||||
type = types.listOf (types.submodule hostPortSubmodule);
|
Accepted traffic will be forwarded only to this service.
|
||||||
default = [];
|
'';
|
||||||
|
type = lib.types.listOf (lib.types.submodule hostPortSubmodule);
|
||||||
|
default = [ ];
|
||||||
example = [{
|
example = [{
|
||||||
host = "127.0.0.1";
|
host = "127.0.0.1";
|
||||||
port = 51820;
|
port = 51820;
|
||||||
}];
|
}];
|
||||||
};
|
};
|
||||||
|
|
||||||
enableHTTPS = mkOption {
|
enableHTTPS = lib.mkOption {
|
||||||
description = "Use HTTPS for the tunnel server.";
|
description = "Use HTTPS for the tunnel server.";
|
||||||
type = types.bool;
|
type = lib.types.bool;
|
||||||
default = true;
|
default = true;
|
||||||
};
|
};
|
||||||
|
|
||||||
tlsCertificate = mkOption {
|
tlsCertificate = lib.mkOption {
|
||||||
description = "TLS certificate to use instead of the hardcoded one in case of HTTPS connections. Use together with `tlsKey`.";
|
description = ''
|
||||||
type = types.nullOr types.path;
|
TLS certificate to use instead of the hardcoded one in case of HTTPS connections.
|
||||||
|
Use together with `tlsKey`.
|
||||||
|
'';
|
||||||
|
type = lib.types.nullOr lib.types.path;
|
||||||
default = null;
|
default = null;
|
||||||
example = "/var/lib/secrets/cert.pem";
|
example = "/var/lib/secrets/cert.pem";
|
||||||
};
|
};
|
||||||
|
|
||||||
tlsKey = mkOption {
|
tlsKey = lib.mkOption {
|
||||||
description = "TLS key to use instead of the hardcoded on in case of HTTPS connections. Use together with `tlsCertificate`.";
|
description = ''
|
||||||
type = types.nullOr types.path;
|
TLS key to use instead of the hardcoded on in case of HTTPS connections.
|
||||||
|
Use together with `tlsCertificate`.
|
||||||
|
'';
|
||||||
|
type = lib.types.nullOr lib.types.path;
|
||||||
default = null;
|
default = null;
|
||||||
example = "/var/lib/secrets/key.pem";
|
example = "/var/lib/secrets/key.pem";
|
||||||
};
|
};
|
||||||
|
|
||||||
useACMEHost = mkOption {
|
useACMEHost = lib.mkOption {
|
||||||
description = "Use a certificate generated by the NixOS ACME module for the given host. Note that this will not generate a new certificate - you will need to do so with `security.acme.certs`.";
|
description = ''
|
||||||
type = types.nullOr types.str;
|
Use a certificate generated by the NixOS ACME module for the given host.
|
||||||
|
Note that this will not generate a new certificate - you will need to do so with `security.acme.certs`.
|
||||||
|
'';
|
||||||
|
type = lib.types.nullOr lib.types.str;
|
||||||
default = null;
|
default = null;
|
||||||
example = "example.com";
|
example = "example.com";
|
||||||
};
|
};
|
||||||
|
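The options above cover the wstunnel server side; a sketch of one server instance using only attributes that appear in this hunk (the instance name echoes the example at the end of this file, and the ACME host is a placeholder that needs a matching security.acme.certs entry):

    services.wstunnel.servers."wg-tunnel" = {
      enable = true;
      listen = { host = "0.0.0.0"; port = 443; };
      restrictTo = [ { host = "127.0.0.1"; port = 51820; } ];
      useACMEHost = "example.com";  # placeholder host name
    };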
@ -126,95 +148,113 @@ let
|
||||||
|
|
||||||
clientSubmodule = { config, ... }: {
|
clientSubmodule = { config, ... }: {
|
||||||
options = commonOptions // {
|
options = commonOptions // {
|
||||||
connectTo = mkOption {
|
connectTo = lib.mkOption {
|
||||||
description = "Server address and port to connect to.";
|
description = "Server address and port to connect to.";
|
||||||
type = types.str;
|
type = lib.types.str;
|
||||||
example = "https://wstunnel.server.com:8443";
|
example = "https://wstunnel.server.com:8443";
|
||||||
};
|
};
|
||||||
|
|
||||||
localToRemote = mkOption {
|
localToRemote = lib.mkOption {
|
||||||
description = ''Listen on local and forwards traffic from remote.'';
|
description = ''Listen on local and forwards traffic from remote.'';
|
||||||
type = types.listOf (types.str);
|
type = lib.types.listOf (lib.types.str);
|
||||||
default = [];
|
default = [ ];
|
||||||
example = [
|
example = [
|
||||||
"tcp://1212:google.com:443"
|
"tcp://1212:google.com:443"
|
||||||
"unix:///tmp/wstunnel.sock:g.com:443"
|
"unix:///tmp/wstunnel.sock:g.com:443"
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
|
|
||||||
remoteToLocal = mkOption {
|
remoteToLocal = lib.mkOption {
|
||||||
description = "Listen on remote and forwards traffic from local. Only tcp is supported";
|
description = "Listen on remote and forwards traffic from local. Only tcp is supported";
|
||||||
type = types.listOf (types.str);
|
type = lib.types.listOf lib.types.str;
|
||||||
default = [];
|
default = [ ];
|
||||||
example = [
|
example = [
|
||||||
"tcp://1212:google.com:443"
|
"tcp://1212:google.com:443"
|
||||||
"unix://wstunnel.sock:g.com:443"
|
"unix://wstunnel.sock:g.com:443"
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
|
|
||||||
addNetBind = mkEnableOption "Whether add CAP_NET_BIND_SERVICE to the tunnel service, this should be enabled if you want to bind port < 1024";
|
addNetBind = lib.mkEnableOption "Whether add CAP_NET_BIND_SERVICE to the tunnel service, this should be enabled if you want to bind port < 1024";
|
||||||
|
|
||||||
httpProxy = mkOption {
|
httpProxy = lib.mkOption {
|
||||||
description = ''
|
description = ''
|
||||||
Proxy to use to connect to the wstunnel server (`USER:PASS@HOST:PORT`).
|
Proxy to use to connect to the wstunnel server (`USER:PASS@HOST:PORT`).
|
||||||
|
|
||||||
::: {.warning}
|
::: {.warning}
|
||||||
Passwords specified here will be world-readable in the Nix store! To pass a password to the service, point the `environmentFile` option to a file containing `PROXY_PASSWORD=<your-password-here>` and set this option to `<user>:$PROXY_PASSWORD@<host>:<port>`. Note however that this will also locally leak the passwords at runtime via e.g. /proc/<pid>/cmdline.
|
Passwords specified here will be world-readable in the Nix store!
|
||||||
|
To pass a password to the service, point the `environmentFile` option
|
||||||
|
to a file containing `PROXY_PASSWORD=<your-password-here>` and set
|
||||||
|
this option to `<user>:$PROXY_PASSWORD@<host>:<port>`.
|
||||||
|
Note however that this will also locally leak the passwords at
|
||||||
|
runtime via e.g. /proc/<pid>/cmdline.
|
||||||
:::
|
:::
|
||||||
'';
|
'';
|
||||||
type = types.nullOr types.str;
|
type = lib.types.nullOr lib.types.str;
|
||||||
default = null;
|
default = null;
|
||||||
};
|
};
|
||||||
|
|
||||||
soMark = mkOption {
|
soMark = lib.mkOption {
|
||||||
description = "Mark network packets with the SO_MARK sockoption with the specified value. Setting this option will also enable the required `CAP_NET_ADMIN` capability for the systemd service.";
|
description = ''
|
||||||
type = types.nullOr types.int;
|
Mark network packets with the SO_MARK sockoption with the specified value.
|
||||||
|
Setting this option will also enable the required `CAP_NET_ADMIN` capability
|
||||||
|
for the systemd service.
|
||||||
|
'';
|
||||||
|
type = lib.types.nullOr lib.types.ints.unsigned;
|
||||||
default = null;
|
default = null;
|
||||||
};
|
};
|
||||||
|
|
||||||
upgradePathPrefix = mkOption {
|
upgradePathPrefix = lib.mkOption {
|
||||||
description = "Use a specific HTTP path prefix that will show up in the upgrade request to the `wstunnel` server. Useful when running `wstunnel` behind a reverse proxy.";
|
description = ''
|
||||||
type = types.nullOr types.str;
|
Use a specific HTTP path prefix that will show up in the upgrade
|
||||||
|
request to the `wstunnel` server.
|
||||||
|
Useful when running `wstunnel` behind a reverse proxy.
|
||||||
|
'';
|
||||||
|
type = lib.types.nullOr lib.types.str;
|
||||||
default = null;
|
default = null;
|
||||||
example = "wstunnel";
|
example = "wstunnel";
|
||||||
};
|
};
|
||||||
|
|
||||||
tlsSNI = mkOption {
|
tlsSNI = lib.mkOption {
|
||||||
description = "Use this as the SNI while connecting via TLS. Useful for circumventing hostname-based firewalls.";
|
description = "Use this as the SNI while connecting via TLS. Useful for circumventing hostname-based firewalls.";
|
||||||
type = types.nullOr types.str;
|
type = lib.types.nullOr lib.types.str;
|
||||||
default = null;
|
default = null;
|
||||||
};
|
};
|
||||||
|
|
||||||
tlsVerifyCertificate = mkOption {
|
tlsVerifyCertificate = lib.mkOption {
|
||||||
description = "Whether to verify the TLS certificate of the server. It might be useful to set this to `false` when working with the `tlsSNI` option.";
|
description = "Whether to verify the TLS certificate of the server. It might be useful to set this to `false` when working with the `tlsSNI` option.";
|
||||||
type = types.bool;
|
type = lib.types.bool;
|
||||||
default = true;
|
default = true;
|
||||||
};
|
};
|
||||||
|
|
||||||
# The original argument name `websocketPingFrequency` is a misnomer, as the frequency is the inverse of the interval.
|
# The original argument name `websocketPingFrequency` is a misnomer, as the frequency is the inverse of the interval.
|
||||||
websocketPingInterval = mkOption {
|
websocketPingInterval = lib.mkOption {
|
||||||
description = "Frequency at which the client will send websocket ping to the server.";
|
description = "Frequency at which the client will send websocket ping to the server.";
|
||||||
type = types.nullOr types.ints.unsigned;
|
type = lib.types.nullOr lib.types.ints.unsigned;
|
||||||
default = null;
|
default = null;
|
||||||
};
|
};
|
||||||
|
|
||||||
upgradeCredentials = mkOption {
|
upgradeCredentials = lib.mkOption {
|
||||||
description = ''
|
description = ''
|
||||||
Use these credentials to authenticate during the HTTP upgrade request (Basic authorization type, `USER:[PASS]`).
|
Use these credentials to authenticate during the HTTP upgrade request
|
||||||
|
(Basic authorization type, `USER:[PASS]`).
|
||||||
|
|
||||||
::: {.warning}
|
::: {.warning}
|
||||||
Passwords specified here will be world-readable in the Nix store! To pass a password to the service, point the `environmentFile` option to a file containing `HTTP_PASSWORD=<your-password-here>` and set this option to `<user>:$HTTP_PASSWORD`. Note however that this will also locally leak the passwords at runtime via e.g. /proc/<pid>/cmdline.
|
Passwords specified here will be world-readable in the Nix store!
|
||||||
|
To pass a password to the service, point the `environmentFile` option
|
||||||
|
to a file containing `HTTP_PASSWORD=<your-password-here>` and set this
|
||||||
|
option to `<user>:$HTTP_PASSWORD`.
|
||||||
|
Note however that this will also locally leak the passwords at runtime
|
||||||
|
via e.g. /proc/<pid>/cmdline.
|
||||||
:::
|
:::
|
||||||
'';
|
'';
|
||||||
type = types.nullOr types.str;
|
type = lib.types.nullOr lib.types.str;
|
||||||
default = null;
|
default = null;
|
||||||
};
|
};
|
||||||
|
|
||||||
customHeaders = mkOption {
|
customHeaders = lib.mkOption {
|
||||||
description = "Custom HTTP headers to send during the upgrade request.";
|
description = "Custom HTTP headers to send during the upgrade request.";
|
||||||
type = types.attrsOf types.str;
|
type = lib.types.attrsOf lib.types.str;
|
||||||
default = {};
|
default = { };
|
||||||
example = {
|
example = {
|
||||||
"X-Some-Header" = "some-value";
|
"X-Some-Header" = "some-value";
|
||||||
};
|
};
|
||||||
|
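And a matching client-side sketch, again restricted to options shown above; the clients attrset itself is an assumption, mirroring the servers option defined later in this file:

    services.wstunnel.clients."home-client" = {
      enable = true;
      connectTo = "wss://wstunnel.server.com:8443";
      localToRemote = [ "tcp://1212:google.com:443" ];
      customHeaders."X-Some-Header" = "some-value";
    };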
@ -224,49 +264,63 @@ let
|
||||||
|
|
||||||
generateServerUnit = name: serverCfg: {
|
generateServerUnit = name: serverCfg: {
|
||||||
name = "wstunnel-server-${name}";
|
name = "wstunnel-server-${name}";
|
||||||
value = {
|
value =
|
||||||
description = "wstunnel server - ${name}";
|
let
|
||||||
requires = [ "network.target" "network-online.target" ];
|
certConfig = config.security.acme.certs.${serverCfg.useACMEHost};
|
||||||
after = [ "network.target" "network-online.target" ];
|
in
|
||||||
wantedBy = optional serverCfg.autoStart "multi-user.target";
|
{
|
||||||
|
description = "wstunnel server - ${name}";
|
||||||
|
requires = [ "network.target" "network-online.target" ];
|
||||||
|
after = [ "network.target" "network-online.target" ];
|
||||||
|
wantedBy = lib.optional serverCfg.autoStart "multi-user.target";
|
||||||
|
|
||||||
serviceConfig = let
|
environment.RUST_LOG = serverCfg.loggingLevel;
|
||||||
certConfig = config.security.acme.certs."${serverCfg.useACMEHost}";
|
|
||||||
in {
|
serviceConfig = {
|
||||||
Type = "simple";
|
Type = "exec";
|
||||||
ExecStart = with serverCfg; let
|
EnvironmentFile =
|
||||||
resolvedTlsCertificate = if useACMEHost != null
|
lib.optional (serverCfg.environmentFile != null) serverCfg.environmentFile;
|
||||||
then "${certConfig.directory}/fullchain.pem"
|
DynamicUser = true;
|
||||||
else tlsCertificate;
|
SupplementaryGroups =
|
||||||
resolvedTlsKey = if useACMEHost != null
|
lib.optional (serverCfg.useACMEHost != null) certConfig.group;
|
||||||
then "${certConfig.directory}/key.pem"
|
PrivateTmp = true;
|
||||||
else tlsKey;
|
AmbientCapabilities =
|
||||||
in ''
|
lib.optionals (serverCfg.listen.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
|
||||||
${package}/bin/wstunnel \
|
NoNewPrivileges = true;
|
||||||
|
RestrictNamespaces = "uts ipc pid user cgroup";
|
||||||
|
ProtectSystem = "strict";
|
||||||
|
ProtectHome = true;
|
||||||
|
ProtectKernelTunables = true;
|
||||||
|
ProtectKernelModules = true;
|
||||||
|
ProtectControlGroups = true;
|
||||||
|
PrivateDevices = true;
|
||||||
|
RestrictSUIDSGID = true;
|
||||||
|
|
||||||
|
Restart = "on-failure";
|
||||||
|
RestartSec = 2;
|
||||||
|
RestartSteps = 20;
|
||||||
|
RestartMaxDelaySec = "5min";
|
||||||
|
};
|
||||||
|
|
||||||
|
script = with serverCfg; ''
|
||||||
|
${lib.getExe package} \
|
||||||
server \
|
server \
|
||||||
${concatStringsSep " " (builtins.map (hostPair: "--restrict-to ${utils.escapeSystemdExecArg (hostPortToString hostPair)}") restrictTo)} \
|
${lib.cli.toGNUCommandLineShell { } (
|
||||||
${optionalString (resolvedTlsCertificate != null) "--tls-certificate ${utils.escapeSystemdExecArg resolvedTlsCertificate}"} \
|
lib.recursiveUpdate
|
||||||
${optionalString (resolvedTlsKey != null) "--tls-private-key ${utils.escapeSystemdExecArg resolvedTlsKey}"} \
|
{
|
||||||
${optionalString (loggingLevel != null) "--log-lvl ${loggingLevel}"} \
|
restrict-to = map hostPortToString restrictTo;
|
||||||
${attrsToArgs extraArgs} \
|
tls-certificate = if useACMEHost != null
|
||||||
${utils.escapeSystemdExecArg "${if enableHTTPS then "wss" else "ws"}://${hostPortToString listen}"}
|
then "${certConfig.directory}/fullchain.pem"
|
||||||
|
else "${tlsCertificate}";
|
||||||
|
tls-private-key = if useACMEHost != null
|
||||||
|
then "${certConfig.directory}/key.pem"
|
||||||
|
else "${tlsKey}";
|
||||||
|
}
|
||||||
|
extraArgs
|
||||||
|
)} \
|
||||||
|
${lib.escapeShellArg "${if enableHTTPS then "wss" else "ws"}://${hostPortToString listen}"}
|
||||||
'';
|
'';
|
||||||
EnvironmentFile = optional (serverCfg.environmentFile != null) serverCfg.environmentFile;
|
|
||||||
DynamicUser = true;
|
|
||||||
SupplementaryGroups = optional (serverCfg.useACMEHost != null) certConfig.group;
|
|
||||||
PrivateTmp = true;
|
|
||||||
AmbientCapabilities = optionals (serverCfg.listen.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
|
|
||||||
NoNewPrivileges = true;
|
|
||||||
RestrictNamespaces = "uts ipc pid user cgroup";
|
|
||||||
ProtectSystem = "strict";
|
|
||||||
ProtectHome = true;
|
|
||||||
ProtectKernelTunables = true;
|
|
||||||
ProtectKernelModules = true;
|
|
||||||
ProtectControlGroups = true;
|
|
||||||
PrivateDevices = true;
|
|
||||||
RestrictSUIDSGID = true;
|
|
||||||
};
|
};
|
||||||
};
|
|
||||||
};
|
};
|
||||||
|
|
||||||
generateClientUnit = name: clientCfg: {
|
generateClientUnit = name: clientCfg: {
|
||||||
|
@@ -275,30 +329,19 @@ let
       description = "wstunnel client - ${name}";
       requires = [ "network.target" "network-online.target" ];
       after = [ "network.target" "network-online.target" ];
-      wantedBy = optional clientCfg.autoStart "multi-user.target";
+      wantedBy = lib.optional clientCfg.autoStart "multi-user.target";
+
+      environment.RUST_LOG = clientCfg.loggingLevel;

       serviceConfig = {
-        Type = "simple";
-        ExecStart = with clientCfg; ''
-          ${package}/bin/wstunnel client \
-            ${concatStringsSep " " (builtins.map (x: "--local-to-remote ${x}") localToRemote)} \
-            ${concatStringsSep " " (builtins.map (x: "--remote-to-local ${x}") remoteToLocal)} \
-            ${concatStringsSep " " (mapAttrsToList (n: v: "--http-headers \"${n}: ${v}\"") customHeaders)} \
-            ${optionalString (httpProxy != null) "--http-proxy ${httpProxy}"} \
-            ${optionalString (soMark != null) "--socket-so-mark=${toString soMark}"} \
-            ${optionalString (upgradePathPrefix != null) "--http-upgrade-path-prefix ${upgradePathPrefix}"} \
-            ${optionalString (tlsSNI != null) "--tls-sni-override ${tlsSNI}"} \
-            ${optionalString tlsVerifyCertificate "--tls-verify-certificate"} \
-            ${optionalString (websocketPingInterval != null) "--websocket-ping-frequency-sec ${toString websocketPingInterval}"} \
-            ${optionalString (upgradeCredentials != null) "--http-upgrade-credentials ${upgradeCredentials}"} \
-            ${optionalString (loggingLevel != null) "--log-lvl ${loggingLevel}"} \
-            ${attrsToArgs extraArgs} \
-            ${utils.escapeSystemdExecArg connectTo}
-        '';
-        EnvironmentFile = optional (clientCfg.environmentFile != null) clientCfg.environmentFile;
+        Type = "exec";
+        EnvironmentFile =
+          lib.optional (clientCfg.environmentFile != null) clientCfg.environmentFile;
         DynamicUser = true;
         PrivateTmp = true;
-        AmbientCapabilities = (optionals (clientCfg.soMark != null) [ "CAP_NET_ADMIN" ]) ++ (optionals (clientCfg.addNetBind) [ "CAP_NET_BIND_SERVICE" ]);
+        AmbientCapabilities =
+          (lib.optionals clientCfg.addNetBind [ "CAP_NET_BIND_SERVICE" ]) ++
+          (lib.optionals (clientCfg.soMark != null) [ "CAP_NET_ADMIN" ]);
         NoNewPrivileges = true;
         RestrictNamespaces = "uts ipc pid user cgroup";
         ProtectSystem = "strict";
@@ -308,17 +351,45 @@ let
         ProtectControlGroups = true;
         PrivateDevices = true;
         RestrictSUIDSGID = true;
+
+        Restart = "on-failure";
+        RestartSec = 2;
+        RestartSteps = 20;
+        RestartMaxDelaySec = "5min";
       };
+
+      script = with clientCfg; ''
+        ${lib.getExe package} \
+          client \
+          ${lib.cli.toGNUCommandLineShell { } (
+            lib.recursiveUpdate
+            {
+              local-to-remote = localToRemote;
+              remote-to-local = remoteToLocal;
+              http-headers = lib.mapAttrsToList (n: v: "${n}:${v}") customHeaders;
+              http-proxy = httpProxy;
+              socket-so-mark = soMark;
+              http-upgrade-path-prefix = upgradePathPrefix;
+              tls-sni-override = tlsSNI;
+              tls-verify-certificate = tlsVerifyCertificate;
+              websocket-ping-frequency-sec = websocketPingInterval;
+              http-upgrade-credentials = upgradeCredentials;
+            }
+            extraArgs
+          )} \
+          ${lib.escapeShellArg connectTo}
+      '';
     };
   };
-in {
+in
+{
   options.services.wstunnel = {
-    enable = mkEnableOption "wstunnel";
+    enable = lib.mkEnableOption "wstunnel";

-    servers = mkOption {
+    servers = lib.mkOption {
       description = "`wstunnel` servers to set up.";
-      type = types.attrsOf (types.submodule serverSubmodule);
-      default = {};
+      type = lib.types.attrsOf (lib.types.submodule serverSubmodule);
+      default = { };
       example = {
         "wg-tunnel" = {
           listen = {
@@ -336,13 +407,13 @@ in {
       };
     };

-    clients = mkOption {
+    clients = lib.mkOption {
       description = "`wstunnel` clients to set up.";
-      type = types.attrsOf (types.submodule clientSubmodule);
-      default = {};
+      type = lib.types.attrsOf (lib.types.submodule clientSubmodule);
+      default = { };
       example = {
         "wg-tunnel" = {
-          connectTo = "https://wstunnel.server.com:8443";
+          connectTo = "wss://wstunnel.server.com:8443";
           localToRemote = [
             "tcp://1212:google.com:443"
             "tcp://2:n.lan:4?proxy_protocol"
@@ -356,28 +427,42 @@ in {
       };
     };

-  config = mkIf cfg.enable {
-    systemd.services = (mapAttrs' generateServerUnit (filterAttrs (n: v: v.enable) cfg.servers)) // (mapAttrs' generateClientUnit (filterAttrs (n: v: v.enable) cfg.clients));
+  config = lib.mkIf cfg.enable {
+    systemd.services =
+      (lib.mapAttrs' generateServerUnit (lib.filterAttrs (n: v: v.enable) cfg.servers)) //
+      (lib.mapAttrs' generateClientUnit (lib.filterAttrs (n: v: v.enable) cfg.clients));

-    assertions = (mapAttrsToList (name: serverCfg: {
-      assertion = !(serverCfg.useACMEHost != null && (serverCfg.tlsCertificate != null || serverCfg.tlsKey != null));
-      message = ''
-        Options services.wstunnel.servers."${name}".useACMEHost and services.wstunnel.servers."${name}".{tlsCertificate, tlsKey} are mutually exclusive.
-      '';
-    }) cfg.servers) ++
-    (mapAttrsToList (name: serverCfg: {
-      assertion = !((serverCfg.tlsCertificate != null || serverCfg.tlsKey != null) && !(serverCfg.tlsCertificate != null && serverCfg.tlsKey != null));
-      message = ''
-        services.wstunnel.servers."${name}".tlsCertificate and services.wstunnel.servers."${name}".tlsKey need to be set together.
-      '';
-    }) cfg.servers) ++
-    (mapAttrsToList (name: clientCfg: {
-      assertion = !(clientCfg.localToRemote == [] && clientCfg.remoteToLocal == []);
-      message = ''
-        Either one of services.wstunnel.clients."${name}".localToRemote or services.wstunnel.clients."${name}".remoteToLocal must be set.
-      '';
-    }) cfg.clients);
+    assertions =
+      (lib.mapAttrsToList
+        (name: serverCfg: {
+          assertion =
+            !(serverCfg.useACMEHost != null && serverCfg.tlsCertificate != null);
+          message = ''
+            Options services.wstunnel.servers."${name}".useACMEHost and services.wstunnel.servers."${name}".{tlsCertificate, tlsKey} are mutually exclusive.
+          '';
+        })
+        cfg.servers) ++
+
+      (lib.mapAttrsToList
+        (name: serverCfg: {
+          assertion =
+            (serverCfg.tlsCertificate == null && serverCfg.tlsKey == null) ||
+            (serverCfg.tlsCertificate != null && serverCfg.tlsKey != null);
+          message = ''
+            services.wstunnel.servers."${name}".tlsCertificate and services.wstunnel.servers."${name}".tlsKey need to be set together.
+          '';
+        })
+        cfg.servers) ++
+
+      (lib.mapAttrsToList
+        (name: clientCfg: {
+          assertion = !(clientCfg.localToRemote == [ ] && clientCfg.remoteToLocal == [ ]);
+          message = ''
+            Either one of services.wstunnel.clients."${name}".localToRemote or services.wstunnel.clients."${name}".remoteToLocal must be set.
+          '';
+        })
+        cfg.clients);
   };

-  meta.maintainers = with maintainers; [ alyaeanyx neverbehave ];
+  meta.maintainers = with lib.maintainers; [ alyaeanyx rvdp neverbehave ];
 }
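A side note on the argument handling introduced above: `lib.cli.toGNUCommandLineShell` drops `null` values, renders `true` as a bare flag, and repeats the flag for each list element, which is why the per-option `optionalString` plumbing of the old `ExecStart` collapses into a single attribute set. A minimal, hedged sketch of that behaviour (the attribute values are placeholders, not taken from the module, and the exact quoting of the result may differ):

    # evaluated with `nix-instantiate --eval`, assuming <nixpkgs> is available
    let
      lib = import <nixpkgs/lib>;
    in
      lib.cli.toGNUCommandLineShell { } {
        local-to-remote = [ "tcp://1212:google.com:443" ];  # lists repeat the flag per element
        tls-verify-certificate = true;                      # booleans become bare flags
        http-proxy = null;                                  # nulls are dropped entirely
      }
    # => roughly: --local-to-remote 'tcp://1212:google.com:443' --tls-verify-certificate
    #    (each word shell-escaped)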
@@ -4,7 +4,9 @@ with lib;

 let
   cfg = config.services.zerotierone;
-  localConfFile = pkgs.writeText "zt-local.conf" (builtins.toJSON cfg.localConf);
+
+  settingsFormat = pkgs.formats.json {};
+  localConfFile = settingsFormat.generate "zt-local.conf" cfg.localConf;
   localConfFilePath = "/var/lib/zerotier-one/local.conf";
 in
 {
@@ -41,7 +43,7 @@ in
       example = {
         settings.allowTcpFallbackRelay = false;
       };
-      type = types.nullOr types.attrs;
+      type = settingsFormat.type;
     };

   config = mkIf cfg.enable {
@@ -60,7 +62,7 @@ in
       chown -R root:root /var/lib/zerotier-one
     '' + (concatMapStrings (netId: ''
       touch "/var/lib/zerotier-one/networks.d/${netId}.conf"
-    '') cfg.joinNetworks) + optionalString (cfg.localConf != null) ''
+    '') cfg.joinNetworks) + optionalString (cfg.localConf != {}) ''
       if [ -L "${localConfFilePath}" ]
       then
         rm ${localConfFilePath}
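For context on the `settingsFormat` switch above: `pkgs.formats.json {}` provides both a module type and a generator, so the option value is checked as JSON-serialisable data and rendered by the same code that writes `/var/lib/zerotier-one/local.conf`. A hedged sketch of the resulting user-facing configuration (only `allowTcpFallbackRelay` comes from the module's own example; everything else is illustrative):

    # NixOS configuration sketch, written against the option shown in the hunk above
    services.zerotierone = {
      enable = true;
      # Validated by settingsFormat.type and written to local.conf by the generator.
      localConf.settings.allowTcpFallbackRelay = false;
    };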
@@ -6,7 +6,7 @@ let
   cfg = config.services.sslmate-agent;

 in {
-  meta.maintainers = with maintainers; [ wolfangaukang ];
+  meta.maintainers = [ ];

   options = {
     services.sslmate-agent = {
@@ -4,7 +4,7 @@ let
   settingsFormat = (pkgs.formats.json { });
 in
 {
-  meta.maintainers = with lib.maintainers; [ mohe2015 ];
+  meta.maintainers = with lib.maintainers; [ ];

   options = {
     services.step-ca = {
@@ -174,7 +174,10 @@ in
       };
     };

-    package = mkPackageOption pkgs "transmission" {};
+    package = mkPackageOption pkgs "transmission" {
+      default = "transmission_3";
+      example = "pkgs.transmission_4";
+    };

     downloadDirPermissions = mkOption {
       type = with types; nullOr str;
@@ -33,10 +33,10 @@ let

     ${optionalString (cfg.settings.DB_CONNECTION == "sqlite")
       "touch ${cfg.dataDir}/storage/database/database.sqlite"}
+    ${artisan} cache:clear
     ${artisan} package:discover
     ${artisan} firefly-iii:upgrade-database
     ${artisan} firefly-iii:laravel-passport-keys
-    ${artisan} cache:clear
     ${artisan} view:cache
     ${artisan} route:cache
     ${artisan} config:cache
@@ -283,8 +283,6 @@ in {
       before = [ "phpfpm-firefly-iii.service" ];
       serviceConfig = {
         ExecStart = firefly-iii-maintenance;
-        RuntimeDirectory = "phpfpm";
-        RuntimeDirectoryPreserve = true;
         RemainAfterExit = true;
       } // commonServiceConfig;
       unitConfig.JoinsNamespaceOf = "phpfpm-firefly-iii.service";
@@ -68,13 +68,11 @@ to `/auth`. See the option description
 for more details.
 :::

-[](#opt-services.keycloak.settings.hostname-strict-backchannel)
-determines whether Keycloak should force all requests to go
-through the frontend URL. By default,
-Keycloak allows backend requests to
-instead use its local hostname or IP address and may also
-advertise it to clients through its OpenID Connect Discovery
-endpoint.
+[](#opt-services.keycloak.settings.hostname-backchannel-dynamic)
+Keycloak has the capability to offer a separate URL for backchannel requests,
+enabling internal communication while maintaining the use of a public URL
+for frontchannel requests. Moreover, the backchannel is dynamically
+resolved based on incoming headers endpoint.

 For more information on hostname configuration, see the [Hostname
 section of the Keycloak Server Installation and Configuration
@@ -328,8 +328,7 @@ in
         };

         hostname = mkOption {
-          type = nullOr str;
-          default = null;
+          type = str;
           example = "keycloak.example.com";
           description = ''
             The hostname part of the public URL used as base for
@@ -340,16 +339,13 @@ in
           '';
         };

-        hostname-strict-backchannel = mkOption {
+        hostname-backchannel-dynamic = mkOption {
           type = bool;
           default = false;
           example = true;
           description = ''
-            Whether Keycloak should force all requests to go
-            through the frontend URL. By default, Keycloak allows
-            backend requests to instead use its local hostname or
-            IP address and may also advertise it to clients
-            through its OpenID Connect Discovery endpoint.
+            Enables dynamic resolving of backchannel URLs,
+            including hostname, scheme, port and context path.

             See <https://www.keycloak.org/server/hostname>
             for more information about hostname configuration.
@@ -482,12 +478,20 @@ in
           message = "Setting up a local PostgreSQL db for Keycloak requires `standard_conforming_strings` turned on to work reliably";
         }
         {
-          assertion = cfg.settings.hostname != null || cfg.settings.hostname-url or null != null;
-          message = "Setting the Keycloak hostname is required, see `services.keycloak.settings.hostname`";
+          assertion = cfg.settings.hostname-url or null == null;
+          message = ''
+            The option `services.keycloak.settings.hostname-url' has been removed.
+            Set `services.keycloak.settings.hostname' instead.
+            See [New Hostname options](https://www.keycloak.org/docs/25.0.0/upgrading/#new-hostname-options) for details.
+          '';
         }
         {
-          assertion = !(cfg.settings.hostname != null && cfg.settings.hostname-url or null != null);
-          message = "`services.keycloak.settings.hostname` and `services.keycloak.settings.hostname-url` are mutually exclusive";
+          assertion = cfg.settings.hostname-strict-backchannel or null == null;
+          message = ''
+            The option `services.keycloak.settings.hostname-strict-backchannel' has been removed.
+            Set `services.keycloak.settings.hostname-backchannel-dynamic' instead.
+            See [New Hostname options](https://www.keycloak.org/docs/25.0.0/upgrading/#new-hostname-options) for details.
+          '';
         }
       ];
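Taken together, the two new assertions above reject the removed settings outright instead of silently ignoring them. A hedged sketch of a configuration written against the new options (the hostname is a placeholder, and enabling `hostname-backchannel-dynamic` is a deployment decision, not a drop-in replacement for the removed boolean):

    services.keycloak.settings = {
      hostname = "keycloak.example.com";
      # Only if backchannel URLs should be resolved dynamically from incoming requests:
      hostname-backchannel-dynamic = true;
    };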
@@ -80,6 +80,12 @@ let
     mkKeyValue = generators.mkKeyValueDefault {} " = ";
   };

+  phpCli = concatStringsSep " " ([
+    "${getExe phpPackage}"
+  ] ++ optionals (cfg.cli.memoryLimit != null) [
+    "-dmemory_limit=${cfg.cli.memoryLimit}"
+  ]);
+
   occ = pkgs.writeScriptBin "nextcloud-occ" ''
     #! ${pkgs.runtimeShell}
     cd ${webroot}
@@ -89,7 +95,7 @@ let
     fi
     export NEXTCLOUD_CONFIG_DIR="${datadir}/config"
     $sudo \
-      ${phpPackage}/bin/php \
+      ${phpCli} \
       occ "$@"
   '';

@@ -196,6 +202,9 @@ let
 in {

   imports = [
+    (mkRenamedOptionModule
+      [ "services" "nextcloud" "cron" "memoryLimit" ]
+      [ "services" "nextcloud" "cli" "memoryLimit" ])
     (mkRemovedOptionModule [ "services" "nextcloud" "enableBrokenCiphersForSSE" ] ''
       This option has no effect since there's no supported Nextcloud version packaged here
       using OpenSSL for RC4 SSE.
@@ -446,7 +455,13 @@ in {
       dbtableprefix = mkOption {
         type = types.nullOr types.str;
         default = null;
-        description = "Table prefix in Nextcloud's database.";
+        description = ''
+          Table prefix in Nextcloud's database.
+
+          __Note:__ since Nextcloud 20 it's not an option anymore to create a database
+          schema with a custom table prefix. This option only exists for backwards compatibility
+          with installations that were originally provisioned with Nextcloud <20.
+        '';
       };
       adminuser = mkOption {
         type = types.str;
@@ -642,7 +657,6 @@ in {
       type = types.package;
       default = occ;
       defaultText = literalMD "generated script";
-      internal = true;
       description = ''
         The nextcloud-occ program preconfigured to target this Nextcloud instance.
       '';
@@ -794,7 +808,7 @@ in {
       };
     };

-    cron.memoryLimit = mkOption {
+    cli.memoryLimit = mkOption {
       type = types.nullOr types.str;
       default = null;
       example = "1G";
@@ -826,6 +840,13 @@ in {
        Using config.services.nextcloud.poolConfig is deprecated and will become unsupported in a future release.
        Please migrate your configuration to config.services.nextcloud.poolSettings.
      '')
+    ++ (optional (cfg.config.dbtableprefix != null) ''
+      Using `services.nextcloud.config.dbtableprefix` is deprecated. Fresh installations with this
+      option set are not allowed anymore since v20.
+
+      If you have an existing installation with a custom table prefix, make sure it is
+      set correctly in `config.php` and remove the option from your NixOS config.
+    '')
     ++ (optional (versionOlder cfg.package.version "25") (upgradeWarning 24 "22.11"))
     ++ (optional (versionOlder cfg.package.version "26") (upgradeWarning 25 "23.05"))
     ++ (optional (versionOlder cfg.package.version "27") (upgradeWarning 26 "23.11"))
@@ -1010,14 +1031,8 @@ in {
       serviceConfig = {
         Type = "exec";
         User = "nextcloud";
-        ExecCondition = "${lib.getExe phpPackage} -f ${webroot}/occ status -e";
-        ExecStart = lib.concatStringsSep " " ([
-          (lib.getExe phpPackage)
-        ] ++ optional (cfg.cron.memoryLimit != null) "-dmemory_limit=${cfg.cron.memoryLimit}"
-        ++ [
-          "-f"
-          "${webroot}/cron.php"
-        ]);
+        ExecCondition = "${phpCli} -f ${webroot}/occ status -e";
+        ExecStart = "${phpCli} -f ${webroot}/cron.php";
         KillMode = "process";
       };
     };
@@ -1041,7 +1056,7 @@ in {
       serviceConfig = {
         Type = "exec";
         User = "nextcloud";
-        ExecCondition = "${lib.getExe phpPackage} -f ${webroot}/occ status -e";
+        ExecCondition = "${phpCli} -f ${webroot}/occ status -e";
       };
     };
   };
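One practical effect of the rename above is that the memory limit now applies to every CLI invocation built from `phpCli` (the occ wrapper, the cron unit, and the ExecCondition checks), not just the cron service. A hedged usage sketch, reusing the option's own example value:

    services.nextcloud.cli.memoryLimit = "1G";
    # The old name keeps working through the mkRenamedOptionModule added above:
    #   services.nextcloud.cron.memoryLimit = "1G";   # warns and maps to cli.memoryLimit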
@@ -219,5 +219,5 @@ in
     users.groups.zitadel = lib.mkIf (cfg.group == "zitadel") { };
   };

-  meta.maintainers = with lib.maintainers; [ Sorixelle ];
+  meta.maintainers = [ ];
 }
@@ -71,9 +71,7 @@ in {

     host = mkOption {
       type = lib.types.str;
-      default = "bluemap.${config.networking.domain}";
-      defaultText = lib.literalExpression "bluemap.\${config.networking.domain}";
-      description = "Domain to configure nginx for";
+      description = "Domain on which nginx will serve the bluemap webapp";
     };

     onCalendar = mkOption {
@@ -116,7 +116,7 @@ in
   ] # TODO: NetworkManager doesn't belong here
   ++ optional config.networking.networkmanager.enable networkmanagerapplet
   ++ optional config.powerManagement.enable xfce4-power-manager
-  ++ optionals config.hardware.pulseaudio.enable [
+  ++ optionals (config.hardware.pulseaudio.enable || config.services.pipewire.pulse.enable) [
     pavucontrol
     # volume up/down keys support:
     # xfce4-pulseaudio-plugin includes all the functionalities of xfce4-volumed-pulse
@@ -48,7 +48,7 @@ in

   assertions = (attrValues (mapAttrs
     (device: _: {
-      assertion = (any (fs: fs.device == device && (elem fs.fsType supportedFs)) config.system.build.fileSystems) || (hasAttr device config.boot.initrd.luks.devices);
+      assertion = (any (fs: fs.device == device && (elem fs.fsType supportedFs) || (fs.fsType == "zfs" && hasPrefix "${device}/" fs.device)) config.system.build.fileSystems) || (hasAttr device config.boot.initrd.luks.devices);
       message = ''
         No filesystem or LUKS device with the name ${device} is declared in your configuration.'';
     })
@@ -17,8 +17,7 @@ let
   cfgZED = config.services.zfs.zed;

   selectModulePackage = package: config.boot.kernelPackages.${package.kernelModuleAttribute};
-  clevisDatasets = map (e: e.device) (filter (e: e.device != null && (hasAttr e.device config.boot.initrd.clevis.devices) && e.fsType == "zfs" && (fsNeededForBoot e)) config.system.build.fileSystems);
+  clevisDatasets = attrNames (filterAttrs (device: _: any (e: e.fsType == "zfs" && (fsNeededForBoot e) && (e.device == device || hasPrefix "${device}/" e.device)) config.system.build.fileSystems) config.boot.initrd.clevis.devices);

   inInitrd = config.boot.initrd.supportedFilesystems.zfs or false;
   inSystem = config.boot.supportedFilesystems.zfs or false;
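The reworked `clevisDatasets` above is keyed off `boot.initrd.clevis.devices` and also matches filesystems that live on a child dataset of the named device, which is what allows unlocking an encrypted parent dataset. A hedged configuration sketch mirroring the parent-dataset installer test later in this commit (pool and dataset names are illustrative):

    # Pool "rpool" is encrypted at the top level; the root filesystem is a child dataset.
    fileSystems."/".device = "rpool/root";
    fileSystems."/".fsType = "zfs";
    boot.initrd.clevis.devices."rpool".secretFile = "/etc/nixos/clevis-secret.jwe";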
@@ -9,10 +9,10 @@ in
   config = {
     system.build.OCIImage = import ../../lib/make-disk-image.nix {
       inherit config lib pkgs;
+      inherit (cfg) diskSize;
       name = "oci-image";
       configFile = ./oci-config-user.nix;
       format = "qcow2";
-      diskSize = 8192;
       partitionTableType = if cfg.efi then "efi" else "legacy";
     };

@@ -9,6 +9,12 @@
         Whether the OCI instance is using EFI.
       '';
     };
+    diskSize = lib.mkOption {
+      type = lib.types.int;
+      default = 8192;
+      description = "Size of the disk image created in MB.";
+      example = "diskSize = 12 * 1024; # 12GiB";
+    };
   };
  };
 }
@@ -22,5 +22,5 @@
     };
   };

-  meta.maintainers = [ lib.maintainers.lheckemann ];
+  meta.maintainers = [ ];
 }
@@ -11,8 +11,7 @@ let
     #!${pkgs.runtimeShell}
     if [ ! -e ~/.ssh/authorized_keys ]; then
       mkdir -m 0700 -p ~/.ssh
-      echo "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key" >> ~/.ssh/authorized_keys
-      chmod 0600 ~/.ssh/authorized_keys
+      install -m 0600 <(echo "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key") ~/.ssh/authorized_keys
     fi
   '';
 in

third_party/nixpkgs/nixos/tests/all-tests.nix (vendored, 10 changes)
@@ -500,7 +500,8 @@ in {
   libreddit = handleTest ./libreddit.nix {};
   librenms = handleTest ./librenms.nix {};
   libresprite = handleTest ./libresprite.nix {};
-  libreswan = handleTest ./libreswan.nix {};
+  libreswan = runTest ./libreswan.nix;
+  libreswan-nat = runTest ./libreswan-nat.nix;
   librewolf = handleTest ./firefox.nix { firefoxPackage = pkgs.librewolf; };
   libuiohook = handleTest ./libuiohook.nix {};
   libvirtd = handleTest ./libvirtd.nix {};
@@ -774,7 +775,7 @@ in {
   printing-service = handleTest ./printing.nix { socket = false; };
   private-gpt = handleTest ./private-gpt.nix {};
   privoxy = handleTest ./privoxy.nix {};
-  prometheus = handleTest ./prometheus.nix {};
+  prometheus = handleTest ./prometheus {};
   prometheus-exporters = handleTest ./prometheus-exporters.nix {};
   prosody = handleTest ./xmpp/prosody.nix {};
   prosody-mysql = handleTest ./xmpp/prosody-mysql.nix {};
@@ -957,6 +958,7 @@ in {
   systemd-homed = handleTest ./systemd-homed.nix {};
   systemtap = handleTest ./systemtap.nix {};
   tandoor-recipes = handleTest ./tandoor-recipes.nix {};
+  tandoor-recipes-script-name = handleTest ./tandoor-recipes-script-name.nix {};
   tang = handleTest ./tang.nix {};
   taskserver = handleTest ./taskserver.nix {};
   tayga = handleTest ./tayga.nix {};
@@ -966,6 +968,7 @@ in {
   teleport = handleTest ./teleport.nix {};
   thelounge = handleTest ./thelounge.nix {};
   terminal-emulators = handleTest ./terminal-emulators.nix {};
+  thanos = handleTest ./thanos.nix {};
   tiddlywiki = handleTest ./tiddlywiki.nix {};
   tigervnc = handleTest ./tigervnc.nix {};
   timescaledb = handleTest ./timescaledb.nix {};
@@ -980,7 +983,7 @@ in {
   traefik = handleTestOn ["aarch64-linux" "x86_64-linux"] ./traefik.nix {};
   trafficserver = handleTest ./trafficserver.nix {};
   transfer-sh = handleTest ./transfer-sh.nix {};
-  transmission = handleTest ./transmission.nix { transmission = pkgs.transmission; };
+  transmission_3 = handleTest ./transmission.nix { transmission = pkgs.transmission_3; };
   transmission_4 = handleTest ./transmission.nix { transmission = pkgs.transmission_4; };
   # tracee requires bpf
   tracee = handleTestOn ["x86_64-linux"] ./tracee.nix {};
@@ -1045,6 +1048,7 @@ in {
   wordpress = handleTest ./wordpress.nix {};
   wrappers = handleTest ./wrappers.nix {};
   writefreely = handleTest ./web-apps/writefreely.nix {};
+  wstunnel = runTest ./wstunnel.nix;
   xandikos = handleTest ./xandikos.nix {};
   xautolock = handleTest ./xautolock.nix {};
   xfce = handleTest ./xfce.nix {};

third_party/nixpkgs/nixos/tests/crabfit.nix (vendored, 2 changes)
@@ -4,7 +4,7 @@ import ./make-test-python.nix (
   {
     name = "crabfit";

-    meta.maintainers = with lib.maintainers; [ thubrecht ];
+    meta.maintainers = with lib.maintainers; [ ];

     nodes = {
       machine =
@@ -44,6 +44,8 @@ in {
     # test loading custom components
     customComponents = with pkgs.home-assistant-custom-components; [
       prometheus_sensor
+      # tests loading multiple components from a single package
+      spook
     ];

     # test loading lovelace modules
@@ -179,7 +181,8 @@ in {

     with subtest("Check that custom components get installed"):
         hass.succeed("test -f ${configDir}/custom_components/prometheus_sensor/manifest.json")
-        hass.wait_until_succeeds("journalctl -u home-assistant.service | grep -q 'We found a custom integration prometheus_sensor which has not been tested by Home Assistant'")
+        for integration in ("prometheus_sensor", "spook", "spook_inverse"):
+            hass.wait_until_succeeds(f"journalctl -u home-assistant.service | grep -q 'We found a custom integration {integration} which has not been tested by Home Assistant'")

     with subtest("Check that lovelace modules are referenced and fetchable"):
         hass.succeed("grep -q 'mini-graph-card-bundle.js' '${configDir}/configuration.yaml'")
@@ -228,7 +231,8 @@ in {
         cursor = get_journal_cursor()
         hass.succeed("${system}/specialisation/removeCustomThings/bin/switch-to-configuration test")
         hass.fail("grep -q 'mini-graph-card-bundle.js' '${configDir}/ui-lovelace.yaml'")
-        hass.fail("test -f ${configDir}/custom_components/prometheus_sensor/manifest.json")
+        for integration in ("prometheus_sensor", "spook", "spook_inverse"):
+            hass.fail(f"test -f ${configDir}/custom_components/{integration}/manifest.json")
         wait_for_homeassistant(cursor)

     with subtest("Check that no errors were logged"):
@@ -9,7 +9,7 @@ let
   testWithCompressor = compressor: testing.makeTest {
     name = "initrd-secrets-${compressor}";

-    meta.maintainers = [ lib.maintainers.lheckemann ];
+    meta.maintainers = [ ];

     nodes.machine = { ... }: {
       virtualisation.useBootLoader = true;
@@ -37,6 +37,8 @@
     clevisLuksFallback
     clevisZfs
     clevisZfsFallback
+    clevisZfsParentDataset
+    clevisZfsParentDatasetFallback
     gptAutoRoot
     clevisBcachefs
     clevisBcachefsFallback
third_party/nixpkgs/nixos/tests/installer.nix (vendored, 16 changes)
@@ -714,7 +714,7 @@ let
     '';
   };

-  mkClevisZfsTest = { fallback ? false }: makeInstallerTest "clevis-zfs${optionalString fallback "-fallback"}" {
+  mkClevisZfsTest = { fallback ? false, parentDataset ? false }: makeInstallerTest "clevis-zfs${optionalString parentDataset "-parent-dataset"}${optionalString fallback "-fallback"}" {
     clevisTest = true;
     clevisFallbackTest = fallback;
     enableOCR = fallback;
@@ -731,17 +731,27 @@ let
       "udevadm settle",
       "mkswap /dev/vda2 -L swap",
       "swapon -L swap",
+    '' + optionalString (!parentDataset) ''
       "zpool create -O mountpoint=legacy rpool /dev/vda3",
       "echo -n password | zfs create"
       + " -o encryption=aes-256-gcm -o keyformat=passphrase rpool/root",
+    '' + optionalString (parentDataset) ''
+      "echo -n password | zpool create -O mountpoint=none -O encryption=on -O keyformat=passphrase rpool /dev/vda3",
+      "zfs create -o mountpoint=legacy rpool/root",
+    '' +
+    ''
       "mount -t zfs rpool/root /mnt",
       "mkfs.ext3 -L boot /dev/vda1",
       "mkdir -p /mnt/boot",
       "mount LABEL=boot /mnt/boot",
       "udevadm settle")
     '';
-    extraConfig = ''
+    extraConfig = optionalString (!parentDataset) ''
       boot.initrd.clevis.devices."rpool/root".secretFile = "/etc/nixos/clevis-secret.jwe";
+    '' + optionalString (parentDataset) ''
+      boot.initrd.clevis.devices."rpool".secretFile = "/etc/nixos/clevis-secret.jwe";
+    '' +
+    ''
       boot.zfs.requestEncryptionCredentials = true;
@@ -1359,6 +1369,8 @@ in {
   clevisLuksFallback = mkClevisLuksTest { fallback = true; };
   clevisZfs = mkClevisZfsTest { };
   clevisZfsFallback = mkClevisZfsTest { fallback = true; };
+  clevisZfsParentDataset = mkClevisZfsTest { parentDataset = true; };
+  clevisZfsParentDatasetFallback = mkClevisZfsTest { parentDataset = true; fallback = true; };
 } // optionalAttrs systemdStage1 {
   stratisRoot = makeInstallerTest "stratisRoot" {
     createPartitions = ''

third_party/nixpkgs/nixos/tests/keycloak.nix (vendored, 9 changes)
@@ -44,7 +44,6 @@ let
       };
       plugins = with config.services.keycloak.package.plugins; [
         keycloak-discord
-        keycloak-metrics-spi
       ];
     };
     environment.systemPackages = with pkgs; [
@@ -122,14 +121,6 @@ let
         | jq -r '"Authorization: bearer " + .access_token' >admin_auth_header
     """)

-    # Register the metrics SPI
-    keycloak.succeed(
-        """${pkgs.jre}/bin/keytool -import -alias snakeoil -file ${certs.ca.cert} -storepass aaaaaa -keystore cacert.jks -noprompt""",
-        """KC_OPTS='-Djavax.net.ssl.trustStore=cacert.jks -Djavax.net.ssl.trustStorePassword=aaaaaa' kcadm.sh config credentials --server '${frontendUrl}' --realm master --user admin --password "$(<${adminPasswordFile})" """,
-        """KC_OPTS='-Djavax.net.ssl.trustStore=cacert.jks -Djavax.net.ssl.trustStorePassword=aaaaaa' kcadm.sh update events/config -s 'eventsEnabled=true' -s 'adminEventsEnabled=true' -s 'eventsListeners+=metrics-listener'""",
-        """curl -sSf '${frontendUrl}/realms/master/metrics' | grep '^keycloak_admin_event_UPDATE'"""
-    )
-
     # Publish the realm, including a test OIDC client and user
     keycloak.succeed(
         "curl -sSf -H @admin_auth_header -X POST -H 'Content-Type: application/json' -d @${realmDataJson} '${frontendUrl}/admin/realms/'"

third_party/nixpkgs/nixos/tests/libreswan-nat.nix (vendored, new file, 238 lines)
@@ -0,0 +1,238 @@
# This test sets up an IPsec VPN server that allows a client behind an IPv4 NAT
# router to access the IPv6 internet. We check that the client initially can't
# ping an IPv6 hosts and its connection to the server can be eavesdropped by
# the router, but once the IPsec tunnel is enstablished it can talk to an
# IPv6-only host and the connection is secure.
#
# Notes:
#   - the VPN is implemented using policy-based routing.
#   - the client is assigned an IPv6 address from the same /64 subnet
#     of the server, without DHCPv6 or SLAAC.
#   - the server acts as NDP proxy for the client, so that the latter
#     becomes reachable at its assigned IPv6 via the server.
#   - the client falls back to TCP if UDP is blocked

{ lib, pkgs, ... }:

let

  # Common network setup
  baseNetwork = {
    # shared hosts file
    networking.extraHosts = lib.mkVMOverride ''
      203.0.113.1 router
      203.0.113.2 server
      2001:db8::2 inner
      192.168.1.1 client
    '';
    # open a port for testing
    networking.firewall.allowedUDPPorts = [ 1234 ];
  };

  # Common IPsec configuration
  baseTunnel = {
    services.libreswan.enable = true;
    environment.etc."ipsec.d/tunnel.secrets" =
      { text = ''@server %any : PSK "j1JbIi9WY07rxwcNQ6nbyThKCf9DGxWOyokXIQcAQUnafsNTUJxfsxwk9WYK8fHj"'';
        mode = "600";
      };
  };

  # Helpers to add a static IP address on an interface
  setAddress4 = iface: addr: {
    networking.interfaces.${iface}.ipv4.addresses =
      lib.mkVMOverride [ { address = addr; prefixLength = 24; } ];
  };
  setAddress6 = iface: addr: {
    networking.interfaces.${iface}.ipv6.addresses =
      lib.mkVMOverride [ { address = addr; prefixLength = 64; } ];
  };

in

{
  name = "libreswan-nat";
  meta = with lib.maintainers; {
    maintainers = [ rnhmjoj ];
  };

  nodes.router = { pkgs, ... }: lib.mkMerge [
    baseNetwork
    (setAddress4 "eth1" "203.0.113.1")
    (setAddress4 "eth2" "192.168.1.1")
    {
      virtualisation.vlans = [ 1 2 ];
      environment.systemPackages = [ pkgs.tcpdump ];
      networking.nat = {
        enable = true;
        externalInterface = "eth1";
        internalInterfaces = [ "eth2" ];
      };
      networking.firewall.trustedInterfaces = [ "eth2" ];
    }
  ];

  nodes.inner = lib.mkMerge [
    baseNetwork
    (setAddress6 "eth1" "2001:db8::2")
    { virtualisation.vlans = [ 3 ]; }
  ];

  nodes.server = lib.mkMerge [
    baseNetwork
    baseTunnel
    (setAddress4 "eth1" "203.0.113.2")
    (setAddress6 "eth2" "2001:db8::1")
    {
      virtualisation.vlans = [ 1 3 ];
      networking.firewall.allowedUDPPorts = [ 500 4500 ];
      networking.firewall.allowedTCPPorts = [ 993 ];

      # see https://github.com/NixOS/nixpkgs/pull/310857
      networking.firewall.checkReversePath = false;

      boot.kernel.sysctl = {
        # enable forwarding packets
        "net.ipv6.conf.all.forwarding" = 1;
        "net.ipv4.conf.all.forwarding" = 1;
        # enable NDP proxy for VPN clients
        "net.ipv6.conf.all.proxy_ndp" = 1;
      };

      services.libreswan.configSetup = "listen-tcp=yes";
      services.libreswan.connections.tunnel = ''
        # server
        left=203.0.113.2
        leftid=@server
        leftsubnet=::/0
        leftupdown=${pkgs.writeScript "updown" ''
          # act as NDP proxy for VPN clients
          if test "$PLUTO_VERB" = up-client-v6; then
            ip neigh add proxy "$PLUTO_PEER_CLIENT_NET" dev eth2
          fi
          if test "$PLUTO_VERB" = down-client-v6; then
            ip neigh del proxy "$PLUTO_PEER_CLIENT_NET" dev eth2
          fi
        ''}

        # clients
        right=%any
        rightaddresspool=2001:db8:0:0:c::/97
        modecfgdns=2001:db8::1

        # clean up vanished clients
        dpddelay=30

        auto=add
        keyexchange=ikev2
        rekey=no
        narrowing=yes
        fragmentation=yes
        authby=secret

        leftikeport=993
        retransmit-timeout=10s
      '';
    }
  ];

  nodes.client = lib.mkMerge [
    baseNetwork
    baseTunnel
    (setAddress4 "eth1" "192.168.1.2")
    {
      virtualisation.vlans = [ 2 ];
      networking.defaultGateway = {
        address = "192.168.1.1";
        interface = "eth1";
      };
      services.libreswan.connections.tunnel = ''
        # client
        left=%defaultroute
        leftid=@client
        leftmodecfgclient=yes
        leftsubnet=::/0

        # server
        right=203.0.113.2
        rightid=@server
        rightsubnet=::/0

        auto=add
        narrowing=yes
        rekey=yes
        fragmentation=yes
        authby=secret

        # fallback when UDP is blocked
        enable-tcp=fallback
        tcp-remoteport=993
        retransmit-timeout=5s
      '';
    }
  ];

  testScript =
    ''
    def client_to_host(machine, msg: str):
        """
        Sends a message from client to server
        """
        machine.execute("nc -lu :: 1234 >/tmp/msg &")
        client.sleep(1)
        client.succeed(f"echo '{msg}' | nc -uw 0 {machine.name} 1234")
        client.sleep(1)
        machine.succeed(f"grep '{msg}' /tmp/msg")


    def eavesdrop():
        """
        Starts eavesdropping on the router
        """
        match = "udp port 1234"
        router.execute(f"tcpdump -i eth1 -c 1 -Avv {match} >/tmp/log &")


    start_all()

    with subtest("Network is up"):
        client.wait_until_succeeds("ping -c1 server")
        client.succeed("systemctl restart ipsec")
        server.succeed("systemctl restart ipsec")

    with subtest("Router can eavesdrop cleartext traffic"):
        eavesdrop()
        client_to_host(server, "I secretly love turnip")
        router.sleep(1)
        router.succeed("grep turnip /tmp/log")

    with subtest("Libreswan is ready"):
        client.wait_for_unit("ipsec")
        server.wait_for_unit("ipsec")
        client.succeed("ipsec checkconfig")
        server.succeed("ipsec checkconfig")

    with subtest("Client can't ping VPN host"):
        client.fail("ping -c1 inner")

    with subtest("Client can start the tunnel"):
        client.succeed("ipsec start tunnel")
        client.succeed("ip -6 addr show lo | grep -q 2001:db8:0:0:c")

    with subtest("Client can ping VPN host"):
        client.wait_until_succeeds("ping -c1 2001:db8::1")
        client.succeed("ping -c1 inner")

    with subtest("Eve no longer can eavesdrop"):
        eavesdrop()
        client_to_host(inner, "Just kidding, I actually like rhubarb")
        router.sleep(1)
        router.fail("grep rhubarb /tmp/log")

    with subtest("TCP fallback is available"):
        server.succeed("iptables -I nixos-fw -p udp -j DROP")
        client.succeed("ipsec restart")
        client.execute("ipsec start tunnel")
        client.wait_until_succeeds("ping -c1 inner")
    '';
}
@@ -3,7 +3,7 @@
 # Eve can eavesdrop the plaintext traffic between Alice and Bob, but once they
 # enable the secure tunnel Eve's spying becomes ineffective.

-import ./make-test-python.nix ({ lib, pkgs, ... }:
+{ lib, pkgs, ... }:

 let
@@ -133,4 +133,4 @@ in
     eve.sleep(1)
     eve.fail("grep rhubarb /tmp/log")
   '';
-})
+}

third_party/nixpkgs/nixos/tests/matomo.nix (vendored, 6 changes)
@@ -41,14 +41,14 @@ let
 in {
   matomo = matomoTest pkgs.matomo // {
     name = "matomo";
-    meta.maintainers = with maintainers; [ florianjacob kiwi mmilata twey boozedog ];
+    meta.maintainers = with maintainers; [ florianjacob mmilata twey boozedog ];
   };
   matomo-beta = matomoTest pkgs.matomo-beta // {
     name = "matomo-beta";
-    meta.maintainers = with maintainers; [ florianjacob kiwi mmilata twey boozedog ];
+    meta.maintainers = with maintainers; [ florianjacob mmilata twey boozedog ];
   };
   matomo_5 = matomoTest pkgs.matomo_5 // {
     name = "matomo-5";
-    meta.maintainers = with maintainers; [ florianjacob kiwi mmilata twey boozedog ] ++ lib.teams.flyingcircus.members;
+    meta.maintainers = with maintainers; [ florianjacob mmilata twey boozedog ] ++ lib.teams.flyingcircus.members;
   };
 }

third_party/nixpkgs/nixos/tests/netdata.nix (vendored, 5 changes)
@@ -11,7 +11,10 @@ import ./make-test-python.nix ({ pkgs, ...} : {
     { pkgs, ... }:
       {
        environment.systemPackages = with pkgs; [ curl jq netdata ];
-       services.netdata.enable = true;
+       services.netdata = {
+         enable = true;
+         python.recommendedPythonPackages = true;
+       };
      };
   };
@@ -38,7 +38,6 @@ runTest ({ config, ... }: {
     services.nextcloud = {
       enable = true;
       datadir = "/var/lib/nextcloud-data";
-      config.dbtableprefix = "nixos_";
       autoUpdateApps = {
         enable = true;
         startAt = "20:00";
@@ -5,7 +5,7 @@ in
 {
   name = "private-gpt";
   meta = with lib.maintainers; {
-    maintainers = [ drupol ];
+    maintainers = [ ];
   };

   nodes = {

third_party/nixpkgs/nixos/tests/prometheus/alertmanager.nix (vendored, new file, 148 lines)
@ -0,0 +1,148 @@
|
||||||
|
import ../make-test-python.nix ({ lib, pkgs, ... }:
|
||||||
|
|
||||||
|
{
|
||||||
|
name = "prometheus-alertmanager";
|
||||||
|
|
||||||
|
nodes = {
|
||||||
|
prometheus = { config, pkgs, ... }: {
|
||||||
|
environment.systemPackages = [ pkgs.jq ];
|
||||||
|
|
||||||
|
networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];
|
||||||
|
|
||||||
|
services.prometheus = {
|
||||||
|
enable = true;
|
||||||
|
globalConfig.scrape_interval = "2s";
|
||||||
        alertmanagers = [
          {
            scheme = "http";
            static_configs = [
              {
                targets = [
                  "alertmanager:${toString config.services.prometheus.alertmanager.port}"
                ];
              }
            ];
          }
        ];

        rules = [
          ''
            groups:
              - name: test
                rules:
                  - alert: InstanceDown
                    expr: up == 0
                    for: 5s
                    labels:
                      severity: page
                    annotations:
                      summary: "Instance {{ $labels.instance }} down"
          ''
        ];

        scrapeConfigs = [
          {
            job_name = "alertmanager";
            static_configs = [
              {
                targets = [
                  "alertmanager:${toString config.services.prometheus.alertmanager.port}"
                ];
              }
            ];
          }
          {
            job_name = "node";
            static_configs = [
              {
                targets = [
                  "node:${toString config.services.prometheus.exporters.node.port}"
                ];
              }
            ];
          }
        ];
      };
    };

    alertmanager = { config, pkgs, ... }: {
      services.prometheus.alertmanager = {
        enable = true;
        openFirewall = true;

        configuration = {
          global = {
            resolve_timeout = "1m";
          };

          route = {
            # Root route node
            receiver = "test";
            group_by = ["..."];
            continue = false;
            group_wait = "1s";
            group_interval = "15s";
            repeat_interval = "24h";
          };

          receivers = [
            {
              name = "test";
              webhook_configs = [
                {
                  url = "http://logger:6725";
                  send_resolved = true;
                  max_alerts = 0;
                }
              ];
            }
          ];
        };
      };
    };

    logger = { config, pkgs, ... }: {
      networking.firewall.allowedTCPPorts = [ 6725 ];

      services.prometheus.alertmanagerWebhookLogger.enable = true;
    };
  };

  testScript = ''
    alertmanager.wait_for_unit("alertmanager")
    alertmanager.wait_for_open_port(9093)
    alertmanager.wait_until_succeeds("curl -s http://127.0.0.1:9093/-/ready")
    #alertmanager.wait_until_succeeds("journalctl -o cat -u alertmanager.service | grep 'version=${pkgs.prometheus-alertmanager.version}'")

    logger.wait_for_unit("alertmanager-webhook-logger")
    logger.wait_for_open_port(6725)

    prometheus.wait_for_unit("prometheus")
    prometheus.wait_for_open_port(9090)

    prometheus.wait_until_succeeds(
      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"alertmanager\"\}==1)' | "
      + "jq '.data.result[0].value[1]' | grep '\"1\"'"
    )

    prometheus.wait_until_succeeds(
      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(alertmanager_build_info)%20by%20(version)' | "
      + "jq '.data.result[0].metric.version' | grep '\"${pkgs.prometheus-alertmanager.version}\"'"
    )

    prometheus.wait_until_succeeds(
      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"node\"\}!=1)' | "
      + "jq '.data.result[0].value[1]' | grep '\"1\"'"
    )

    prometheus.wait_until_succeeds(
      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=alertmanager_notifications_total\{integration=\"webhook\"\}' | "
      + "jq '.data.result[0].value[1]' | grep -v '\"0\"'"
    )

    logger.wait_until_succeeds(
      "journalctl -o cat -u alertmanager-webhook-logger.service | grep '\"alertname\":\"InstanceDown\"'"
    )
  '';
})
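Once this scenario is up, the firing alert can also be inspected by hand through Alertmanager's v2 API; the port comes from the test above, while the exact jq filter is only an illustrative sketch:

    curl -s http://127.0.0.1:9093/api/v2/alerts | jq -r '.[].labels.alertname'   # should eventually print "InstanceDown"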
116 third_party/nixpkgs/nixos/tests/prometheus/config-reload.nix vendored Normal file
@ -0,0 +1,116 @@
import ../make-test-python.nix ({ lib, pkgs, ... }:

{
  name = "prometheus-config-reload";

  nodes = {
    prometheus = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        enableReload = true;
        globalConfig.scrape_interval = "2s";
        scrapeConfigs = [
          {
            job_name = "prometheus";
            static_configs = [
              {
                targets = [
                  "prometheus:${toString config.services.prometheus.port}"
                ];
              }
            ];
          }
        ];
      };

      specialisation = {
        "prometheus-config-change" = {
          configuration = {
            environment.systemPackages = [ pkgs.yq ];

            # This configuration just adds a new prometheus job
            # to scrape the node_exporter metrics of the s3 machine.
            services.prometheus = {
              scrapeConfigs = [
                {
                  job_name = "node";
                  static_configs = [
                    {
                      targets = [ "node:${toString config.services.prometheus.exporters.node.port}" ];
                    }
                  ];
                }
              ];
            };
          };
        };
      };
    };
  };

  testScript = ''
    prometheus.wait_for_unit("prometheus")
    prometheus.wait_for_open_port(9090)

    # Check if switching to a NixOS configuration that changes the prometheus
    # configuration reloads (instead of restarts) prometheus before the switch
    # finishes successfully:
    with subtest("config change reloads prometheus"):
        import json
        # We check if prometheus has finished reloading by looking for the message
        # "Completed loading of configuration file" in the journal between the start
        # and finish of switching to the new NixOS configuration.
        #
        # To mark the start we record the journal cursor before starting the switch:
        cursor_before_switching = json.loads(
            prometheus.succeed("journalctl -n1 -o json --output-fields=__CURSOR")
        )["__CURSOR"]

        # Now we switch:
        prometheus_config_change = prometheus.succeed(
            "readlink /run/current-system/specialisation/prometheus-config-change"
        ).strip()
        prometheus.succeed(prometheus_config_change + "/bin/switch-to-configuration test")

        # Next we retrieve all logs since the start of switching:
        logs_after_starting_switching = prometheus.succeed(
            """
              journalctl --after-cursor='{cursor_before_switching}' -o json --output-fields=MESSAGE
            """.format(
                cursor_before_switching=cursor_before_switching
            )
        )

        # Finally we check if the message "Completed loading of configuration file"
        # occurs before the "finished switching to system configuration" message:
        finished_switching_msg = (
            "finished switching to system configuration " + prometheus_config_change
        )
        reloaded_before_switching_finished = False
        finished_switching = False
        for log_line in logs_after_starting_switching.split("\n"):
            msg = json.loads(log_line)["MESSAGE"]
            if "Completed loading of configuration file" in msg:
                reloaded_before_switching_finished = True
            if msg == finished_switching_msg:
                finished_switching = True
                break

        assert reloaded_before_switching_finished
        assert finished_switching

        # Check if the reloaded config includes the new node job:
        prometheus.succeed(
            """
              curl -sf http://127.0.0.1:9090/api/v1/status/config \
                | jq -r .data.yaml \
                | yq '.scrape_configs | any(.job_name == "node")' \
                | grep true
            """
        )
  '';
})
13 third_party/nixpkgs/nixos/tests/prometheus/default.nix vendored Normal file
@ -0,0 +1,13 @@
{ system ? builtins.currentSystem
, config ? { }
, pkgs ? import ../../.. { inherit system config; }
}:

{
  alertmanager = import ./alertmanager.nix { inherit system pkgs; };
  config-reload = import ./config-reload.nix { inherit system pkgs; };
  federation = import ./federation.nix { inherit system pkgs; };
  prometheus-pair = import ./prometheus-pair.nix { inherit system pkgs; };
  pushgateway = import ./pushgateway.nix { inherit system pkgs; };
  remote-write = import ./remote-write.nix { inherit system pkgs; };
}
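Each attribute of this aggregator can be built on its own; assuming a nixpkgs checkout, an invocation along these lines (a sketch, not part of the diff) exercises a single scenario:

    nix-build nixos/tests/prometheus -A alertmanager
    nix-build nixos/tests/prometheus -A config-reload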
213 third_party/nixpkgs/nixos/tests/prometheus/federation.nix vendored Normal file
@ -0,0 +1,213 @@
import ../make-test-python.nix ({ lib, pkgs, ... }:

{
  name = "prometheus-federation";

  nodes = {
    global1 = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        globalConfig.scrape_interval = "2s";

        scrapeConfigs = [
          {
            job_name = "federate";
            honor_labels = true;
            metrics_path = "/federate";

            params = {
              "match[]" = [
                "{job=\"node\"}"
                "{job=\"prometheus\"}"
              ];
            };

            static_configs = [
              {
                targets = [
                  "prometheus1:${toString config.services.prometheus.port}"
                  "prometheus2:${toString config.services.prometheus.port}"
                ];
              }
            ];
          }
          {
            job_name = "prometheus";
            static_configs = [
              {
                targets = [
                  "global1:${toString config.services.prometheus.port}"
                  "global2:${toString config.services.prometheus.port}"
                ];
              }
            ];
          }
        ];
      };
    };

    global2 = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        globalConfig.scrape_interval = "2s";

        scrapeConfigs = [
          {
            job_name = "federate";
            honor_labels = true;
            metrics_path = "/federate";

            params = {
              "match[]" = [
                "{job=\"node\"}"
                "{job=\"prometheus\"}"
              ];
            };

            static_configs = [
              {
                targets = [
                  "prometheus1:${toString config.services.prometheus.port}"
                  "prometheus2:${toString config.services.prometheus.port}"
                ];
              }
            ];
          }
          {
            job_name = "prometheus";
            static_configs = [
              {
                targets = [
                  "global1:${toString config.services.prometheus.port}"
                  "global2:${toString config.services.prometheus.port}"
                ];
              }
            ];
          }
        ];
      };
    };

    prometheus1 = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        globalConfig.scrape_interval = "2s";

        scrapeConfigs = [
          {
            job_name = "node";
            static_configs = [
              {
                targets = [
                  "node1:${toString config.services.prometheus.exporters.node.port}"
                ];
              }
            ];
          }
          {
            job_name = "prometheus";
            static_configs = [
              {
                targets = [
                  "prometheus1:${toString config.services.prometheus.port}"
                ];
              }
            ];
          }
        ];
      };
    };

    prometheus2 = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        globalConfig.scrape_interval = "2s";

        scrapeConfigs = [
          {
            job_name = "node";
            static_configs = [
              {
                targets = [
                  "node2:${toString config.services.prometheus.exporters.node.port}"
                ];
              }
            ];
          }
          {
            job_name = "prometheus";
            static_configs = [
              {
                targets = [
                  "prometheus2:${toString config.services.prometheus.port}"
                ];
              }
            ];
          }
        ];
      };
    };

    node1 = { config, pkgs, ... }: {
      services.prometheus.exporters.node = {
        enable = true;
        openFirewall = true;
      };
    };

    node2 = { config, pkgs, ... }: {
      services.prometheus.exporters.node = {
        enable = true;
        openFirewall = true;
      };
    };
  };

  testScript = ''
    for machine in node1, node2:
        machine.wait_for_unit("prometheus-node-exporter")
        machine.wait_for_open_port(9100)

    for machine in prometheus1, prometheus2, global1, global2:
        machine.wait_for_unit("prometheus")
        machine.wait_for_open_port(9090)

    # Verify both servers got the same data from the exporter
    for machine in prometheus1, prometheus2:
        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"node\"\})' | "
            + "jq '.data.result[0].value[1]' | grep '\"1\"'"
        )
        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(prometheus_build_info)' | "
            + "jq '.data.result[0].value[1]' | grep '\"1\"'"
        )

    for machine in global1, global2:
        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"node\"\})' | "
            + "jq '.data.result[0].value[1]' | grep '\"2\"'"
        )

        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(prometheus_build_info)' | "
            + "jq '.data.result[0].value[1]' | grep '\"4\"'"
        )
  '';
})
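The /federate job above is ordinary Prometheus federation; roughly the same request the global servers issue can be reproduced by hand against a leaf server. Hostnames and match[] selectors are taken from the config above, the curl form itself is just a sketch:

    curl -G 'http://prometheus1:9090/federate' \
      --data-urlencode 'match[]={job="node"}' \
      --data-urlencode 'match[]={job="prometheus"}'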
87 third_party/nixpkgs/nixos/tests/prometheus/prometheus-pair.nix vendored Normal file
@ -0,0 +1,87 @@
import ../make-test-python.nix ({ lib, pkgs, ... }:

{
  name = "prometheus-pair";

  nodes = {
    prometheus1 = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        globalConfig.scrape_interval = "2s";
        scrapeConfigs = [
          {
            job_name = "prometheus";
            static_configs = [
              {
                targets = [
                  "prometheus1:${toString config.services.prometheus.port}"
                  "prometheus2:${toString config.services.prometheus.port}"
                ];
              }
            ];
          }
        ];
      };
    };

    prometheus2 = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        globalConfig.scrape_interval = "2s";
        scrapeConfigs = [
          {
            job_name = "prometheus";
            static_configs = [
              {
                targets = [
                  "prometheus1:${toString config.services.prometheus.port}"
                  "prometheus2:${toString config.services.prometheus.port}"
                ];
              }
            ];
          }
        ];
      };
    };
  };

  testScript = ''
    for machine in prometheus1, prometheus2:
        machine.wait_for_unit("prometheus")
        machine.wait_for_open_port(9090)
        machine.wait_until_succeeds("journalctl -o cat -u prometheus.service | grep 'version=${pkgs.prometheus.version}'")
        machine.wait_until_succeeds("curl -sSf http://localhost:9090/-/healthy")

    # Prometheii ready - run some queries
    for machine in prometheus1, prometheus2:
        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=prometheus_build_info\{instance=\"prometheus1:9090\",version=\"${pkgs.prometheus.version}\"\}' | "
            + "jq '.data.result[0].value[1]' | grep '\"1\"'"
        )

        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=prometheus_build_info\{instance=\"prometheus1:9090\"\}' | "
            + "jq '.data.result[0].value[1]' | grep '\"1\"'"
        )

        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(prometheus_build_info)%20by%20(version)' | "
            + "jq '.data.result[0].metric.version' | grep '\"${pkgs.prometheus.version}\"'"
        )

        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(prometheus_build_info)%20by%20(version)' | "
            + "jq '.data.result[0].value[1]' | grep '\"2\"'"
        )

    prometheus1.log(prometheus1.succeed("systemd-analyze security prometheus.service | grep -v '✓'"))
  '';
})
94 third_party/nixpkgs/nixos/tests/prometheus/pushgateway.nix vendored Normal file
@ -0,0 +1,94 @@
import ../make-test-python.nix ({ lib, pkgs, ... }:

{
  name = "prometheus-pushgateway";

  nodes = {
    prometheus = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        globalConfig.scrape_interval = "2s";

        scrapeConfigs = [
          {
            job_name = "pushgateway";
            static_configs = [
              {
                targets = [
                  "pushgateway:9091"
                ];
              }
            ];
          }
        ];
      };
    };

    pushgateway = { config, pkgs, ... }: {
      networking.firewall.allowedTCPPorts = [ 9091 ];

      services.prometheus.pushgateway = {
        enable = true;
      };
    };

    client = { config, pkgs, ... }: {
    };
  };

  testScript = ''
    pushgateway.wait_for_unit("pushgateway")
    pushgateway.wait_for_open_port(9091)
    pushgateway.wait_until_succeeds("curl -s http://127.0.0.1:9091/-/ready")
    pushgateway.wait_until_succeeds("journalctl -o cat -u pushgateway.service | grep 'version=${pkgs.prometheus-pushgateway.version}'")

    prometheus.wait_for_unit("prometheus")
    prometheus.wait_for_open_port(9090)

    prometheus.wait_until_succeeds(
      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"pushgateway\"\})' | "
      + "jq '.data.result[0].value[1]' | grep '\"1\"'"
    )

    prometheus.wait_until_succeeds(
      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(pushgateway_build_info)%20by%20(version)' | "
      + "jq '.data.result[0].metric.version' | grep '\"${pkgs.prometheus-pushgateway.version}\"'"
    )

    client.wait_for_unit("network-online.target")

    # Add a metric and check in Prometheus
    client.wait_until_succeeds(
      "echo 'some_metric 3.14' | curl --data-binary @- http://pushgateway:9091/metrics/job/some_job"
    )

    prometheus.wait_until_succeeds(
      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=some_metric' | "
      + "jq '.data.result[0].value[1]' | grep '\"3.14\"'"
    )

    prometheus.wait_until_succeeds(
      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=absent(some_metric)' | "
      + "jq '.data.result[0].value[1]' | grep 'null'"
    )

    # Delete the metric, check not in Prometheus
    client.wait_until_succeeds(
      "curl -X DELETE http://pushgateway:9091/metrics/job/some_job"
    )

    prometheus.wait_until_fails(
      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=some_metric' | "
      + "jq '.data.result[0].value[1]' | grep '\"3.14\"'"
    )

    prometheus.wait_until_succeeds(
      "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=absent(some_metric)' | "
      + "jq '.data.result[0].value[1]' | grep '\"1\"'"
    )
  '';
})
73 third_party/nixpkgs/nixos/tests/prometheus/remote-write.nix vendored Normal file
@ -0,0 +1,73 @@
import ../make-test-python.nix ({ lib, pkgs, ... }:

{
  name = "prometheus-remote-write";

  nodes = {
    receiver = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        globalConfig.scrape_interval = "2s";

        extraFlags = [ "--web.enable-remote-write-receiver" ];
      };
    };

    prometheus = { config, pkgs, ... }: {
      environment.systemPackages = [ pkgs.jq ];

      networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

      services.prometheus = {
        enable = true;
        globalConfig.scrape_interval = "2s";

        remoteWrite = [
          {
            url = "http://receiver:9090/api/v1/write";
          }
        ];

        scrapeConfigs = [
          {
            job_name = "node";
            static_configs = [
              {
                targets = [
                  "node:${toString config.services.prometheus.exporters.node.port}"
                ];
              }
            ];
          }
        ];
      };
    };

    node = { config, pkgs, ... }: {
      services.prometheus.exporters.node = {
        enable = true;
        openFirewall = true;
      };
    };
  };

  testScript = ''
    node.wait_for_unit("prometheus-node-exporter")
    node.wait_for_open_port(9100)

    for machine in prometheus, receiver:
        machine.wait_for_unit("prometheus")
        machine.wait_for_open_port(9090)

    # Verify both servers got the same data from the exporter
    for machine in prometheus, receiver:
        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=node_exporter_build_info\{instance=\"node:9100\"\}' | "
            + "jq '.data.result[0].value[1]' | grep '\"1\"'"
        )
  '';
})
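The receiver only accepts pushed samples because of the extraFlags entry above; a quick manual check is to read the flag back from Prometheus' runtime flags endpoint. The endpoint is standard Prometheus, while the jq expression below is an assumption about the JSON shape:

    curl -s http://receiver:9090/api/v1/status/flags \
      | jq -r '.data."web.enable-remote-write-receiver"'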
95 third_party/nixpkgs/nixos/tests/tandoor-recipes-script-name.nix vendored Normal file
@ -0,0 +1,95 @@
import ./make-test-python.nix (
  { pkgs, lib, ... }:
  {
    name = "tandoor-recipes-script-name";

    nodes.machine =
      { pkgs, nodes, ... }:
      {
        services.tandoor-recipes = {
          enable = true;
          extraConfig = {
            SCRIPT_NAME = "/any/path";
            STATIC_URL = "${nodes.machine.services.tandoor-recipes.extraConfig.SCRIPT_NAME}/static/";
          };
        };
      };

    testScript =
      { nodes, ... }:
      let
        inherit (nodes.machine.services.tandoor-recipes) address port;
        inherit (nodes.machine.services.tandoor-recipes.extraConfig) SCRIPT_NAME;
      in
      ''
        from html.parser import HTMLParser

        origin_url = "http://${address}:${toString port}"
        base_url = f"{origin_url}${SCRIPT_NAME}"
        login_path = "/admin/login/"
        login_url = f"{base_url}{login_path}"

        cookie_jar_path = "/tmp/cookies.txt"
        curl = f"curl --cookie {cookie_jar_path} --cookie-jar {cookie_jar_path} --fail --header 'Origin: {origin_url}' --show-error --silent"

        print("Wait for the service to respond")
        machine.wait_for_unit("tandoor-recipes.service")
        machine.wait_until_succeeds(f"{curl} {login_url}")

        username = "username"
        password = "password"

        print("Create admin user")
        machine.succeed(
            f"DJANGO_SUPERUSER_PASSWORD='{password}' /var/lib/tandoor-recipes/tandoor-recipes-manage createsuperuser --no-input --username='{username}' --email=nobody@example.com"
        )

        print("Get CSRF token for later requests")
        csrf_token = machine.succeed(f"grep csrftoken {cookie_jar_path} | cut --fields=7").rstrip()

        print("Log in as admin user")
        machine.succeed(
            f"{curl} --data 'csrfmiddlewaretoken={csrf_token}' --data 'username={username}' --data 'password={password}' {login_url}"
        )

        print("Get the contents of the logged in main page")
        logged_in_page = machine.succeed(f"{curl} --location {base_url}")

        class UrlParser(HTMLParser):
            def __init__(self):
                super().__init__()

                self.urls: list[str] = []

            def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
                if tag == "form":
                    for name, value in attrs:
                        if name == "action" and value is not None:
                            assert not value.endswith(login_path)
                            break

                if tag != "a":
                    return

                for name, value in attrs:
                    if name == "href" and value is not None:
                        if value.startswith(base_url):
                            self.urls.append(value)
                        elif value.startswith("/"):
                            self.urls.append(f"{origin_url}{value}")
                        else:
                            print("Ignoring external URL: {value}")

                        break

        parser = UrlParser()
        parser.feed(logged_in_page)

        for url in parser.urls:
            with subtest(f"Verify that {url} can be reached"):
                machine.succeed(f"{curl} {url}")
      '';

    meta.maintainers = with lib.maintainers; [ l0b0 ];
  }
)
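The point of the test above is that every generated link stays under SCRIPT_NAME; the quickest manual spot check is to fetch the login page under that prefix (the <address> and <port> placeholders stand for the module options referenced in the test):

    curl -sf "http://<address>:<port>/any/path/admin/login/"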
@ -212,8 +212,6 @@ in import ./make-test-python.nix {
    };
  };

  testScript = { nodes, ... } : ''
-   import json
-
    # Before starting the other machines we first make sure that our S3 service is online
    # and has a bucket added for thanos:
    s3.start()
@ -289,61 +287,5 @@ in import ./make-test-python.nix {
      + "jq .thanos.labels.some_label | "
      + "grep 'required by thanos'"
    )
-
-   # Check if switching to a NixOS configuration that changes the prometheus
-   # configuration reloads (instead of restarts) prometheus before the switch
-   # finishes successfully:
-   with subtest("config change reloads prometheus"):
-       # We check if prometheus has finished reloading by looking for the message
-       # "Completed loading of configuration file" in the journal between the start
-       # and finish of switching to the new NixOS configuration.
-       #
-       # To mark the start we record the journal cursor before starting the switch:
-       cursor_before_switching = json.loads(
-           prometheus.succeed("journalctl -n1 -o json --output-fields=__CURSOR")
-       )["__CURSOR"]
-
-       # Now we switch:
-       prometheus_config_change = prometheus.succeed(
-           "readlink /run/current-system/specialisation/prometheus-config-change"
-       ).strip()
-       prometheus.succeed(prometheus_config_change + "/bin/switch-to-configuration test")
-
-       # Next we retrieve all logs since the start of switching:
-       logs_after_starting_switching = prometheus.succeed(
-           """
-             journalctl --after-cursor='{cursor_before_switching}' -o json --output-fields=MESSAGE
-           """.format(
-               cursor_before_switching=cursor_before_switching
-           )
-       )
-
-       # Finally we check if the message "Completed loading of configuration file"
-       # occurs before the "finished switching to system configuration" message:
-       finished_switching_msg = (
-           "finished switching to system configuration " + prometheus_config_change
-       )
-       reloaded_before_switching_finished = False
-       finished_switching = False
-       for log_line in logs_after_starting_switching.split("\n"):
-           msg = json.loads(log_line)["MESSAGE"]
-           if "Completed loading of configuration file" in msg:
-               reloaded_before_switching_finished = True
-           if msg == finished_switching_msg:
-               finished_switching = True
-               break
-
-       assert reloaded_before_switching_finished
-       assert finished_switching
-
-       # Check if the reloaded config includes the new s3-node_exporter job:
-       prometheus.succeed(
-           """
-             curl -sf http://127.0.0.1:${toString queryPort}/api/v1/status/config \
-               | jq -r .data.yaml \
-               | yq '.scrape_configs | any(.job_name == "s3-node_exporter")' \
-               | grep true
-           """
-       )
  '';
}
2 third_party/nixpkgs/nixos/tests/tigervnc.nix vendored
@ -7,7 +7,7 @@ with import ../lib/testing-python.nix { inherit system pkgs; };
makeTest {
  name = "tigervnc";
  meta = with pkgs.lib.maintainers; {
-   maintainers = [ lheckemann ];
+   maintainers = [ ];
  };

  nodes = {
2 third_party/nixpkgs/nixos/tests/timezone.nix vendored
@ -1,6 +1,6 @@
import ./make-test-python.nix ({ pkgs, ...} : {
  name = "timezone";
- meta.maintainers = with pkgs.lib.maintainers; [ lheckemann ];
+ meta.maintainers = with pkgs.lib.maintainers; [ ];

  nodes = {
    node_eutz = { pkgs, ... }: {
96 third_party/nixpkgs/nixos/tests/wstunnel.nix vendored Normal file
@ -0,0 +1,96 @@
let
  certs = import ./common/acme/server/snakeoil-certs.nix;
  domain = certs.domain;
in

{
  name = "wstunnel";

  nodes = {
    server = {
      virtualisation.vlans = [ 1 ];

      security.pki.certificateFiles = [ certs.ca.cert ];

      networking = {
        useNetworkd = true;
        useDHCP = false;
        firewall.enable = false;
      };

      systemd.network.networks."01-eth1" = {
        name = "eth1";
        networkConfig.Address = "10.0.0.1/24";
      };

      services.wstunnel = {
        enable = true;
        servers.my-server = {
          listen = {
            host = "10.0.0.1";
            port = 443;
          };
          tlsCertificate = certs.${domain}.cert;
          tlsKey = certs.${domain}.key;
        };
      };
    };

    client = {
      virtualisation.vlans = [ 1 ];

      security.pki.certificateFiles = [ certs.ca.cert ];

      networking = {
        useNetworkd = true;
        useDHCP = false;
        firewall.enable = false;
        extraHosts = ''
          10.0.0.1 ${domain}
        '';
      };

      systemd.network.networks."01-eth1" = {
        name = "eth1";
        networkConfig.Address = "10.0.0.2/24";
      };

      services.wstunnel = {
        enable = true;
        clients.my-client = {
          autoStart = false;
          connectTo = "wss://${domain}:443";
          localToRemote = [
            "tcp://8080:localhost:2080"
          ];
          remoteToLocal = [
            "tcp://2081:localhost:8081"
          ];
        };
      };
    };
  };

  testScript = /* python */ ''
    start_all()
    server.wait_for_unit("wstunnel-server-my-server.service")
    client.wait_for_open_port(443, "10.0.0.1")

    client.systemctl("start wstunnel-client-my-client.service")
    client.wait_for_unit("wstunnel-client-my-client.service")

    with subtest("connection from client to server"):
        server.succeed("nc -l 2080 >/tmp/msg &")
        client.sleep(1)
        client.succeed('nc -w1 localhost 8080 <<<"Hello from client"')
        server.succeed('grep "Hello from client" /tmp/msg')

    with subtest("connection from server to client"):
        client.succeed("nc -l 8081 >/tmp/msg &")
        server.sleep(1)
        server.succeed('nc -w1 localhost 2081 <<<"Hello from server"')
        client.succeed('grep "Hello from server" /tmp/msg')

    client.systemctl("stop wstunnel-client-my-client.service")
  '';
}
@ -202,7 +202,7 @@ stdenv.mkDerivation rec {
      # Documentation.
      cc-by-30
    ];
-   maintainers = with maintainers; [ lheckemann veprbl wegank ];
+   maintainers = with maintainers; [ veprbl wegank ];
    platforms = platforms.unix;
  };
}
@ -27,11 +27,11 @@

stdenv.mkDerivation rec {
  pname = "bitwig-studio";
- version = "5.1.8";
+ version = "5.1.9";

  src = fetchurl {
-   url = "https://downloads.bitwig.com/stable/${version}/${pname}-${version}.deb";
+   url = "https://www.bitwig.com/dl/Bitwig%20Studio/${version}/installer_linux/";
-   sha256 = "sha256-KxNLae/uTYL1m/X+/7wr7hhKfw31NpB9Mw9RzfrTuus=";
+   hash = "sha256-J5kLqXCMnGb0ZMhES6PQIPjN51ptlBGj4Fy8qSzJ6Qg=";
  };

  nativeBuildInputs = [ dpkg makeWrapper wrapGAppsHook3 ];
@ -8,12 +8,12 @@
, samplerateSupport ? jackSupport, libsamplerate ? null
, ossSupport ? false, alsa-oss ? null
, pulseaudioSupport ? config.pulseaudio or false, libpulseaudio ? null
+ , sndioSupport ? false, sndio ? null
, mprisSupport ? stdenv.isLinux, systemd ? null

# TODO: add these
#, artsSupport
#, roarSupport
- #, sndioSupport
#, sunSupport
#, waveoutSupport
@ -59,11 +59,11 @@ let
    (mkFlag samplerateSupport "CONFIG_SAMPLERATE=y" libsamplerate)
    (mkFlag ossSupport "CONFIG_OSS=y" alsa-oss)
    (mkFlag pulseaudioSupport "CONFIG_PULSE=y" libpulseaudio)
+   (mkFlag sndioSupport "CONFIG_SNDIO=y" sndio)
    (mkFlag mprisSupport "CONFIG_MPRIS=y" systemd)

    #(mkFlag artsSupport "CONFIG_ARTS=y")
    #(mkFlag roarSupport "CONFIG_ROAR=y")
-   #(mkFlag sndioSupport "CONFIG_SNDIO=y")
    #(mkFlag sunSupport "CONFIG_SUN=y")
    #(mkFlag waveoutSupport "CONFIG_WAVEOUT=y")
@ -92,13 +92,13 @@ in

stdenv.mkDerivation rec {
  pname = "cmus";
- version = "2.10.0-unstable-2023-11-05";
+ version = "2.11.0";

  src = fetchFromGitHub {
    owner = "cmus";
    repo = "cmus";
-   rev = "23afab39902d3d97c47697196b07581305337529";
+   rev = "v${version}";
-   sha256 = "sha256-pxDIYbeJMoaAuErCghWJpDSh1WbYbhgJ7+ca5WLCrOs=";
+   hash = "sha256-kUJC+ORLkYD57mPL/1p5VCm9yiNzVdOZhxp7sVP6oMw=";
  };

  nativeBuildInputs = [ pkg-config ];
@ -23,13 +23,13 @@ with lib.strings;

let

- version = "2.72.14";
+ version = "2.74.6";

  src = fetchFromGitHub {
    owner = "grame-cncm";
    repo = "faust";
    rev = version;
-   sha256 = "sha256-RdSXiOYwKsfyrfHEughCeSwa9VFM6/3pMg54yCMpzLU=";
+   sha256 = "sha256-0r7DjTrsNKZ5ZmWoA+Y9OXyJFUiUFZiPQb1skXXWYTw=";
    fetchSubmodules = true;
  };

@ -1,13 +1,13 @@
{ stdenv, lib, fetchFromGitHub, faust2jaqt, faust2lv2 }:
stdenv.mkDerivation rec {
  pname = "faustPhysicalModeling";
- version = "2.72.14";
+ version = "2.74.6";

  src = fetchFromGitHub {
    owner = "grame-cncm";
    repo = "faust";
    rev = version;
-   sha256 = "sha256-UBMVU2oAfoAaSQXxZxV+LFq8dyb5dvy/0cCG4XywZVc=";
+   sha256 = "sha256-2qgw7pauDZBvEb5iySNiq2Fq+T+qw+AjVTwxaSQ9Eko=";
  };

  buildInputs = [ faust2jaqt faust2lv2 ];
@ -15,6 +15,7 @@
, desktop-file-utils
, dbus
, openssl
+ , glib-networking
, sqlite
, gst_all_1
, wrapGAppsHook4
@ -22,19 +23,19 @@

stdenv.mkDerivation rec {
  pname = "gnome-podcasts";
- version = "0.6.1";
+ version = "0.7.1";

  src = fetchFromGitLab {
    domain = "gitlab.gnome.org";
    owner = "World";
    repo = "podcasts";
    rev = version;
-   hash = "sha256-LPwCYgAFgUMFQZ0i4ldiuGYGMMWcMqYct3/o7eTIhmU=";
+   hash = "sha256-KCjHT/4AeJ+RXCtawkhs6f4D8NCJotYIPk3tGr5YG9M=";
  };

  cargoDeps = rustPlatform.fetchCargoTarball {
    inherit pname version src;
-   hash = "sha256-n3ZcUhqn1rvvgkBKSKvH0b8wbOCqcBGwpb2OqMe8h0s=";
+   hash = "sha256-XTfKqKs7874ak7Lzscxw8E2qcnJOWMZaaol8TpIB6Vw=";
  };

  nativeBuildInputs = [
@ -56,6 +57,7 @@ stdenv.mkDerivation rec {
    gettext
    dbus
    openssl
+   glib-networking
    sqlite
    gst_all_1.gstreamer
    gst_all_1.gst-plugins-base
@ -66,13 +68,13 @@ stdenv.mkDerivation rec {
  # tests require network
  doCheck = false;

- meta = with lib; {
+ meta = {
    description = "Listen to your favorite podcasts";
    mainProgram = "gnome-podcasts";
    homepage = "https://apps.gnome.org/Podcasts/";
-   license = licenses.gpl3Plus;
+   license = lib.licenses.gpl3Plus;
-   maintainers = teams.gnome.members;
+   maintainers = lib.teams.gnome.members;
-   platforms = platforms.unix;
+   platforms = lib.platforms.unix;
    broken = stdenv.isDarwin; # never built on Hydra https://hydra.nixos.org/job/nixpkgs/trunk/gnome-podcasts.x86_64-darwin
  };
}
@ -10,18 +10,18 @@

buildGoModule rec {
  pname = "go-musicfox";
- version = "4.4.1";
+ version = "4.5.3";

  src = fetchFromGitHub {
    owner = "go-musicfox";
    repo = "go-musicfox";
    rev = "v${version}";
-   hash = "sha256-pIfQ0ufn8W0opm+N6IPFBPWNxNWMOU7FudPtIFop51c=";
+   hash = "sha256-qf4XAAfWWlHAnNGhXaYpnjj+2z+/lWOHaTyv8R4UDgQ=";
  };

  deleteVendor = true;

- vendorHash = "sha256-ey78zeCSEuRgteG5ZRb4uO88E6lwEgqSxKfjJg3NGT4=";
+ vendorHash = "sha256-oz/kVp/Jj2Lmo19UFOn2VPD/iWbSRCbmKy8fK8RdkYs=";

  subPackages = [ "cmd/musicfox.go" ];
@ -16,7 +16,7 @@ stdenv.mkDerivation {
  meta = with lib; {
    homepage = "https://www.parabola.me.uk/alsa/pmidi.html";
    description = "Straightforward command line program to play midi files through the ALSA sequencer";
-   maintainers = with maintainers; [ lheckemann ];
+   maintainers = with maintainers; [ ];
    license = licenses.gpl2;
    mainProgram = "pmidi";
  };
@ -52,7 +52,7 @@ buildNpmPackage rec {
    description = "Pocket Casts webapp, packaged for the Linux Desktop";
    homepage = "https://github.com/felicianotech/pocket-casts-desktop-app";
    license = licenses.mit;
-   maintainers = with maintainers; [ wolfangaukang ];
+   maintainers = [ ];
    mainProgram = "pocket-casts";
    platforms = platforms.linux;
  };
@ -1,4 +1,5 @@
- { fetchFromGitHub
+ { stdenvNoCC
+ , fetchFromGitHub
, lib
, cmake
, mkDerivation
@ -26,12 +27,18 @@ mkDerivation rec {

  installFlags = [ "DESTDIR=$(out)" ];

+ postInstall = lib.optionalString stdenvNoCC.isDarwin ''
+   mkdir -p $out/Applications
+   mv $out/bin/spotify-qt.app $out/Applications
+   ln $out/Applications/spotify-qt.app/Contents/MacOS/spotify-qt $out/bin/spotify-qt
+ '';
+
  meta = with lib; {
    description = "Lightweight unofficial Spotify client using Qt";
    mainProgram = "spotify-qt";
    homepage = "https://github.com/kraxarn/spotify-qt";
    license = licenses.gpl3Only;
-   maintainers = with maintainers; [ ];
+   maintainers = with maintainers; [ iivusly ];
    platforms = platforms.unix;
  };
}
@ -14,14 +14,14 @@ let
  # If an update breaks things, one of those might have valuable info:
  # https://aur.archlinux.org/packages/spotify/
  # https://community.spotify.com/t5/Desktop-Linux
- version = "1.2.31.1205.g4d59ad7c";
+ version = "1.2.37.701.ge66eb7bc";
  # To get the latest stable revision:
  # curl -H 'X-Ubuntu-Series: 16' 'https://api.snapcraft.io/api/v1/snaps/details/spotify?channel=stable' | jq '.download_url,.version,.last_updated'
  # To get general information:
  # curl -H 'Snap-Device-Series: 16' 'https://api.snapcraft.io/v2/snaps/info/spotify' | jq '.'
  # More examples of api usage:
  # https://github.com/canonical-websites/snapcraft.io/blob/master/webapp/publisher/snaps/views.py
- rev = "75";
+ rev = "76";

  deps = [
    alsa-lib
@ -86,8 +86,9 @@ stdenv.mkDerivation {
  # spotify ourselves:
  # https://community.spotify.com/t5/Desktop-Linux/Redistribute-Spotify-on-Linux-Distributions/td-p/1695334
  src = fetchurl {
+   name = "spotify-${version}-${rev}.snap";
    url = "https://api.snapcraft.io/api/v1/snaps/download/pOBIoZ2LrCB3rDohMxoYGnbN14EHOgD7_${rev}.snap";
-   hash = "sha512-o4iLcbNqbsxo9YJMy0SXO7Udv4CMhhBcsf53UuqWKFFWY/jKVN+Lb+dB7Jf9+UowpmbrP44w97Oi+dnbfFXYjQ==";
+   hash = "sha512-k7aw1QM3NCFkm0tXcHgYyeEBagGFpCL6JdWlFruJszPloiCy5vopOsD4PdqyiSEs0rSUP0rLxX2UBs3XuI5cUA==";
  };

  nativeBuildInputs = [ wrapGAppsHook3 makeShellWrapper squashfsTools ];
@ -120,11 +121,15 @@ stdenv.mkDerivation {
  # Prevent double wrapping
  dontWrapGApps = true;

+ env = rec {
+   libdir = "${placeholder "out"}/lib/spotify";
+   librarypath = "${lib.makeLibraryPath deps}:${libdir}";
+ };
+
  installPhase =
    ''
      runHook preInstall

-     libdir=$out/lib/spotify
      mkdir -p $libdir
      mv ./usr/* $out/

@ -147,16 +152,6 @@ stdenv.mkDerivation {
        --interpreter "$(cat $NIX_CC/nix-support/dynamic-linker)" \
        --set-rpath $rpath $out/share/spotify/spotify
-
-     librarypath="${lib.makeLibraryPath deps}:$libdir"
-     wrapProgramShell $out/share/spotify/spotify \
-       ''${gappsWrapperArgs[@]} \
-       ${lib.optionalString (deviceScaleFactor != null) ''
-         --add-flags "--force-device-scale-factor=${toString deviceScaleFactor}" \
-       ''} \
-       --prefix LD_LIBRARY_PATH : "$librarypath" \
-       --prefix PATH : "${gnome.zenity}/bin" \
-       --add-flags "\''${NIXOS_OZONE_WL:+\''${WAYLAND_DISPLAY:+--enable-features=UseOzonePlatform --ozone-platform=wayland}}"

      # fix Icon line in the desktop file (#48062)
      sed -i "s:^Icon=.*:Icon=spotify-client:" "$out/share/spotify/spotify.desktop"

@ -175,6 +170,21 @@ stdenv.mkDerivation {
      runHook postInstall
    '';

+ fixupPhase = ''
+   runHook preFixup
+
+   wrapProgramShell $out/share/spotify/spotify \
+     ''${gappsWrapperArgs[@]} \
+     ${lib.optionalString (deviceScaleFactor != null) ''
+       --add-flags "--force-device-scale-factor=${toString deviceScaleFactor}" \
+     ''} \
+     --prefix LD_LIBRARY_PATH : "$librarypath" \
+     --prefix PATH : "${gnome.zenity}/bin" \
+     --add-flags "\''${NIXOS_OZONE_WL:+\''${WAYLAND_DISPLAY:+--enable-features=UseOzonePlatform --ozone-platform=wayland}}"
+
+   runHook postFixup
+ '';
+
  meta = meta // {
    maintainers = with lib.maintainers; [ eelco ftrvxmtrx sheenobu timokau ma27 ];
  };
@ -152,7 +152,7 @@ stdenv.mkDerivation rec {
    mainProgram = "tenacity";
    homepage = "https://tenacityaudio.org/";
    license = licenses.gpl2Plus;
-   maintainers = with maintainers; [ irenes lheckemann ];
+   maintainers = with maintainers; [ irenes ];
    platforms = platforms.linux;
  };
}
@ -38,11 +38,11 @@ let
in
stdenv.mkDerivation rec {
  pname = "bisq-desktop";
- version = "1.9.15";
+ version = "1.9.16";

  src = fetchurl {
    url = "https://github.com/bisq-network/bisq/releases/download/v${version}/Bisq-64bit-${version}.deb";
-   sha256 = "0bz4yzfrzn9rwsmwwnsqdgxsqd42dyiz3vxi53qxj36h49nh8lzg";
+   sha256 = "sha256-DxYgZgDa3vOHj7svJqu/pdyXKZ+uBTy35Fchw49xxoA=";
  };

  nativeBuildInputs = [
@ -2,16 +2,16 @@

buildGoModule rec {
  pname = "dcrwallet";
- version = "2.0.2";
+ version = "2.0.3";

  src = fetchFromGitHub {
    owner = "decred";
    repo = "dcrwallet";
    rev = "release-v${version}";
-   hash = "sha256-fsmil9YQNvXDyBxyt+Ei3F5U/dvbrzbZ01+v9o3+jVY=";
+   hash = "sha256-sRwGpOVydfZjgo7W+4RsHC4JElPyilbV5xhcHxPh2Wc=";
  };

- vendorHash = "sha256-ehtgsBCFzMft8285IjpsQ6y9HPb/UpZmcj9X4m8ZKXo=";
+ vendorHash = "sha256-lVVIS3FL8XwthCpzRKdw59NVtHVxXQ0ouD7jYQzOecM=";

  subPackages = [ "." ];
@ -5,13 +5,13 @@

buildGoModule rec {
  pname = "lndhub-go";
- version = "1.0.0";
+ version = "1.0.1";

  src = fetchFromGitHub {
    owner = "getAlby";
    repo = "lndhub.go";
    rev = version;
-   sha256 = "sha256-PHBzM/lYYu6hXa5jiFQR/K5j+vmxaYH7xuoxOhFbhMk=";
+   sha256 = "sha256-YOLqMIfZSGD+AOng1XWCBlzaHkPnQc+2kmDXF2fh+ps=";
  };

  vendorHash = "sha256-Vo29w04cRW0syD2tjieKVeZ3srFNuEC3T17birVWn6k=";
Some files were not shown because too many files have changed in this diff.