Project import generated by Copybara.

GitOrigin-RevId: fe2ecaf706a5907b5e54d979fbde4924d84b65fc
This commit is contained in:
Default email 2023-04-12 14:48:02 +02:00
parent 3f6d8c5c14
commit 87f9c27ba9
2661 changed files with 207251 additions and 31563 deletions

View file

@ -1,5 +1,6 @@
**/deps.nix linguist-generated
**/deps.json linguist-generated
**/deps.toml linguist-generated
**/node-packages.nix linguist-generated
pkgs/applications/editors/emacs-modes/*-generated.nix linguist-generated

View file

@ -45,6 +45,7 @@
/pkgs/build-support/setup-hooks @Ericson2314
/pkgs/build-support/setup-hooks/auto-patchelf.sh @layus
/pkgs/build-support/setup-hooks/auto-patchelf.py @layus
/pkgs/pkgs-lib @infinisil
# Nixpkgs build-support
/pkgs/build-support/writers @lassulus @Profpatsch
@ -132,7 +133,7 @@
/pkgs/development/ruby-modules @marsam
# Rust
/pkgs/development/compilers/rust @Mic92 @LnL7 @zowoq @winterqt @figsoda
/pkgs/development/compilers/rust @Mic92 @zowoq @winterqt @figsoda
/pkgs/build-support/rust @zowoq @winterqt @figsoda
/doc/languages-frameworks/rust.section.md @zowoq @winterqt @figsoda

View file

@ -54,7 +54,7 @@ jobs:
# less noisy until all nixpkgs pull requests have stopped using
# docbook for option docs.
- name: Comment on failure
uses: peter-evans/create-or-update-comment@v2
uses: peter-evans/create-or-update-comment@v3
if: ${{ failure() && steps.check.conclusion == 'failure' }}
with:
issue-number: 189318

View file

@ -51,7 +51,7 @@ jobs:
github_token: ${{ secrets.GITHUB_TOKEN }}
- name: Comment on failure
uses: peter-evans/create-or-update-comment@v2
uses: peter-evans/create-or-update-comment@v3
if: ${{ failure() }}
with:
issue-number: 105153

View file

@ -49,7 +49,7 @@ jobs:
github_token: ${{ secrets.GITHUB_TOKEN }}
- name: Comment on failure
uses: peter-evans/create-or-update-comment@v2
uses: peter-evans/create-or-update-comment@v3
if: ${{ failure() }}
with:
issue-number: 105153

View file

@ -46,7 +46,7 @@ jobs:
run: |
git clean -f
- name: create PR
uses: peter-evans/create-pull-request@v4
uses: peter-evans/create-pull-request@v5
with:
body: |
Automatic update by [update-terraform-providers](https://github.com/NixOS/nixpkgs/blob/master/.github/workflows/update-terraform-providers.yml) action.

View file

@ -61,3 +61,89 @@ builders-use-substitutes = true
```ShellSession
$ sudo launchctl kickstart -k system/org.nixos.nix-daemon
```
## Example flake usage
```
{
inputs = {
nixpkgs.url = "github:nixos/nixpkgs/nixpkgs-22.11-darwin";
darwin.url = "github:lnl7/nix-darwin/master";
darwin.inputs.nixpkgs.follows = "nixpkgs";
};
outputs = { self, darwin, nixpkgs, ... }@inputs:
let
inherit (darwin.lib) darwinSystem;
system = "aarch64-darwin";
pkgs = nixpkgs.legacyPackages."${system}";
linuxSystem = builtins.replaceStrings [ "darwin" ] [ "linux" ] system;
darwin-builder = nixpkgs.lib.nixosSystem {
system = linuxSystem;
modules = [
"${nixpkgs}/nixos/modules/profiles/macos-builder.nix"
{ virtualisation.host.pkgs = pkgs; }
];
};
in {
darwinConfigurations = {
machine1 = darwinSystem {
inherit system;
modules = [
{
nix.distributedBuilds = true;
nix.buildMachines = [{
hostName = "ssh://builder@localhost";
system = linuxSystem;
maxJobs = 4;
supportedFeatures = [ "kvm" "benchmark" "big-parallel" ];
}];
launchd.daemons.darwin-builder = {
command = "${darwin-builder.config.system.build.macos-builder-installer}/bin/create-builder";
serviceConfig = {
KeepAlive = true;
RunAtLoad = true;
StandardOutPath = "/var/log/darwin-builder.log";
StandardErrorPath = "/var/log/darwin-builder.log";
};
};
}
];
};
};
};
}
```
## Reconfiguring the builder
Initially you should not change the builder configuration, or else you will not be
able to use the binary cache. However, after you have the builder running locally
you may use it to build a modified builder with additional storage or memory.
To do this, you just need to set the `virtualisation.darwin-builder.*` parameters as
in the example below and rebuild.
```
darwin-builder = nixpkgs.lib.nixosSystem {
system = linuxSystem;
modules = [
"${nixpkgs}/nixos/modules/profiles/macos-builder.nix"
{
virtualisation.host.pkgs = pkgs;
virtualisation.darwin-builder.diskSize = 5120;
virtualisation.darwin-builder.memorySize = 1024;
virtualisation.darwin-builder.hostPort = 33022;
virtualisation.darwin-builder.workingDirectory = "/var/lib/darwin-builder";
}
];
};
```
You may make any other changes to your VM in this attribute set. For example,
you could enable Docker or X11 forwarding to your Darwin host.
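For instance, a minimal sketch of enabling Docker inside the builder VM (assuming the standard NixOS `virtualisation.docker.enable` option also applies inside this VM; not verified against the builder profile) could look like:
```
darwin-builder = nixpkgs.lib.nixosSystem {
  system = linuxSystem;
  modules = [
    "${nixpkgs}/nixos/modules/profiles/macos-builder.nix"
    {
      virtualisation.host.pkgs = pkgs;
      # Assumption: the regular NixOS Docker option also works inside this VM.
      virtualisation.docker.enable = true;
    }
  ];
};
```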

View file

@ -454,7 +454,7 @@ In the file `pkgs/top-level/all-packages.nix` you can find fetch helpers, these
owner = "NixOS";
repo = "nix";
rev = "1f795f9f44607cc5bec70d1300150bfefcef2aae";
hash = "ha256-7D4m+saJjbSFP5hOwpQq2FGR2rr+psQMTcyb1ZvtXsQ=;
hash = "ha256-7D4m+saJjbSFP5hOwpQq2FGR2rr+psQMTcyb1ZvtXsQ=";
}
```

View file

@ -25,6 +25,7 @@
<xi:include href="ios.section.xml" />
<xi:include href="java.section.xml" />
<xi:include href="javascript.section.xml" />
<xi:include href="lisp.section.xml" />
<xi:include href="lua.section.xml" />
<xi:include href="maven.section.xml" />
<xi:include href="nim.section.xml" />

View file

@ -0,0 +1,304 @@
# lisp-modules {#lisp}
This document describes the Nixpkgs infrastructure for building Common Lisp
libraries that use ASDF (Another System Definition Facility). It lives in
`pkgs/development/lisp-modules`.
## Overview {#lisp-overview}
The main entry point of the API are the Common Lisp implementation packages
(e.g. `abcl`, `ccl`, `clasp-common-lisp`, `clisp`, `ecl`, `sbcl`)
themselves. They have the `pkgs` and `withPackages` attributes, which can be
used to discover available packages and to build wrappers, respectively.
The `pkgs` attribute set contains packages that were automatically imported from
Quicklisp, and any other manually defined ones. Not every package works for all
the CL implementations (e.g. `nyxt` only makes sense for `sbcl`).
The `withPackages` function is of primary utility. It is used to build runnable
wrappers, with a pinned and pre-built ASDF FASL available in the `ASDF`
environment variable, and `CL_SOURCE_REGISTRY`/`ASDF_OUTPUT_TRANSLATIONS`
configured to find the desired systems at runtime.
With a few exceptions, the primary thing that the infrastructure does is to run
`asdf:load-system` for each system specified in the `systems` argument to
`build-asdf-system`, and save the FASLs to the Nix store. Then, it makes these
FASLs available to wrappers. Any other use-cases, such as producing SBCL
executables with `sb-ext:save-lisp-and-die`, are achieved via overriding the
`buildPhase` etc.
In addition, Lisps have the `withOverrides` function, which can be used to
substitute any package in the scope of their `pkgs`. This will be useful
together with `overrideLispAttrs` when dealing with slashy ASDF systems, because
they should stay in the main package and be built by specifying the `systems`
argument to `build-asdf-system`.
## The 90% use case example {#lisp-use-case-example}
The most common way to use the library is to run ad-hoc wrappers like this:
`nix-shell -p 'sbcl.withPackages (ps: with ps; [ alexandria ])'`
Then, in a shell:
```
$ sbcl
* (load (sb-ext:posix-getenv "ASDF"))
* (asdf:load-system 'alexandria)
```
Also one can create a `pkgs.mkShell` environment in `shell.nix`/`flake.nix`:
```
let
sbcl' = sbcl.withPackages (ps: [ ps.alexandria ]);
in mkShell {
buildInputs = [ sbcl' ];
}
```
Such a Lisp can now be used, e.g., to compile your sources:
```
buildPhase = ''
${sbcl'}/bin/sbcl --load my-build-file.lisp
''
```
## Importing packages from Quicklisp {#lisp-importing-packages-from-quicklisp}
The library is able to very quickly import all the packages distributed by
Quicklisp by parsing its `releases.txt` and `systems.txt` files. These files are
available from <http://beta.quicklisp.org/dist/quicklisp.txt>.
The import process is implemented in the `import` directory as Common Lisp
functions in the `org.lispbuilds.nix` ASDF system. To run the script, one can
execute `ql-import.lisp`:
```
nix-shell --run 'sbcl --script ql-import.lisp'
```
The script will:
1. Download the latest Quicklisp `systems.txt` and `releases.txt` files
2. Generate an SQLite database of all QL systems in `packages.sqlite`
3. Generate an `imported.nix` file from the database
The maintainer's job there is to:
1. Re-run the `ql-import.lisp` script
2. Add missing native dependencies in `ql.nix`
3. For packages that still don't build, package them manually in `packages.nix`
Also, the `imported.nix` file **must not be edited manually**! It should only be
generated as described in this section.
### Adding native dependencies {#lisp-quicklisp-adding-native-dependencies}
The Quicklisp files contain ASDF dependency data, but don't include native
library (CFFI) dependencies, and, in the case of ABCL, Java dependencies.
The `ql.nix` file contains a long list of overrides, where these dependencies
can be added.
Packages defined in `packages.nix` contain these dependencies naturally.
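As an illustrative sketch only (the attribute name `nativeLibs` and the override shape are assumptions; consult `ql.nix` itself for the real form), such an override can attach the required native library to an imported Quicklisp package:
```
# Hypothetical ql.nix-style override; `nativeLibs` is assumed to be the
# build-asdf-system argument for libraries loaded through CFFI at runtime.
cl_plus_ssl = super.cl_plus_ssl.overrideLispAttrs (old: {
  nativeLibs = [ pkgs.openssl ];
});
```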
### Trusting `systems.txt` and `releases.txt` {#lisp-quicklisp-trusting}
The previous implementation of `lisp-modules` didn't fully trust the Quicklisp
data, because there were times when the specified dependencies were incomplete
and caused broken builds. It instead used a `nix-shell` environment to
discover real dependencies by using the ASDF APIs.
The current implementation has chosen to trust this data, because it's faster to
parse a text file than to build each system to generate its Nix file, and
because that way packages can be mass-imported. Because of that, there may come
a day where some packages will break, due to bugs in Quicklisp. In that case,
the fix could be a manual override in `packages.nix` and `ql.nix`.
A known fact is that Quicklisp doesn't include dependencies on slashy systems in
its data. This is an example of a situation where such fixes were used, e.g. to
replace the `systems` attribute of the affected packages. (See the definition of
`iolib`).
### Quirks {#lisp-quicklisp-quirks}
During Quicklisp import:
- `+` in names are converted to `_plus{_,}`: `cl+ssl`->`cl_plus_ssl`, `alexandria+`->`alexandria_plus`
- `.` to `_dot_`: `iolib.base`->`iolib_dot_base`
- names starting with a number have a `_` prepended (`3d-vectors`->`_3d-vectors`)
- `_` in names is converted to `__` for reversibility
## Defining packages manually inside Nixpkgs {#lisp-defining-packages-inside}
New packages that for some reason are not in Quicklisp, and so cannot be
auto-imported, can be written in the `packages.nix` file.
In that file, use the `build-asdf-system` function, which is a wrapper around
`mkDerivation` for building ASDF systems. Various other hacks are present, such
as `build-with-compile-into-pwd` for systems which create files during
compilation.
The `build-asdf-system` function is documented with comments in
`nix-cl.nix`. Also, `packages.nix` is full of examples of how to use it.
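As a brief sketch, a `packages.nix`-style definition could mirror the `alexandria` example used later in this section (the `systems` and `lispLibs` values here are illustrative, not taken from the real file):
```
# Hypothetical packages.nix-style entry using build-asdf-system.
alexandria = build-asdf-system {
  pname = "alexandria";
  version = "1.4";
  src = fetchFromGitLab {
    domain = "gitlab.common-lisp.net";
    owner = "alexandria";
    repo = "alexandria";
    rev = "v1.4";
    hash = "sha256-1Hzxt65dZvgOFIljjjlSGgKYkj+YBLwJCACi5DZsKmQ=";
  };
  systems = [ "alexandria" ];  # secondary (slashy) systems would be extra entries here
  lispLibs = [ ];              # alexandria has no Lisp dependencies
};
```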
## Defining packages manually outside Nixpkgs {#lisp-defining-packages-outside}
Lisp derivations (`abcl`, `sbcl` etc.) also export the `buildASDFSystem`
function, which is the same as `build-asdf-system`, except for the `lisp`
argument which is set to the given CL implementation.
It can be used to define packages outside Nixpkgs, and, for example, add them
into the package scope with `withOverrides` which will be discussed later on.
### Including an external package in scope {#lisp-including-external-pkg-in-scope}
A package defined outside Nixpkgs using `buildASDFSystem` can be woven into the
Nixpkgs-provided scope like this:
```
let
alexandria = sbcl.buildASDFSystem rec {
pname = "alexandria";
version = "1.4";
src = fetchFromGitLab {
domain = "gitlab.common-lisp.net";
owner = "alexandria";
repo = "alexandria";
rev = "v${version}";
hash = "sha256-1Hzxt65dZvgOFIljjjlSGgKYkj+YBLwJCACi5DZsKmQ=";
};
};
sbcl' = sbcl.withOverrides (self: super: {
inherit alexandria;
});
in sbcl'.pkgs.alexandria
```
## Overriding package attributes {#lisp-overriding-package-attributes}
Packages export the `overrideLispAttrs` function, which can be used to build a
new package with different parameters.
Example of overriding `alexandria`:
```
sbcl.pkgs.alexandria.overrideLispAttrs (oldAttrs: rec {
version = "1.4";
src = fetchFromGitLab {
domain = "gitlab.common-lisp.net";
owner = "alexandria";
repo = "alexandria";
rev = "v${version}";
hash = "sha256-1Hzxt65dZvgOFIljjjlSGgKYkj+YBLwJCACi5DZsKmQ=";
};
})
```
## Overriding packages in scope {#lisp-overriding-packages-in-scope}
Packages can be woven into a new scope by using `withOverrides`:
```
let
sbcl' = sbcl.withOverrides (self: super: {
alexandria = super.alexandria.overrideLispAttrs (oldAttrs: rec {
pname = "alexandria";
version = "1.4";
src = fetchFromGitLab {
domain = "gitlab.common-lisp.net";
owner = "alexandria";
repo = "alexandria";
rev = "v${version}";
hash = "sha256-1Hzxt65dZvgOFIljjjlSGgKYkj+YBLwJCACi5DZsKmQ=";
};
});
});
in builtins.elemAt sbcl'.pkgs.bordeaux-threads.lispLibs 0
```
### Dealing with slashy systems {#lisp-dealing-with-slashy-systems}
Slashy (secondary) systems should not exist in their own packages! Instead, they
should be included in the parent package as an extra entry in the `systems`
argument to the `build-asdf-system`/`buildASDFSystem` functions.
The reason is that ASDF searches for a secondary system in the `.asd` of the
parent package. Thus, having them separate would cause either one of them not to
load cleanly, because one will contain FASLs of itself but not the other, and
vice versa.
To package slashy systems, use `overrideLispAttrs`, like so:
```
ecl.pkgs.alexandria.overrideLispAttrs (oldAttrs: {
systems = oldAttrs.systems ++ [ "alexandria/tests" ];
lispLibs = oldAttrs.lispLibs ++ [ ecl.pkgs.rt ];
})
```
See the respective section on using `withOverrides` for how to weave it back
into `ecl.pkgs`.
Note that sometimes the slashy systems might not only have more dependencies
than the main one, but create a circular dependency between `.asd`
files. Unfortunately, in this case an ad hoc solution becomes necessary.
## Building Wrappers {#lisp-building-wrappers}
Wrappers can be built using the `withPackages` function of Common Lisp
implementations (`abcl`, `ecl`, `sbcl` etc.):
```
sbcl.withPackages (ps: [ ps.alexandria ps.bordeaux-threads ])
```
Such a wrapper can then be executed like this:
```
result/bin/sbcl
```
### Loading ASDF {#lisp-loading-asdf}
For best results, avoid calling `(require 'asdf)` when using the
library-generated wrappers.
Use `(load (ext:getenv "ASDF"))` instead, supplying your implementation's way of
getting an environment variable for `ext:getenv`. This will load the
(pre-compiled to FASL) Nixpkgs-provided version of ASDF.
### Loading systems {#lisp-loading-systems}
In such a wrapper, you can simply use `asdf:load-system`. This works by setting the right
values for the `CL_SOURCE_REGISTRY`/`ASDF_OUTPUT_TRANSLATIONS` environment
variables, so that systems are found in the Nix store and pre-compiled FASLs are
loaded.
## Adding a new Lisp {#lisp-adding-a-new-lisp}
The function `wrapLisp` is used to wrap Common Lisp implementations. It adds the
`pkgs`, `withPackages`, `withOverrides` and `buildASDFSystem` attributes to the
derivation.
`wrapLisp` takes these arguments:
- `pkg`: the Lisp package
- `faslExt`: Implementation-specific extension for FASL files
- `program`: The name of the executable file in `${pkg}/bin/` (Default: `pkg.pname`)
- `flags`: A list of flags to always pass to `program` (Default: `[]`)
- `asdf`: The ASDF version to use (Default: `pkgs.asdf_3_3`)
- `packageOverrides`: Package overrides config (Default: `(self: super: {})`)
This example wraps CLISP:
```
wrapLisp {
pkg = clisp;
faslExt = "fas";
flags = ["-E" "UTF8"];
}
```

View file

@ -162,7 +162,7 @@ required to build a rust package. A simple fix is to use:
```nix
postPatch = ''
cp ${./Cargo.lock} Cargo.lock
ln -s ${./Cargo.lock} Cargo.lock
'';
```

View file

@ -40,17 +40,24 @@ Since release 15.09 there is a new TeX Live packaging that lives entirely under
## Custom packages {#sec-language-texlive-custom-packages}
You may find that you need to use an external TeX package. A derivation for such a package has to provide the contents of the "texmf" directory in its output and provide the appropriate `tlType` attribute (one of `"run"`, `"bin"`, `"doc"`, `"source"`). Dependencies on other TeX packages can be listed in the attribute `tlDeps`.
You may find that you need to use an external TeX package. A derivation for such a package has to provide contents of the "texmf" directory in its output and provide the `tlType` attribute. Here is a (very verbose) example:
Such a derivation must then be listed in the attribute `pkgs` of an attribute set passed to `texlive.combine`, for instance by passing `extraPkgs = { pkgs = [ custom_package ]; };`. Within Nixpkgs, `pkgs` should be part of the derivation itself, allowing users to call `texlive.combine { inherit (texlive) scheme-small; inherit some_tex_package; }`.
Here is a (very verbose) example where the attribute `pkgs` is attached to the derivation itself, which requires creating a fixed point. See also the packages `auctex`, `eukleides`, `mftrace` for more examples.
```nix
with import <nixpkgs> {};
let
foiltex_run = stdenvNoCC.mkDerivation {
foiltex = stdenvNoCC.mkDerivation (finalAttrs: {
pname = "latex-foiltex";
version = "2.1.4b";
passthru.tlType = "run";
passthru = {
pkgs = [ finalAttrs.finalPackage ];
tlDeps = with texlive; [ latex ];
tlType = "run";
};
srcs = [
(fetchurl {
@ -102,8 +109,7 @@ let
maintainers = with maintainers; [ veprbl ];
platforms = platforms.all;
};
};
foiltex = { pkgs = [ foiltex_run ]; };
});
latex_with_foiltex = texlive.combine {
inherit (texlive) scheme-small;

View file

@ -909,6 +909,13 @@ in mkLicense lset) ({
url = "https://github.com/thestk/stk/blob/master/LICENSE";
};
tsl = {
shortName = "TSL";
fullName = "Timescale License Agreegment";
url = "https://github.com/timescale/timescaledb/blob/main/tsl/LICENSE-TIMESCALE";
unfree = true;
};
tcltk = {
spdxId = "TCL";
fullName = "TCL/TK License";

View file

@ -567,15 +567,19 @@ rec {
zipAttrsWith (n: concatLists)
(map (module: let subtree = module.${attr}; in
if !(builtins.isAttrs subtree) then
throw ''
You're trying to declare a value of type `${builtins.typeOf subtree}'
rather than an attribute-set for the option
throw (if attr == "config" then ''
You're trying to define a value of type `${builtins.typeOf subtree}'
rather than an attribute set for the option
`${builtins.concatStringsSep "." prefix}'!
This usually happens if `${builtins.concatStringsSep "." prefix}' has option
definitions inside that are not matched. Please check how to properly define
this option by e.g. referring to `man 5 configuration.nix'!
''
'' else ''
An option declaration for `${builtins.concatStringsSep "." prefix}' has type
`${builtins.typeOf subtree}' rather than an attribute set.
Did you mean to define this outside of `options'?
'')
else
mapAttrs (n: f module) subtree
) modules);

View file

@ -2,7 +2,9 @@
{ lib }:
let
inherit (builtins) length;
inherit (builtins) length;
inherit (lib.trivial) warnIf;
asciiTable = import ./ascii-table.nix;
@ -207,7 +209,20 @@ rec {
normalizePath "/a//b///c/"
=> "/a/b/c/"
*/
normalizePath = s: (builtins.foldl' (x: y: if y == "/" && hasSuffix "/" x then x else x+y) "" (stringToCharacters s));
normalizePath = s:
warnIf
(isPath s)
''
lib.strings.normalizePath: The argument (${toString s}) is a path value, but only strings are supported.
Path values are always normalised in Nix, so there's no need to call this function on them.
This function also copies the path to the Nix store and returns the store path, the same as "''${path}" will, which may not be what you want.
This behavior is deprecated and will throw an error in the future.''
(
builtins.foldl'
(x: y: if y == "/" && hasSuffix "/" x then x else x+y)
""
(stringToCharacters s)
);
/* Depending on the boolean `cond', return either the given string
or the empty string. Useful to concatenate against a bigger string.
@ -240,7 +255,17 @@ rec {
# Prefix to check for
pref:
# Input string
str: substring 0 (stringLength pref) str == pref;
str:
# Before 23.05, paths would be copied to the store before converting them
# to strings and comparing. This was surprising and confusing.
warnIf
(isPath pref)
''
lib.strings.hasPrefix: The first argument (${toString pref}) is a path value, but only strings are supported.
There is almost certainly a bug in the calling code, since this function always returns `false` in such a case.
This function also copies the path to the Nix store, which may not be what you want.
This behavior is deprecated and will throw an error in the future.''
(substring 0 (stringLength pref) str == pref);
/* Determine whether a string has given suffix.
@ -260,8 +285,20 @@ rec {
let
lenContent = stringLength content;
lenSuffix = stringLength suffix;
in lenContent >= lenSuffix &&
substring (lenContent - lenSuffix) lenContent content == suffix;
in
# Before 23.05, paths would be copied to the store before converting them
# to strings and comparing. This was surprising and confusing.
warnIf
(isPath suffix)
''
lib.strings.hasSuffix: The first argument (${toString suffix}) is a path value, but only strings are supported.
There is almost certainly a bug in the calling code, since this function always returns `false` in such a case.
This function also copies the path to the Nix store, which may not be what you want.
This behavior is deprecated and will throw an error in the future.''
(
lenContent >= lenSuffix
&& substring (lenContent - lenSuffix) lenContent content == suffix
);
/* Determine whether a string contains the given infix
@ -278,7 +315,16 @@ rec {
=> false
*/
hasInfix = infix: content:
builtins.match ".*${escapeRegex infix}.*" "${content}" != null;
# Before 23.05, paths would be copied to the store before converting them
# to strings and comparing. This was surprising and confusing.
warnIf
(isPath infix)
''
lib.strings.hasInfix: The first argument (${toString infix}) is a path value, but only strings are supported.
There is almost certainly a bug in the calling code, since this function always returns `false` in such a case.
This function also copies the path to the Nix store, which may not be what you want.
This behavior is deprecated and will throw an error in the future.''
(builtins.match ".*${escapeRegex infix}.*" "${content}" != null);
/* Convert a string to a list of characters (i.e. singleton strings).
This allows you to, e.g., map a function over each character. However,
@ -570,14 +616,23 @@ rec {
prefix:
# Input string
str:
let
# Before 23.05, paths would be copied to the store before converting them
# to strings and comparing. This was surprising and confusing.
warnIf
(isPath prefix)
''
lib.strings.removePrefix: The first argument (${toString prefix}) is a path value, but only strings are supported.
There is almost certainly a bug in the calling code, since this function never removes any prefix in such a case.
This function also copies the path to the Nix store, which may not be what you want.
This behavior is deprecated and will throw an error in the future.''
(let
preLen = stringLength prefix;
sLen = stringLength str;
in
if hasPrefix prefix str then
if substring 0 preLen str == prefix then
substring preLen (sLen - preLen) str
else
str;
str);
/* Return a string without the specified suffix, if the suffix matches.
@ -594,14 +649,23 @@ rec {
suffix:
# Input string
str:
let
# Before 23.05, paths would be copied to the store before converting them
# to strings and comparing. This was surprising and confusing.
warnIf
(isPath suffix)
''
lib.strings.removeSuffix: The first argument (${toString suffix}) is a path value, but only strings are supported.
There is almost certainly a bug in the calling code, since this function never removes any suffix in such a case.
This function also copies the path to the Nix store, which may not be what you want.
This behavior is deprecated and will throw an error in the future.''
(let
sufLen = stringLength suffix;
sLen = stringLength str;
in
if sufLen <= sLen && suffix == substring (sLen - sufLen) sufLen str then
substring 0 (sLen - sufLen) str
else
str;
str);
/* Return true if string v1 denotes a version older than v2.

View file

@ -189,7 +189,7 @@ checkConfigOutput '^"foo"$' config.submodule.foo ./declare-submoduleWith-special
## shorthandOnlyDefines config behaves as expected
checkConfigOutput '^true$' config.submodule.config ./declare-submoduleWith-shorthand.nix ./define-submoduleWith-shorthand.nix
checkConfigError 'is not of type `boolean' config.submodule.config ./declare-submoduleWith-shorthand.nix ./define-submoduleWith-noshorthand.nix
checkConfigError "You're trying to declare a value of type \`bool'\n\s*rather than an attribute-set for the option" config.submodule.config ./declare-submoduleWith-noshorthand.nix ./define-submoduleWith-shorthand.nix
checkConfigError "You're trying to define a value of type \`bool'\n\s*rather than an attribute set for the option" config.submodule.config ./declare-submoduleWith-noshorthand.nix ./define-submoduleWith-shorthand.nix
checkConfigOutput '^true$' config.submodule.config ./declare-submoduleWith-noshorthand.nix ./define-submoduleWith-noshorthand.nix
## submoduleWith should merge all modules in one swoop

View file

@ -850,6 +850,12 @@
githubId = 858965;
name = "Andrew Morsillo";
};
amz-x = {
email = "mail@amz-x.com";
github = "amz-x";
githubId = 18249234;
name = "Christopher Crouse";
};
AnatolyPopov = {
email = "aipopov@live.ru";
github = "AnatolyPopov";
@ -1180,6 +1186,12 @@
githubId = 58516559;
name = "Alexander Rezvov";
};
arian-d = {
email = "arianxdehghani@gmail.com";
github = "arian-d";
githubId = 40076285;
name = "Arian Dehghani";
};
arianvp = {
email = "arian.vanputten@gmail.com";
github = "arianvp";
@ -1329,6 +1341,12 @@
githubId = 84152630;
name = "Ashley Chiara";
};
ashleyghooper = {
email = "ashleyghooper@gmail.com";
github = "ashleyghooper";
githubId = 11037075;
name = "Ashley Hooper";
};
aske = {
email = "aske@fmap.me";
github = "aske";
@ -1457,6 +1475,12 @@
githubId = 574938;
name = "Jonathan Glines";
};
austin-artificial = {
email = "austin.platt@artificial.io";
github = "austin-artificial";
githubId = 126663376;
name = "Austin Platt";
};
austinbutler = {
email = "austinabutler@gmail.com";
github = "austinbutler";
@ -2409,12 +2433,6 @@
githubId = 91694;
name = "Javier Candeira";
};
candyc1oud = {
email = "candyc1oud@outlook.com";
github = "candyc1oud";
githubId = 113157395;
name = "Candy Cloud";
};
canndrew = {
email = "shum@canndrew.org";
github = "canndrew";
@ -2757,6 +2775,15 @@
githubId = 2245737;
name = "Christopher Mark Poole";
};
christoph-heiss = {
email = "christoph@c8h4.io";
github = "christoph-heiss";
githubId = 7571069;
name = "Christoph Heiss";
keys = [{
fingerprint = "9C56 1D64 30B2 8D6B DCBC 9CEB 73D5 E7FD EE3D E49A";
}];
};
chuahou = {
email = "human+github@chuahou.dev";
github = "chuahou";
@ -2989,6 +3016,13 @@
githubId = 298705;
name = "Cyril Cohen";
};
colamaroro = {
name = "Corentin Rondier";
email = "github@rondier.io";
github = "colamaroro";
githubId = 12484955;
matrix = "@colamaroro:lovelyrad.io";
};
cole-h = {
name = "Cole Helbling";
email = "cole.e.helbling@outlook.com";
@ -3252,6 +3286,13 @@
fingerprint = "2B1F 70F9 5F1B 48DA 2265 A7FA A6BC 8B8C EB31 659B";
}];
};
cyntheticfox = {
email = "houstdav000@gmail.com";
github = "cyntheticfox";
githubId = 17628961;
matrix = "@houstdav000:gh0st.ems.host";
name = "Cynthia Fox";
};
cyounkins = {
name = "Craig Younkins";
email = "cyounkins@gmail.com";
@ -3379,6 +3420,11 @@
githubId = 1298344;
name = "Daniel Fullmer";
};
dansbandit = {
github = "dansbandit";
githubId = 4530687;
name = "dansbandit";
};
danth = {
name = "Daniel Thwaites";
email = "danthwaites30@btinternet.com";
@ -3657,6 +3703,12 @@
githubId = 706758;
name = "Christian Gerbrandt";
};
derdennisop = {
email = "dennish@wuitz.de";
github = "derdennisop";
githubId = 52411861;
name = "Dennis";
};
derekcollison = {
email = "derek@nats.io";
github = "derekcollison";
@ -4019,6 +4071,15 @@
githubId = 108501;
name = "David Pflug";
};
dr460nf1r3 = {
email = "root@dr460nf1r3.org";
github = "dr460nf1r3";
githubId = 12834713;
name = "Nico Jensch";
keys = [{
fingerprint = "D245 D484 F357 8CB1 7FD6 DA6B 67DB 29BF F3C9 6757";
}];
};
dramaturg = {
email = "seb@ds.ag";
github = "dramaturg";
@ -4061,12 +4122,6 @@
fingerprint = "85F3 72DF 4AF3 EF13 ED34 72A3 0AAF 2901 E804 0715";
}];
};
drzoidberg = {
email = "jakob@mast3rsoft.com";
github = "jakobneufeld";
githubId = 24791219;
name = "Jakob Neufeld";
};
dsalaza4 = {
email = "podany270895@gmail.com";
github = "dsalaza4";
@ -4137,7 +4192,7 @@
};
dylanmtaylor = {
email = "dylan@dylanmtaylor.com";
github = "dylamtaylor";
github = "dylanmtaylor";
githubId = 277927;
name = "Dylan Taylor";
};
@ -4309,6 +4364,12 @@
githubId = 701128;
name = "Eike Kettner";
};
eken = {
email = "edvin.kallstrom@protonmail.com";
github = "Eken-beep";
name = "Edvin Källström";
githubId = 84442052;
};
ekleog = {
email = "leo@gaspard.io";
matrix = "@leo:gaspard.ninja";
@ -4687,6 +4748,12 @@
githubId = 32169529;
name = "Etienne Jean";
};
ettom = {
email = "ettom22@hotmail.com";
github = "ettom";
githubId = 36895504;
name = "ettom";
};
etu = {
email = "elis@hirwing.se";
matrix = "@etu:semi.social";
@ -4713,6 +4780,13 @@
fingerprint = "8129 5B85 9C5A F703 C2F4 1E29 2D1D 402E 1776 3DD6";
}];
};
evan-goode = {
email = "mail@evangoo.de";
name = "Evan Goode";
github = "evan-goode";
githubId = 7495216;
matrix = "@goode:matrix.org";
};
evanjs = {
email = "evanjsx@gmail.com";
github = "evanjs";
@ -4931,6 +5005,13 @@
keys = [{ fingerprint = "7391 BF2D A2C3 B2C9 BE25 ACA9 C7A7 4616 F302 5DF4"; }];
matrix = "@felipeqq2:pub.solar";
};
felixalbrigtsen = {
email = "felixalbrigtsen@gmail.com";
github = "felixalbrigtsen";
githubId = 64613093;
name = "Felix Albrigtsen";
matrix = "@felixalb:pvv.ntnu.no";
};
felixscheinost = {
name = "Felix Scheinost";
email = "felix.scheinost@posteo.de";
@ -6170,13 +6251,6 @@
githubId = 25618740;
name = "Vincent Cui";
};
houstdav000 = {
email = "houstdav000@gmail.com";
github = "houstdav000";
githubId = 17628961;
matrix = "@houstdav000:gh0st.ems.host";
name = "David Houston";
};
hoverbear = {
email = "operator+nix@hoverbear.org";
matrix = "@hoverbear:matrix.org";
@ -6958,6 +7032,12 @@
githubId = 17029738;
name = "Jean-Charles Quillet";
};
jedsek = {
email = "jedsek@qq.com";
github = "jedsek";
githubId = 63626406;
name = "Jedsek";
};
jefdaj = {
email = "jefdaj@gmail.com";
github = "jefdaj";
@ -7049,6 +7129,13 @@
fingerprint = "7EB1 C02A B62B B464 6D7C E4AE D1D0 9DE1 69EA 19A0";
}];
};
jfvillablanca = {
email = "jmfv.dev@gmail.com";
matrix = "@jfvillablanca:matrix.org";
github = "jfvillablanca";
githubId = 31008330;
name = "Jann Marc Villablanca";
};
jgart = {
email = "jgart@dismail.de";
github = "jgarte";
@ -7816,6 +7903,12 @@
githubId = 1047859;
name = "Kaz Wesley";
};
kazenyuk = {
email = "kazenyuk@pm.me";
github = "nvmd";
githubId = 524492;
name = "Sergey Kazenyuk";
};
kcalvinalvin = {
email = "calvin@kcalvinalvin.info";
github = "kcalvinalvin";
@ -8160,6 +8253,12 @@
github = "konradmalik";
githubId = 13033392;
};
konst-aa = {
email = "konstantin.astafurov@gmail.com";
github = "konst-aa";
githubId = 40547702;
name = "Konstantin Astafurov";
};
koozz = {
email = "koozz@linux.com";
github = "koozz";
@ -9403,6 +9502,12 @@
githubId = 43853194;
name = "Matheus Vieira";
};
mathiassven = {
email = "github@mathiassven.com";
github = "MathiasSven";
githubId = 24759037;
name = "Mathias Sven";
};
mathnerd314 = {
email = "mathnerd314.gph+hs@gmail.com";
github = "Mathnerd314";
@ -10528,6 +10633,12 @@
githubId = 772914;
name = "Mikael Voss";
};
mwdomino = {
email = "matt@dominey.io";
github = "mwdomino";
githubId = 46284538;
name = "Matt Dominey";
};
mwolfe = {
email = "corp@m0rg.dev";
github = "m0rg-dev";
@ -10866,6 +10977,15 @@
githubId = 8214542;
name = "Nicolò Balzarotti";
};
nicoo = {
email = "nicoo@debian.org";
github = "nbraud";
githubId = 1155801;
name = "nicoo";
keys = [{
fingerprint = "E44E 9EA5 4B8E 256A FB73 49D3 EC9D 3708 72BC 7A8C";
}];
};
nidabdella = {
name = "Mohamed Nidabdella";
email = "nidabdella.mohamed@gmail.com";
@ -10936,6 +11056,12 @@
githubId = 47835714;
name = "Nintron";
};
niols = {
email = "niols@niols.fr";
github = "niols";
githubId = 5920602;
name = "Nicolas Jeannerod";
};
nioncode = {
email = "nioncode+github@gmail.com";
github = "nioncode";
@ -12731,11 +12857,6 @@
githubId = 220211;
name = "Renato Garcia";
};
rencire = {
github = "rencire";
githubId = 546296;
name = "Eric Ren";
};
renesat = {
name = "Ivan Smolyakov";
email = "smol.ivan97@gmail.com";
@ -12748,12 +12869,6 @@
githubId = 3302;
name = "Renzo Carbonara";
};
retrry = {
email = "retrry@gmail.com";
github = "retrry";
githubId = 500703;
name = "Tadas Barzdžius";
};
revol-xut = {
email = "revol-xut@protonmail.com";
name = "Tassilo Tanneberger";
@ -13775,12 +13890,6 @@
github = "ShamrockLee";
githubId = 44064051;
};
shanemikel = {
email = "shanepearlman@pm.me";
github = "shanemikel";
githubId = 6720672;
name = "Shane Pearlman";
};
shanesveller = {
email = "shane@sveller.dev";
github = "shanesveller";
@ -14349,6 +14458,12 @@
githubId = 6362238;
name = "Christoph Honal";
};
star-szr = {
email = "nixpkgs@scottr.mailworks.org";
github = "star-szr";
githubId = 327943;
name = "Scott Zhu Reeves";
};
stasjok = {
name = "Stanislav Asunkin";
email = "nixpkgs@stasjok.ru";
@ -14570,13 +14685,6 @@
githubId = 2666479;
name = "Y Nguyen";
};
superherointj = {
name = "Sérgio Marcelo";
email = "sergiomarcelo+nixpkgs@ya.ru";
matrix = "@superherointj:matrix.org";
github = "superherointj";
githubId = 5861043;
};
SuperSandro2000 = {
email = "sandro.jaeckel@gmail.com";
matrix = "@sandro:supersandro.de";
@ -15231,6 +15339,12 @@
githubId = 1618946;
name = "Tiago Castro";
};
tie = {
name = "Ivan Trubach";
email = "mr.trubach@icloud.com";
github = "tie";
githubId = 14792994;
};
tilcreator = {
name = "TilCreator";
email = "contact.nixos@tc-j.de";
@ -16871,6 +16985,12 @@
githubId = 568532;
name = "Christian Zagrodnick";
};
zahrun = {
email = "zahrun@murena.io";
github = "zahrun";
githubId = 10415894;
name = "Zahrun";
};
zakame = {
email = "zakame@zakame.net";
github = "zakame";
@ -17091,6 +17211,12 @@
githubId = 20029431;
name = "Zyansheep";
};
zygot = {
email = "stefan.bordei13@gmail.com";
github = "stefan-bordei";
githubId = 71881325;
name = "Stefan Bordei";
};
zzamboni = {
email = "diego@zzamboni.org";
github = "zzamboni";

View file

@ -40,6 +40,7 @@ sed -r \
-e 's|^constraints:||' \
-e 's|^ +| - |' \
-e 's|,$||' \
-e '/^with-compiler:/d' \
-e '/installed$/d' \
-e '/^$/d' \
< "${tmpfile}" | sort --ignore-case >"${tmpfile_new}"

View file

@ -82,7 +82,7 @@ luautf8,,,,,,pstn
luazip,,,,,,
lua-yajl,,,,,,pstn
luuid,,,,,,
luv,,,,1.43.0-0,,
luv,,,,1.44.2-1,,
lush.nvim,https://github.com/rktjmp/lush.nvim,,,,,teto
lyaml,,,,,,lblasc
markdown,,,,,,


View file

@ -100,7 +100,7 @@ async def commit_changes(name: str, merge_lock: asyncio.Lock, worktree: str, bra
# Git can only handle a single index operation at a time
async with merge_lock:
await check_subprocess('git', 'add', *change['files'], cwd=worktree)
commit_message = '{attrPath}: {oldVersion} {newVersion}'.format(**change)
commit_message = '{attrPath}: {oldVersion} -> {newVersion}'.format(**change)
if 'commitMessage' in change:
commit_message = change['commitMessage']
elif 'commitBody' in change:

View file

@ -288,12 +288,14 @@ with lib.maintainers; {
golang = {
members = [
c00w
kalbasit
mic92
zowoq
qbit
];
githubTeams = [
"golang"
];
scope = "Maintain Golang compilers.";
shortName = "Go";
enableFeatureFreezePing = true;
@ -423,6 +425,21 @@ with lib.maintainers; {
shortName = "Linux Kernel";
};
lisp = {
members = [
raskin
lukego
nagy
uthar
];
githubTeams = [
"lisp"
];
scope = "Maintain the Lisp ecosystem.";
shortName = "lisp";
enableFeatureFreezePing = true;
};
llvm = {
members = [
dtzWill
@ -718,12 +735,14 @@ with lib.maintainers; {
rust = {
members = [
figsoda
lnl7
mic92
tjni
winter
zowoq
];
githubTeams = [
"rust"
];
scope = "Maintain the Rust compiler toolchain and nixpkgs integration.";
shortName = "Rust";
enableFeatureFreezePing = true;

View file

@ -168,7 +168,7 @@ let
./manual.md \
./manual-combined-pre.xml
${pkgs.libxslt.bin}/bin/xsltproc \
xsltproc \
-o manual-combined.xml ${./../../lib/make-options-doc/postprocess-option-descriptions.xsl} \
manual-combined-pre.xml

View file

@ -16,8 +16,12 @@ In addition to numerous new and upgraded packages, this release has the followin
It's recommended to use `nixos-rebuild boot` and `reboot`, rather than `nixos-rebuild switch` - since in some rare cases
the switch of a live system might fail.
- glibc: 2.35 -\> 2.37
- Cinnamon has been updated to 5.6, see [the pull request](https://github.com/NixOS/nixpkgs/pull/201328#issue-1449910204) for what has changed.
- GNOME has been upgraded to version 44. Please see the [release notes](https://release.gnome.org/44/) for details.
- KDE Plasma has been updated to v5.27, see [the release notes](https://kde.org/announcements/plasma/5/5.27.0/) for what has changed.
- `nixos-rebuild` now supports an extra `--specialisation` option that can be used to change specialisation for `switch` and `test` commands.
@ -45,8 +49,14 @@ In addition to numerous new and upgraded packages, this release has the followin
- [gmediarender](https://github.com/hzeller/gmrender-resurrect), a simple, headless UPnP/DLNA renderer. Available as [services.gmediarender](options.html#opt-services.gmediarender.enable).
- [hyprland](https://github.com/hyprwm/hyprland), a dynamic tiling Wayland compositor that doesn't sacrifice on its looks. Available as [programs.hyprland](#opt-programs.hyprland.enable).
- [minipro](https://gitlab.com/DavidGriffith/minipro/), an open source program for controlling the MiniPRO TL866xx series of chip programmers. Available as [programs.minipro](options.html#opt-programs.minipro.enable).
- [stevenblack-blocklist](https://github.com/StevenBlack/hosts), a unified hosts file with base extensions for blocking unwanted websites. Available as [networking.stevenblack](options.html#opt-networking.stevenblack.enable).
- [Budgie Desktop](https://github.com/BuddiesOfBudgie/budgie-desktop), a familiar, modern desktop environment. Available as [services.xserver.desktopManager.budgie](options.html#opt-services.xserver.desktopManager.budgie).
- [imaginary](https://github.com/h2non/imaginary), a microservice for high-level image processing that Nextcloud can use to generate previews. Available as [services.imaginary](#opt-services.imaginary.enable).
- [opensearch](https://opensearch.org), a search server alternative to Elasticsearch. Available as [services.opensearch](options.html#opt-services.opensearch.enable).
@ -71,6 +81,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- [v2rayA](https://v2raya.org), a Linux web GUI client of Project V which supports V2Ray, Xray, SS, SSR, Trojan and Pingtunnel. Available as [services.v2raya](options.html#opt-services.v2raya.enable).
- [wstunnel](https://github.com/erebe/wstunnel), a proxy tunnelling arbitrary TCP or UDP traffic through a WebSocket connection. Instances may be configured via [services.wstunnel](options.html#opt-services.wstunnel.enable).
- [ulogd](https://www.netfilter.org/projects/ulogd/index.html), a userspace logging daemon for netfilter/iptables related logging. Available as [services.ulogd](options.html#opt-services.ulogd.enable).
- [jellyseerr](https://github.com/Fallenbagel/jellyseerr), a web-based requests manager for Jellyfin, forked from Overseerr. Available as [services.jellyseerr](#opt-services.jellyseerr.enable).
@ -85,12 +97,18 @@ In addition to numerous new and upgraded packages, this release has the followin
- [nimdow](https://github.com/avahe-kellenberger/nimdow), a window manager written in Nim, inspired by dwm.
- [trurl](https://github.com/curl/trurl), a command line tool for URL parsing and manipulation.
- [woodpecker-agents](https://woodpecker-ci.org/), a simple CI engine with great extensibility. Available as [services.woodpecker-agents](#opt-services.woodpecker-agents.agents._name_.enable).
- [woodpecker-server](https://woodpecker-ci.org/), a simple CI engine with great extensibility. Available as [services.woodpecker-server](#opt-services.woodpecker-server.enable).
- [ReGreet](https://github.com/rharish101/ReGreet), a clean and customizable greeter for greetd. Available as [programs.regreet](#opt-programs.regreet.enable).
- [v4l2-relayd](https://git.launchpad.net/v4l2-relayd), a streaming relay for v4l2loopback using gstreamer. Available as [services.v4l2-relayd](#opt-services.v4l2-relayd.instances._name_.enable).
- [hardware.ipu6](#opt-hardware.ipu6.enable) adds support for IPU6-based webcams on Intel Tiger Lake and Alder Lake.
## Backward Incompatibilities {#sec-release-23.05-incompatibilities}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
@ -128,6 +146,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- The [services.unifi-video.openFirewall](#opt-services.unifi-video.openFirewall) module option default value has been changed from `true` to `false`. You will need to explicitly set this option to `true`, or configure your firewall.
- The option `i18n.inputMethod.fcitx5.enableRimeData` has been removed. RIME data is now included in `fcitx5-rime` by default, and can be customized using `fcitx5-rime.override { rimeDataPkgs = [ pkgs.rime-data package2 ... ]; }`
- Kime has been updated from 2.5.6 to 3.0.2 and the `i18n.inputMethod.kime.config` option has been removed. Users should use `daemonModules`, `iconColor`, and `extraConfig` options under `i18n.inputMethod.kime` instead.
- `tut` has been updated from 1.0.34 to 2.0.0, and now uses the TOML format for the configuration file instead of INI. Additional information can be found [here](https://github.com/RasmusLindroth/tut/releases/tag/2.0.0).
@ -152,6 +172,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- `lib.systems.examples.ghcjs` and consequently `pkgsCross.ghcjs` now use the target triplet `javascript-unknown-ghcjs` instead of `js-unknown-ghcjs`. This has been done to match an [upstream decision](https://gitlab.haskell.org/ghc/ghc/-/commit/6636b670233522f01d002c9b97827d00289dbf5c) to follow Cabal's platform naming more closely. Nixpkgs will also reject `js` as an architecture name.
- The old unsupported version 6.x of the ELK-stack and Elastic beats have been removed. Use OpenSearch instead.
- The `cosmoc` package has been removed. The upstream scripts in `cosmocc` should be used instead.
- Qt 5.12 and 5.14 have been removed, as the corresponding branches have been EOL upstream for a long time. This affected under 10 packages in nixpkgs, largely unmaintained upstream as well, however, out-of-tree package expressions may need to be updated manually.
@ -187,6 +209,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- [services.xserver.videoDrivers](options.html#opt-services.xserver.videoDrivers) now defaults to the `modesetting` driver over device-specific ones. The `radeon`, `amdgpu` and `nouveau` drivers are still available, but effectively unmaintained and not recommended for use.
- To enable the HTTP3 (QUIC) protocol for a nginx virtual host, set the `quic` attribute on it to true, e.g. `services.nginx.virtualHosts.<name>.quic = true;`.
- conntrack helper autodetection has been removed from kernels 6.0 and up upstream, and an assertion was added to ensure things don't silently stop working. Migrate your configuration to assign helpers explicitly or use an older LTS kernel branch as a temporary workaround.
- The `services.pipewire.config` options have been removed, as they have basically never worked correctly. All behavior defined by the default configuration can be overridden with drop-in files as necessary - see [below](#sec-release-23.05-migration-pipewire) for details.
@ -201,13 +225,18 @@ In addition to numerous new and upgraded packages, this release has the followin
- The `baget` package and module was removed due to being unmaintained.
- `go-ethereum` package has been updated to v1.11.5 and the `puppeth` command is no longer available as of v1.11.0.
- The `pnpm` package has been updated from version 7.29.1 to version 8.1.1, and Node.js 14 support has been discontinued (though there are workarounds if Node.js 14 is still required)
- Migration instructions: ["Before updating pnpm to v8 in your CI, regenerate your pnpm-lock.yaml. To upgrade your lockfile, run pnpm install and commit the changes. Existing dependencies will not be updated; however, due to configuration changes in pnpm v8, some missing peer dependencies may be added to the lockfile and some packages may get deduplicated. You can commit the new lockfile even before upgrading Node.js in the CI, as pnpm v7 already supports the new lockfile format."](https://github.com/pnpm/pnpm/releases/tag/v8.0.0)
## Other Notable Changes {#sec-release-23.05-notable-changes}
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
- `vim_configurable` has been renamed to `vim-full` to avoid confusion: `vim-full`'s build-time features are configurable, but both `vim` and `vim-full` are _customizable_ (in the sense of user configuration, like vimrc).
- Pantheon now defaults to Mutter 42 and GNOME settings daemon 42, all Pantheon packages are now tracking elementary OS 7 updates.
- Pantheon now defaults to Mutter 43 and GNOME settings daemon 43, all Pantheon packages are now tracking elementary OS 7 updates.
- The module for the application firewall `opensnitch` got the ability to configure rules. Available as [services.opensnitch.rules](#opt-services.opensnitch.rules)
@ -225,6 +254,10 @@ In addition to numerous new and upgraded packages, this release has the followin
- `services.openssh.ciphers` to `services.openssh.settings.Ciphers`
- `services.openssh.gatewayPorts` to `services.openssh.settings.GatewayPorts`
- `netbox` was updated to 3.4. NixOS' `services.netbox.package` still defaults to 3.3 if `stateVersion` is earlier than 23.05. Please review upstream's [breaking changes](https://github.com/netbox-community/netbox/releases/tag/v3.4.0), and upgrade NetBox by changing `services.netbox.package`. Database migrations will be run automatically.
- `services.netbox` now supports RFC 0042-style options, through `services.netbox.settings`.
- `services.mastodon` gained a tootctl wrapper named `mastodon-tootctl`, similar to `nextcloud-occ`, which can be executed by any user; it switches to the configured Mastodon user with sudo and sources the environment variables.
- DocBook option documentation, which has been deprecated since 22.11, will now cause a warning when documentation is built. Out-of-tree modules should migrate to using CommonMark documentation as outlined in [](#sec-option-declarations) to silence this warning.
@ -235,6 +268,12 @@ In addition to numerous new and upgraded packages, this release has the followin
- `services.borgmatic` now allows for multiple configurations, placed in `/etc/borgmatic.d/`, you can define them with `services.borgmatic.configurations`.
- `services.openafsServer` features a new backup server `pkgs.fabs` as a
replacement for openafs's own `buserver`. See
[FABS](https://github.com/openafs-contrib/fabs) to check if this is a viable
replacement. It stores backups as volume dump files and thus integrates better
into contemporary backup solutions.
- The `dnsmasq` service now takes configuration via the
`services.dnsmasq.settings` attribute set. The option
`services.dnsmasq.extraConfig` will be deprecated when NixOS 22.11 reaches
@ -284,6 +323,7 @@ In addition to numerous new and upgraded packages, this release has the followin
```
- The `services.dhcpcd` service now doesn't solicit or accept IPv6 Router Advertisements on interfaces that use static IPv6 addresses.
If the network uses both IPv6 Unique Local Addresses (ULA) and global IPv6 address auto-configuration with SLAAC, you must add the parameter `networking.dhcpcd.IPv6rs = true;`.
- The module `services.headscale` was refactored to be compliant with [RFC 0042](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md). To be precise, this means that the following things have changed:
@ -306,6 +346,8 @@ In addition to numerous new and upgraded packages, this release has the followin
- Enabling global redirect in `services.nginx.virtualHosts` now allows one to add exceptions with the `locations` option.
- A new option `proxyCachePath` has been added to `services.nginx`. Learn more about proxy_cache_path: <https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path>.
- A new option `recommendedBrotliSettings` has been added to `services.nginx`. Learn more about compression in Brotli format [here](https://github.com/google/ngx_brotli/blob/master/README.md).
- Updated recommended settings in `services.nginx.recommendedGzipSettings`:
@ -358,10 +400,17 @@ In addition to numerous new and upgraded packages, this release has the followin
- The option `services.prometheus.exporters.pihole.interval` does not exist anymore and has been removed.
- The option `services.gpsd.device` has been replaced with
`services.gpsd.devices`, which supports multiple devices.
- `k3s` can now be configured with an EnvironmentFile for its systemd service, allowing secrets to be provided without ending up in the Nix Store.
- `boot.initrd.luks.devices.<name>` has a new `tryEmptyPassphrase` option; this is useful for OEMs who need to install an encrypted disk with a passphrase that can be set later.
- Lisp gained a [manual section](https://nixos.org/manual/nixpkgs/stable/#lisp), documenting a new and backwards incompatible interface. The previous interface will be removed in a future release.
- The `bind` module now allows the per-zone `allow-query` setting to be configured (previously it was hard-coded to `any`; it still defaults to `any` to retain compatibility).
## Detailed migration information {#sec-release-23.05-migration}
### Pipewire configuration overrides {#sec-release-23.05-migration-pipewire}

View file

@ -733,8 +733,9 @@ in {
sep = "\\$";
base64 = "[a-zA-Z0-9./]+";
id = cryptSchemeIdPatternGroup;
name = "[a-z0-9-]+";
value = "[a-zA-Z0-9/+.-]+";
options = "${id}(=${value})?(,${id}=${value})*";
options = "${name}(=${value})?(,${name}=${value})*";
scheme = "${id}(${sep}${options})?";
content = "${base64}${sep}${base64}(${sep}${base64})?";
mcf = "^${sep}${scheme}${sep}${content}$";

View file

@ -69,50 +69,21 @@ in
package = mkOption {
type = types.package;
internal = true;
default = cfg.mesaPackage;
description = lib.mdDoc ''
The package that provides the OpenGL implementation.
The default is Mesa's drivers which should cover all OpenGL-capable
hardware. If you want to use another Mesa version, adjust
{option}`mesaPackage`.
'';
};
package32 = mkOption {
type = types.package;
internal = true;
default = cfg.mesaPackage32;
description = lib.mdDoc ''
Same as {option}`package` but for the 32-bit OpenGL implementation on
64-bit systems. Used when {option}`driSupport32Bit` is set.
The package that provides the 32-bit OpenGL implementation on
64-bit systems. Used when {option}`driSupport32Bit` is
set.
'';
};
mesaPackage = mkOption {
type = types.package;
default = pkgs.mesa;
defaultText = literalExpression "pkgs.mesa";
example = literalExpression "pkgs.mesa_22";
description = lib.mdDoc ''
The Mesa driver package used for rendering support on the system.
You should only need to adjust this if you require a newer Mesa
version for your hardware or because you need to patch a bug.
'';
apply = mesa: mesa.drivers or (throw "`mesa` package must have a `drivers` output.");
};
mesaPackage32 = mkOption {
type = types.package;
default = pkgs.pkgsi686Linux.mesa;
defaultText = literalExpression "pkgs.pkgsi686Linux.mesa";
example = literalExpression "pkgs.pkgsi686Linux.mesa_22";
description = lib.mdDoc ''
Same as {option}`mesaPackage` but for the 32-bit Mesa on 64-bit
systems. Used when {option}`driSupport32Bit` is set.
'';
apply = mesa: mesa.drivers or (throw "`mesa` package must have a `drivers` output.");
};
extraPackages = mkOption {
type = types.listOf types.package;
default = [];
@ -126,6 +97,7 @@ in
:::
'';
};
extraPackages32 = mkOption {
type = types.listOf types.package;
default = [];
@ -181,6 +153,9 @@ in
environment.sessionVariables.LD_LIBRARY_PATH = mkIf cfg.setLdLibraryPath
([ "/run/opengl-driver/lib" ] ++ optional cfg.driSupport32Bit "/run/opengl-driver-32/lib");
hardware.opengl.package = mkDefault pkgs.mesa.drivers;
hardware.opengl.package32 = mkDefault pkgs.pkgsi686Linux.mesa.drivers;
boot.extraModulePackages = optional (elem "virtualbox" videoDrivers) kernelPackages.virtualboxGuestAdditions;
};
}

View file

@ -26,6 +26,8 @@ let
nvidiaPersistencedEnabled = cfg.nvidiaPersistenced;
nvidiaSettings = cfg.nvidiaSettings;
busIDType = types.strMatching "([[:print:]]+[\:\@][0-9]{1,3}\:[0-9]{1,2}\:[0-9])?";
ibtSupport = cfg.open || (nvidia_x11.ibtSupport or false);
in
{
@ -462,7 +464,7 @@ in
boot.kernelParams = optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
++ optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
++ optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
++ optional (!cfg.open && config.boot.kernelPackages.kernel.kernelAtLeast "6.2" && lib.versionOlder nvidia_x11.version "530") "ibt=off";
++ optional (config.boot.kernelPackages.kernel.kernelAtLeast "6.2" && !ibtSupport) "ibt=off";
services.udev.extraRules =
''

View file

@ -0,0 +1,57 @@
{ config, lib, pkgs, ... }:
let
inherit (lib) mkDefault mkEnableOption mkIf mkOption optional types;
cfg = config.hardware.ipu6;
in
{
options.hardware.ipu6 = {
enable = mkEnableOption (lib.mdDoc "support for Intel IPU6/MIPI cameras");
platform = mkOption {
type = types.enum [ "ipu6" "ipu6ep" ];
description = lib.mdDoc ''
Choose the version for your hardware platform.
Use `ipu6` for Tiger Lake and `ipu6ep` for Alder Lake respectively.
'';
};
};
config = mkIf cfg.enable {
boot.extraModulePackages = with config.boot.kernelPackages; [
ipu6-drivers
];
hardware.firmware = with pkgs; [ ]
++ optional (cfg.platform == "ipu6") ipu6-camera-bin
++ optional (cfg.platform == "ipu6ep") ipu6ep-camera-bin;
services.udev.extraRules = ''
SUBSYSTEM=="intel-ipu6-psys", MODE="0660", GROUP="video"
'';
services.v4l2-relayd.instances.ipu6 = {
enable = mkDefault true;
cardLabel = mkDefault "Intel MIPI Camera";
extraPackages = with pkgs.gst_all_1; [ ]
++ optional (cfg.platform == "ipu6") icamerasrc-ipu6
++ optional (cfg.platform == "ipu6ep") icamerasrc-ipu6ep;
input = {
pipeline = "icamerasrc";
format = mkIf (cfg.platform == "ipu6ep") (mkDefault "NV12");
};
};
};
}
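For reference, a hypothetical `configuration.nix` fragment exercising the module above (the option names and the platform enum are taken from the module itself) might look like:
```
{
  hardware.ipu6 = {
    enable = true;
    platform = "ipu6ep"; # Alder Lake; use "ipu6" for Tiger Lake
  };
}
```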

View file

@ -5,10 +5,9 @@ with lib;
let
im = config.i18n.inputMethod;
cfg = im.fcitx5;
addons = cfg.addons ++ optional cfg.enableRimeData pkgs.rime-data;
fcitx5Package = pkgs.fcitx5-with-addons.override { inherit addons; };
whetherRimeDataDir = any (p: p.pname == "fcitx5-rime") cfg.addons;
in {
fcitx5Package = pkgs.fcitx5-with-addons.override { inherit (cfg) addons; };
in
{
options = {
i18n.inputMethod.fcitx5 = {
addons = mkOption {
@ -19,30 +18,23 @@ in {
Enabled Fcitx5 addons.
'';
};
};
};
enableRimeData = mkEnableOption (lib.mdDoc "default rime-data with fcitx5-rime");
};
};
imports = [
(mkRemovedOptionModule [ "i18n" "inputMethod" "fcitx5" "enableRimeData" ] ''
RIME data is now included in `fcitx5-rime` by default, and can be customized using `fcitx5-rime.override { rimeDataPkgs = ...; }`
'')
];
config = mkIf (im.enabled == "fcitx5") {
i18n.inputMethod.package = fcitx5Package;
environment = mkMerge [{
variables = {
environment.variables = {
GTK_IM_MODULE = "fcitx";
QT_IM_MODULE = "fcitx";
XMODIFIERS = "@im=fcitx";
QT_PLUGIN_PATH = [ "${fcitx5Package}/${pkgs.qt6.qtbase.qtPluginPrefix}" ];
};
}
(mkIf whetherRimeDataDir {
pathsToLink = [
"/share/rime-data"
];
variables = {
NIX_RIME_DATA_DIR = "/run/current-system/sw/share/rime-data";
};
})];
};
}

View file

@ -131,7 +131,8 @@ let
desktopItem = pkgs.makeDesktopItem {
name = "nixos-manual";
desktopName = "NixOS Manual";
genericName = "View NixOS documentation in a web browser";
genericName = "System Manual";
comment = "View NixOS documentation in a web browser";
icon = "nix-snowflake";
exec = "nixos-help";
categories = ["System"];

View file

@ -233,7 +233,7 @@ in
# nix-serve = 199; # unused, removed 2020-12-12
#tvheadend = 200; # dynamically allocated as of 2021-09-18
uwsgi = 201;
gitit = 202;
# gitit = 202; # unused, module was removed 2023-04-03
riemanntools = 203;
subsonic = 204;
# riak = 205; # unused, remove 2022-07-22

View file

@ -99,6 +99,7 @@
./hardware/video/switcheroo-control.nix
./hardware/video/uvcvideo/default.nix
./hardware/video/webcam/facetimehd.nix
./hardware/video/webcam/ipu6.nix
./hardware/wooting.nix
./hardware/xone.nix
./hardware/xpadneo.nix
@ -179,6 +180,7 @@
./programs/haguichi.nix
./programs/hamster.nix
./programs/htop.nix
./programs/hyprland.nix
./programs/iay.nix
./programs/iftop.nix
./programs/i3lock.nix
@ -195,6 +197,7 @@
./programs/mdevctl.nix
./programs/mepo.nix
./programs/mininet.nix
./programs/minipro.nix
./programs/miriway.nix
./programs/mosh.nix
./programs/msmtp.nix
@ -276,6 +279,7 @@
./security/doas.nix
./security/duosec.nix
./security/google_oslogin.nix
./security/ipa.nix
./security/lock-kernel-modules.nix
./security/misc.nix
./security/oath.nix
@ -562,6 +566,7 @@
./services/mail/schleuder.nix
./services/mail/spamassassin.nix
./services/mail/sympa.nix
./services/mail/zeyple.nix
./services/matrix/appservice-discord.nix
./services/matrix/appservice-irc.nix
./services/matrix/conduit.nix
@ -611,7 +616,6 @@
./services/misc/gammu-smsd.nix
./services/misc/geoipupdate.nix
./services/misc/gitea.nix
# ./services/misc/gitit.nix
./services/misc/gitlab.nix
./services/misc/gitolite.nix
./services/misc/gitweb.nix
@ -1038,6 +1042,7 @@
./services/networking/wg-quick.nix
./services/networking/wireguard.nix
./services/networking/wpa_supplicant.nix
./services/networking/wstunnel.nix
./services/networking/x2goserver.nix
./services/networking/xandikos.nix
./services/networking/xinetd.nix
@ -1127,6 +1132,7 @@
./services/video/replay-sorcery.nix
./services/video/rtsp-simple-server.nix
./services/video/unifi-video.nix
./services/video/v4l2-relayd.nix
./services/wayland/cage.nix
./services/web-apps/akkoma.nix
./services/web-apps/alps.nix
@ -1137,6 +1143,7 @@
./services/web-apps/calibre-web.nix
./services/web-apps/coder.nix
./services/web-apps/changedetection-io.nix
./services/web-apps/chatgpt-retrieval-plugin.nix
./services/web-apps/cloudlog.nix
./services/web-apps/code-server.nix
./services/web-apps/convos.nix

View file

@ -52,9 +52,9 @@ with lib;
services.getty.helpLine = ''
The "nixos" and "root" accounts have empty passwords.
An ssh daemon is running. You then must set a password
for either "root" or "nixos" with `passwd` or add an ssh key
to /home/nixos/.ssh/authorized_keys be able to login.
To log in over ssh you must set a password for either "nixos" or "root"
with `passwd` (prefix with `sudo` for "root"), or add your public key to
/home/nixos/.ssh/authorized_keys or /root/.ssh/authorized_keys.
If you need a wireless connection, type
`sudo systemctl start wpa_supplicant` and configure a
@ -65,8 +65,8 @@ with lib;
start the graphical user interface.
'';
# We run sshd by default. Login via root is only possible after adding a
# password via "passwd" or by adding a ssh key to /home/nixos/.ssh/authorized_keys.
# We run sshd by default. Login is only possible after adding a
# password via "passwd" or by adding a ssh key to ~/.ssh/authorized_keys.
# The latter is particularly useful if keys are added manually to the
# installation device for headless systems (e.g. ARM boards) by mounting
# the storage in a different system.

View file

@ -7,6 +7,8 @@ let
keyType = "ed25519";
cfg = config.virtualisation.darwin-builder;
in
{
@ -24,6 +26,57 @@ in
}
];
options.virtualisation.darwin-builder = with lib; {
diskSize = mkOption {
default = 20 * 1024;
type = types.int;
example = 30720;
description = "The maximum disk space allocated to the runner in MB";
};
memorySize = mkOption {
default = 3 * 1024;
type = types.int;
example = 8192;
description = "The runner's memory in MB";
};
min-free = mkOption {
default = 1024 * 1024 * 1024;
type = types.int;
example = 1073741824;
description = ''
The threshold (in bytes) of free disk space left at which to
start garbage collection on the runner
'';
};
max-free = mkOption {
default = 3 * 1024 * 1024 * 1024;
type = types.int;
example = 3221225472;
description = ''
The threshold (in bytes) of free disk space left at which to
stop garbage collection on the runner
'';
};
workingDirectory = mkOption {
default = ".";
type = types.str;
example = "/var/lib/darwin-builder";
description = ''
The working directory in which to run the script. When running
as part of a flake, this must be set to a writable (non-read-only) filesystem.
'';
};
hostPort = mkOption {
default = 22;
type = types.int;
example = 31022;
description = ''
The host port on localhost that is forwarded (TCP) to the guest's SSH port.
'';
};
};
config = {
# The builder is not intended to be used interactively
documentation.enable = false;
@ -52,9 +105,9 @@ in
nix.settings = {
auto-optimise-store = true;
min-free = 1024 * 1024 * 1024;
min-free = cfg.min-free;
max-free = 3 * 1024 * 1024 * 1024;
max-free = cfg.max-free;
trusted-users = [ "root" user ];
};
@ -86,7 +139,13 @@ in
hostPkgs = config.virtualisation.host.pkgs;
script = hostPkgs.writeShellScriptBin "create-builder" ''
script = hostPkgs.writeShellScriptBin "create-builder" (
# When running non-interactively as part of a DarwinConfiguration, the
# working directory must be set to a writable directory.
(if cfg.workingDirectory != "." then ''
${hostPkgs.coreutils}/bin/mkdir --parent "${cfg.workingDirectory}"
cd "${cfg.workingDirectory}"
'' else "") + ''
KEYS="''${KEYS:-./keys}"
${hostPkgs.coreutils}/bin/mkdir --parent "''${KEYS}"
PRIVATE_KEY="''${KEYS}/${user}_${keyType}"
@ -98,8 +157,8 @@ in
if ! ${hostPkgs.diffutils}/bin/cmp "''${PUBLIC_KEY}" ${publicKey}; then
(set -x; sudo --reset-timestamp ${installCredentials} "''${KEYS}")
fi
KEYS="$(nix-store --add "$KEYS")" ${config.system.build.vm}/bin/run-nixos-vm
'';
KEYS="$(${hostPkgs.nix}/bin/nix-store --add "$KEYS")" ${config.system.build.vm}/bin/run-nixos-vm
'');
in
script.overrideAttrs (old: {
@ -139,12 +198,12 @@ in
'';
virtualisation = {
diskSize = 20 * 1024;
diskSize = cfg.diskSize;
memorySize = 3 * 1024;
memorySize = cfg.memorySize;
forwardPorts = [
{ from = "host"; guest.port = 22; host.port = 22; }
{ from = "host"; guest.port = 22; host.port = cfg.hostPort; }
];
# Disable graphics for the builder since users will likely want to run it
@ -176,4 +235,5 @@ in
# restarted.
writableStoreUseTmpfs = false;
};
};
}
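
A hedged sketch of how the newly introduced builder options might be set from a configuration; every value here is illustrative only and simply mirrors the option examples above:

```nix
{
  virtualisation.darwin-builder = {
    diskSize = 30 * 1024;        # MB
    memorySize = 8 * 1024;       # MB
    hostPort = 31022;            # forwarded to the guest's SSH port
    workingDirectory = "/var/lib/darwin-builder";
  };
}
```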

View file

@ -0,0 +1,84 @@
{ config
, lib
, pkgs
, ...
}:
with lib; let
cfg = config.programs.hyprland;
defaultHyprlandPackage = pkgs.hyprland.override {
enableXWayland = cfg.xwayland.enable;
hidpiXWayland = cfg.xwayland.hidpi;
nvidiaPatches = cfg.nvidiaPatches;
};
in
{
options.programs.hyprland = {
enable = mkEnableOption null // {
description = mdDoc ''
Hyprland, the dynamic tiling Wayland compositor that doesn't sacrifice on its looks.
You can manually launch Hyprland by executing {command}`Hyprland` on a TTY.
A configuration file will be generated in {file}`~/.config/hypr/hyprland.conf`.
See <https://wiki.hyprland.org> for more information.
'';
};
package = mkOption {
type = types.path;
default = defaultHyprlandPackage;
defaultText = literalExpression ''
pkgs.hyprland.override {
enableXWayland = config.programs.hyprland.xwayland.enable;
hidpiXWayland = config.programs.hyprland.xwayland.hidpi;
nvidiaPatches = config.programs.hyprland.nvidiaPatches;
}
'';
example = literalExpression "<Hyprland flake>.packages.<system>.default";
description = mdDoc ''
The Hyprland package to use.
Setting this option will make {option}`programs.hyprland.xwayland` and
{option}`programs.hyprland.nvidiaPatches` not work.
'';
};
xwayland = {
enable = mkEnableOption (mdDoc "XWayland") // { default = true; };
hidpi = mkEnableOption null // {
description = mdDoc ''
Enable HiDPI XWayland, based on [XWayland MR 733](https://gitlab.freedesktop.org/xorg/xserver/-/merge_requests/733).
See <https://wiki.hyprland.org/Nix/Options-Overrides/#xwayland-hidpi> for more info.
'';
};
};
nvidiaPatches = mkEnableOption (mdDoc "patching wlroots for better Nvidia support");
};
config = mkIf cfg.enable {
environment = {
systemPackages = [ cfg.package ];
};
fonts.enableDefaultFonts = mkDefault true;
hardware.opengl.enable = mkDefault true;
programs = {
dconf.enable = mkDefault true;
xwayland.enable = mkDefault true;
};
security.polkit.enable = true;
services.xserver.displayManager.sessionPackages = [ cfg.package ];
xdg.portal = {
enable = mkDefault true;
extraPortals = [
pkgs.xdg-desktop-portal-hyprland
];
};
};
}
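
A minimal usage sketch for the new Hyprland module; note (as the option descriptions state) that the `xwayland` and `nvidiaPatches` settings only take effect while `programs.hyprland.package` is left at its default:

```nix
{
  programs.hyprland = {
    enable = true;
    xwayland.hidpi = true;  # illustrative; only honored with the default package
    nvidiaPatches = false;
  };
}
```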

View file

@ -0,0 +1,29 @@
{ config, lib, pkgs, ... }:
let
cfg = config.programs.minipro;
in
{
options = {
programs.minipro = {
enable = lib.mkEnableOption (lib.mdDoc "minipro") // {
description = lib.mdDoc ''
Installs minipro and its udev rules.
Users in the `plugdev` group can interact with connected MiniPRO chip programmers.
'';
};
package = lib.mkPackageOptionMD pkgs "minipro" { };
};
};
config = lib.mkIf cfg.enable {
users.groups.plugdev = { };
environment.systemPackages = [ cfg.package ];
services.udev.packages = [ cfg.package ];
};
meta = {
maintainers = with lib.maintainers; [ infinidoge ];
};
}
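
A short sketch of using the new module; the user name is hypothetical and only shows how membership in the `plugdev` group created by the module grants access to the programmer:

```nix
{
  programs.minipro.enable = true;
  users.users.alice.extraGroups = [ "plugdev" ];
}
```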

View file

@ -6,7 +6,7 @@ let
inherit (lib.attrsets) attrNames filterAttrs hasAttr mapAttrs mapAttrsToList optionalAttrs;
inherit (lib.modules) mkDefault mkIf;
inherit (lib.options) literalExpression mkEnableOption mkOption;
inherit (lib.strings) concatStringsSep optionalString toLower;
inherit (lib.strings) concatLines optionalString toLower;
inherit (lib.types) addCheck attrsOf lines nonEmptyStr nullOr package path port str strMatching submodule;
# Checks if given list of strings contains unique
@ -164,7 +164,7 @@ let
mkLine = k: v: k + optionalString (v!="") " ${v}";
lines = mapAttrsToList mkLine attrset;
in
concatStringsSep "\n" lines;
concatLines lines;
config.stanza = ''
server ${config.name}
${config.text}
@ -263,7 +263,7 @@ let
${optionalString (cfg.defaultServername!=null) "defaultserver ${cfg.defaultServername}"}
${concatStringsSep "\n" (mapAttrsToList (k: v: v.stanza) cfg.servers)}
${concatLines (mapAttrsToList (k: v: v.stanza) cfg.servers)}
'';
in

View file

@ -0,0 +1,258 @@
{
config,
lib,
pkgs,
...
}:
with lib; let
cfg = config.security.ipa;
pyBool = x:
if x
then "True"
else "False";
ldapConf = pkgs.writeText "ldap.conf" ''
# Turning this off breaks GSSAPI used with krb5 when rdns = false
SASL_NOCANON on
URI ldaps://${cfg.server}
BASE ${cfg.basedn}
TLS_CACERT /etc/ipa/ca.crt
'';
nssDb =
pkgs.runCommand "ipa-nssdb"
{
nativeBuildInputs = [pkgs.nss.tools];
} ''
mkdir -p $out
certutil -d $out -N --empty-password
certutil -d $out -A --empty-password -n "${cfg.realm} IPA CA" -t CT,C,C -i ${cfg.certificate}
'';
in {
options = {
security.ipa = {
enable = mkEnableOption (lib.mdDoc "FreeIPA domain integration");
certificate = mkOption {
type = types.package;
description = lib.mdDoc ''
IPA server CA certificate.
Use `nix-prefetch-url http://$server/ipa/config/ca.crt` to
obtain the file and the hash.
'';
example = literalExpression ''
pkgs.fetchurl {
url = http://ipa.example.com/ipa/config/ca.crt;
sha256 = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
};
'';
};
domain = mkOption {
type = types.str;
example = "example.com";
description = lib.mdDoc "Domain of the IPA server.";
};
realm = mkOption {
type = types.str;
example = "EXAMPLE.COM";
description = lib.mdDoc "Kerberos realm.";
};
server = mkOption {
type = types.str;
example = "ipa.example.com";
description = lib.mdDoc "IPA Server hostname.";
};
basedn = mkOption {
type = types.str;
example = "dc=example,dc=com";
description = lib.mdDoc "Base DN to use when performing LDAP operations.";
};
offlinePasswords = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc "Whether to store offline passwords when the server is down.";
};
cacheCredentials = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc "Whether to cache credentials.";
};
ifpAllowedUids = mkOption {
type = types.listOf types.str;
default = ["root"];
description = lib.mdDoc "A list of users allowed to access the ifp dbus interface.";
};
dyndns = {
enable = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc "Whether to enable FreeIPA automatic hostname updates.";
};
interface = mkOption {
type = types.str;
example = "eth0";
default = "*";
description = lib.mdDoc "Network interface to perform hostname updates through.";
};
};
chromiumSupport = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc "Whether to whitelist the FreeIPA domain in Chromium.";
};
};
};
config = mkIf cfg.enable {
assertions = [
{
assertion = !config.krb5.enable;
message = "krb5 must be disabled through `krb5.enable` for FreeIPA integration to work.";
}
{
assertion = !config.users.ldap.enable;
message = "ldap must be disabled through `users.ldap.enable` for FreeIPA integration to work.";
}
];
environment.systemPackages = with pkgs; [krb5Full freeipa];
environment.etc = {
"ipa/default.conf".text = ''
[global]
basedn = ${cfg.basedn}
realm = ${cfg.realm}
domain = ${cfg.domain}
server = ${cfg.server}
host = ${config.networking.hostName}
xmlrpc_uri = https://${cfg.server}/ipa/xml
enable_ra = True
'';
"ipa/nssdb".source = nssDb;
"krb5.conf".text = ''
[libdefaults]
default_realm = ${cfg.realm}
dns_lookup_realm = false
dns_lookup_kdc = true
rdns = false
ticket_lifetime = 24h
forwardable = true
udp_preference_limit = 0
[realms]
${cfg.realm} = {
kdc = ${cfg.server}:88
master_kdc = ${cfg.server}:88
admin_server = ${cfg.server}:749
default_domain = ${cfg.domain}
pkinit_anchors = FILE:/etc/ipa/ca.crt
}
[domain_realm]
.${cfg.domain} = ${cfg.realm}
${cfg.domain} = ${cfg.realm}
${cfg.server} = ${cfg.realm}
[dbmodules]
${cfg.realm} = {
db_library = ${pkgs.freeipa}/lib/krb5/plugins/kdb/ipadb.so
}
'';
"openldap/ldap.conf".source = ldapConf;
};
environment.etc."chromium/policies/managed/freeipa.json" = mkIf cfg.chromiumSupport {
text = ''
{ "AuthServerWhitelist": "*.${cfg.domain}" }
'';
};
system.activationScripts.ipa = stringAfter ["etc"] ''
# libcurl requires a hard copy of the certificate
if ! ${pkgs.diffutils}/bin/diff ${cfg.certificate} /etc/ipa/ca.crt > /dev/null 2>&1; then
rm -f /etc/ipa/ca.crt
cp ${cfg.certificate} /etc/ipa/ca.crt
fi
if [ ! -f /etc/krb5.keytab ]; then
cat <<EOF
In order to complete FreeIPA integration, please join the domain by completing the following steps:
1. Authenticate as an IPA user authorized to join new hosts, e.g. kinit admin@${cfg.realm}
2. Join the domain and obtain the keytab file: ipa-join
3. Install the keytab file: sudo install -m 600 krb5.keytab /etc/
4. Restart sssd systemd service: sudo systemctl restart sssd
EOF
fi
'';
services.sssd.config = ''
[domain/${cfg.domain}]
id_provider = ipa
auth_provider = ipa
access_provider = ipa
chpass_provider = ipa
ipa_domain = ${cfg.domain}
ipa_server = _srv_, ${cfg.server}
ipa_hostname = ${config.networking.hostName}.${cfg.domain}
cache_credentials = ${pyBool cfg.cacheCredentials}
krb5_store_password_if_offline = ${pyBool cfg.offlinePasswords}
${optionalString ((toLower cfg.domain) != (toLower cfg.realm))
"krb5_realm = ${cfg.realm}"}
dyndns_update = ${pyBool cfg.dyndns.enable}
dyndns_iface = ${cfg.dyndns.interface}
ldap_tls_cacert = /etc/ipa/ca.crt
ldap_user_extra_attrs = mail:mail, sn:sn, givenname:givenname, telephoneNumber:telephoneNumber, lock:nsaccountlock
[sssd]
debug_level = 65510
services = nss, sudo, pam, ssh, ifp
domains = ${cfg.domain}
[nss]
homedir_substring = /home
[pam]
pam_pwd_expiration_warning = 3
pam_verbosity = 3
[sudo]
debug_level = 65510
[autofs]
[ssh]
[pac]
[ifp]
user_attributes = +mail, +telephoneNumber, +givenname, +sn, +lock
allowed_uids = ${concatStringsSep ", " cfg.ifpAllowedUids}
'';
services.ntp.servers = singleton cfg.server;
services.sssd.enable = true;
services.ntp.enable = true;
security.pki.certificateFiles = singleton cfg.certificate;
};
}
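
A hedged sketch of joining a FreeIPA domain with the new module; all host names, DNs and the certificate hash are placeholders to be replaced with values from your own IPA deployment:

```nix
{ pkgs, lib, ... }:
{
  security.ipa = {
    enable = true;
    domain = "example.com";
    realm = "EXAMPLE.COM";
    server = "ipa.example.com";
    basedn = "dc=example,dc=com";
    certificate = pkgs.fetchurl {
      url = "http://ipa.example.com/ipa/config/ca.crt";
      sha256 = lib.fakeSha256; # replace with the hash reported by nix-prefetch-url
    };
  };
}
```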

View file

@ -66,6 +66,7 @@ let
${mkKeepArgs cfg} \
${optionalString (cfg.prune.prefix != null) "--glob-archives ${escapeShellArg "${cfg.prune.prefix}*"}"} \
$extraPruneArgs
borg compact $extraArgs $extraCompactArgs
${cfg.postPrune}
'');
@ -638,6 +639,15 @@ in {
example = "--save-space";
};
extraCompactArgs = mkOption {
type = types.str;
description = lib.mdDoc ''
Additional arguments for {command}`borg compact`.
Can also be set at runtime using `$extraCompactArgs`.
'';
default = "";
example = "--cleanup-commits";
};
};
}
));
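
A minimal sketch of a job using the new `extraCompactArgs` option; the repository, paths and schedule are illustrative, only the compaction argument comes from the option example above:

```nix
{
  services.borgbackup.jobs.home = {
    paths = [ "/home" ];
    repo = "/var/backup/home";               # illustrative local repository
    encryption.mode = "none";
    startAt = "daily";
    extraCompactArgs = "--cleanup-commits";  # forwarded to `borg compact` after pruning
  };
}
```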

View file

@ -8,7 +8,8 @@ let
cfg = config.services.buildbot-master;
opt = options.services.buildbot-master;
python = cfg.package.pythonModule;
package = pkgs.python3.pkgs.toPythonModule cfg.package;
python = package.pythonModule;
escapeStr = escape [ "'" ];
@ -212,10 +213,10 @@ in {
package = mkOption {
type = types.package;
default = pkgs.python3Packages.buildbot-full;
defaultText = literalExpression "pkgs.python3Packages.buildbot-full";
default = pkgs.buildbot-full;
defaultText = literalExpression "pkgs.buildbot-full";
description = lib.mdDoc "Package to use for buildbot.";
example = literalExpression "pkgs.python3Packages.buildbot";
example = literalExpression "pkgs.buildbot";
};
packages = mkOption {
@ -255,7 +256,7 @@ in {
after = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ];
path = cfg.packages ++ cfg.pythonPackages python.pkgs;
environment.PYTHONPATH = "${python.withPackages (self: cfg.pythonPackages self ++ [ cfg.package ])}/${python.sitePackages}";
environment.PYTHONPATH = "${python.withPackages (self: cfg.pythonPackages self ++ [ package ])}/${python.sitePackages}";
preStart = ''
mkdir -vp "${cfg.buildbotDir}"

View file

@ -8,7 +8,8 @@ let
cfg = config.services.buildbot-worker;
opt = options.services.buildbot-worker;
python = cfg.package.pythonModule;
package = pkgs.python3.pkgs.toPythonModule cfg.package;
python = package.pythonModule;
tacFile = pkgs.writeText "aur-buildbot-worker.tac" ''
import os
@ -129,7 +130,7 @@ in {
package = mkOption {
type = types.package;
default = pkgs.python3Packages.buildbot-worker;
default = pkgs.buildbot-worker;
defaultText = literalExpression "pkgs.python3Packages.buildbot-worker";
description = lib.mdDoc "Package to use for buildbot worker.";
example = literalExpression "pkgs.python2Packages.buildbot-worker";
@ -168,7 +169,7 @@ in {
after = [ "network.target" "buildbot-master.service" ];
wantedBy = [ "multi-user.target" ];
path = cfg.packages;
environment.PYTHONPATH = "${python.withPackages (p: [ cfg.package ])}/${python.sitePackages}";
environment.PYTHONPATH = "${python.withPackages (p: [ package ])}/${python.sitePackages}";
preStart = ''
mkdir -vp "${cfg.buildbotDir}/info"

View file

@ -636,6 +636,6 @@ in {
};
meta = {
maintainers = with lib.maintainers; [ patternspandemic jonringer erictapen ];
maintainers = with lib.maintainers; [ patternspandemic jonringer ];
};
}

View file

@ -2,10 +2,26 @@
with lib;
let
cfg = config.services.auto-cpufreq;
cfgFilename = "auto-cpufreq.conf";
cfgFile = format.generate cfgFilename cfg.settings;
format = pkgs.formats.ini {};
in {
options = {
services.auto-cpufreq = {
enable = mkEnableOption (lib.mdDoc "auto-cpufreq daemon");
settings = mkOption {
description = lib.mdDoc ''
Configuration for `auto-cpufreq`.
See its [example configuration file] for supported settings.
[example configuration file]: https://github.com/AdnanHodzic/auto-cpufreq/blob/master/auto-cpufreq.conf-example
'';
default = {};
type = types.submodule { freeformType = format.type; };
};
};
};
@ -18,6 +34,11 @@ in {
# Workaround for https://github.com/NixOS/nixpkgs/issues/81138
wantedBy = [ "multi-user.target" ];
path = with pkgs; [ bash coreutils ];
serviceConfig.ExecStart = [
""
"${lib.getExe pkgs.auto-cpufreq} --config ${cfgFile}"
];
};
};
};
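
A sketch of the new freeform `settings` option; the section and key names are assumptions based on the upstream auto-cpufreq.conf example referenced in the description:

```nix
{
  services.auto-cpufreq = {
    enable = true;
    settings = {
      battery = {
        governor = "powersave";
        turbo = "never";
      };
      charger = {
        governor = "performance";
        turbo = "auto";
      };
    };
  };
}
```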

View file

@ -35,7 +35,10 @@ let
# ...
# } ];
usedPlatforms = config:
if isAttrs config then
# don't recurse into derivations possibly creating an infinite recursion
if isDerivation config then
[ ]
else if isAttrs config then
optional (config ? platform) config.platform
++ concatMap usedPlatforms (attrValues config)
else if isList config then
@ -505,6 +508,7 @@ in {
"mysensors"
"nad"
"numato"
"otbr"
"rflink"
"rfxtrx"
"scsgate"

View file

@ -0,0 +1,125 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.zeyple;
ini = pkgs.formats.ini { };
gpgHome = pkgs.runCommand "zeyple-gpg-home" { } ''
mkdir -p $out
for file in ${lib.concatStringsSep " " cfg.keys}; do
${config.programs.gnupg.package}/bin/gpg --homedir="$out" --import "$file"
done
# Remove socket files
rm -f $out/S.*
'';
in {
options.services.zeyple = {
enable = mkEnableOption (lib.mdDoc "Zeyple, an utility program to automatically encrypt outgoing emails with GPG");
user = mkOption {
type = types.str;
default = "zeyple";
description = lib.mdDoc ''
User to run Zeyple as.
::: {.note}
If left as the default value, this user will automatically be created
on system activation; otherwise the sysadmin is responsible for
ensuring the user exists.
:::
'';
};
group = mkOption {
type = types.str;
default = "zeyple";
description = lib.mdDoc ''
Group to use to run Zeyple.
::: {.note}
If left as the default value, this group will automatically be created
on system activation; otherwise the sysadmin is responsible for
ensuring the group exists.
:::
'';
};
settings = mkOption {
type = ini.type;
default = { };
description = lib.mdDoc ''
Zeyple configuration. Refer to
<https://github.com/infertux/zeyple/blob/master/zeyple/zeyple.conf.example>
for details on supported values.
'';
};
keys = mkOption {
type = with types; listOf path;
description = lib.mdDoc "List of public key files that will be imported by gpg.";
};
rotateLogs = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc "Whether to enable rotation of log files.";
};
};
config = mkIf cfg.enable {
users.groups = optionalAttrs (cfg.group == "zeyple") { "${cfg.group}" = { }; };
users.users = optionalAttrs (cfg.user == "zeyple") {
"${cfg.user}" = {
isSystemUser = true;
group = cfg.group;
};
};
services.zeyple.settings = {
zeyple = mapAttrs (name: mkDefault) {
log_file = "/var/log/zeyple/zeyple.log";
force_encrypt = true;
};
gpg = mapAttrs (name: mkDefault) { home = "${gpgHome}"; };
relay = mapAttrs (name: mkDefault) {
host = "localhost";
port = 10026;
};
};
environment.etc."zeyple.conf".source = ini.generate "zeyple.conf" cfg.settings;
systemd.tmpfiles.rules = [ "f '${cfg.settings.zeyple.log_file}' 0600 ${cfg.user} ${cfg.group} - -" ];
services.logrotate = mkIf cfg.rotateLogs {
enable = true;
settings.zeyple = {
files = cfg.settings.zeyple.log_file;
frequency = "weekly";
rotate = 5;
compress = true;
copytruncate = true;
};
};
services.postfix.extraMasterConf = ''
zeyple unix - n n - - pipe
user=${cfg.user} argv=${pkgs.zeyple}/bin/zeyple ''${recipient}
localhost:${toString cfg.settings.relay.port} inet n - n - 10 smtpd
-o content_filter=
-o receive_override_options=no_unknown_recipient_checks,no_header_body_checks,no_milters
-o smtpd_helo_restrictions=
-o smtpd_client_restrictions=
-o smtpd_sender_restrictions=
-o smtpd_recipient_restrictions=permit_mynetworks,reject
-o mynetworks=127.0.0.0/8,[::1]/128
-o smtpd_authorized_xforward_hosts=127.0.0.0/8,[::1]/128
'';
services.postfix.extraConfig = "content_filter = zeyple";
};
}
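
A hedged usage sketch for the new Zeyple module; the key file paths are hypothetical and stand in for any exported GPG public keys of the mail recipients:

```nix
{
  services.zeyple = {
    enable = true;
    keys = [ ./keys/alice.asc ./keys/bob.asc ];
    settings.zeyple.force_encrypt = true; # matches the module default, shown explicitly
  };
}
```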

View file

@ -159,6 +159,15 @@ in
'';
};
};
options.relay_api.database = {
connection_string = lib.mkOption {
type = lib.types.str;
default = "file:relayapi.db";
description = lib.mdDoc ''
Database for the Relay Server.
'';
};
};
options.media_api = {
database = {
connection_string = lib.mkOption {
@ -294,7 +303,7 @@ in
-o /run/dendrite/dendrite.yaml
''];
ExecStart = lib.strings.concatStringsSep " " ([
"${pkgs.dendrite}/bin/dendrite-monolith-server"
"${pkgs.dendrite}/bin/dendrite"
"--config /run/dendrite/dendrite.yaml"
] ++ lib.optionals (cfg.httpPort != null) [
"--http-bind-address :${builtins.toString cfg.httpPort}"

View file

@ -28,6 +28,12 @@ in
description = mdDoc "The host address the atuin server should listen on.";
};
maxHistoryLength = mkOption {
type = types.int;
default = 8192;
description = mdDoc "The max length of each history item the atuin server should store.";
};
port = mkOption {
type = types.port;
default = 8888;
@ -72,6 +78,7 @@ in
environment = {
ATUIN_HOST = cfg.host;
ATUIN_PORT = toString cfg.port;
ATUIN_MAX_HISTORY_LENGTH = toString cfg.maxHistoryLength;
ATUIN_OPEN_REGISTRATION = boolToString cfg.openRegistration;
ATUIN_DB_URI = "postgresql:///atuin";
ATUIN_PATH = cfg.path;
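
A small sketch of raising the new history length limit; the value 16384 is an arbitrary example, not a recommendation from the module:

```nix
{
  services.atuin = {
    enable = true;
    openRegistration = false;
    maxHistoryLength = 16384; # default is 8192
  };
}
```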

View file

@ -1,725 +0,0 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.gitit;
homeDir = "/var/lib/gitit";
toYesNo = b: if b then "yes" else "no";
gititShared = with cfg.haskellPackages; gitit + "/share/" + ghc.targetPrefix + ghc.haskellCompilerName + "/" + gitit.pname + "-" + gitit.version;
gititWithPkgs = hsPkgs: extras: hsPkgs.ghcWithPackages (self: with self; [ gitit ] ++ (extras self));
gititSh = hsPkgs: extras: with pkgs; let
env = gititWithPkgs hsPkgs extras;
in writeScript "gitit" ''
#!${runtimeShell}
cd $HOME
export NIX_GHC="${env}/bin/ghc"
export NIX_GHCPKG="${env}/bin/ghc-pkg"
export NIX_GHC_DOCDIR="${env}/share/doc/ghc/html"
export NIX_GHC_LIBDIR=$( $NIX_GHC --print-libdir )
${env}/bin/gitit -f ${configFile}
'';
gititOptions = {
enable = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc "Enable the gitit service.";
};
haskellPackages = mkOption {
default = pkgs.haskellPackages;
defaultText = literalExpression "pkgs.haskellPackages";
example = literalExpression "pkgs.haskell.packages.ghc784";
description = lib.mdDoc "haskellPackages used to build gitit and plugins.";
};
extraPackages = mkOption {
type = types.functionTo (types.listOf types.package);
default = self: [];
example = literalExpression ''
haskellPackages: [
haskellPackages.wreq
]
'';
description = lib.mdDoc ''
Extra packages available to ghc when running gitit. The
value must be a function which receives the attrset defined
in {var}`haskellPackages` as the sole argument.
'';
};
address = mkOption {
type = types.str;
default = "0.0.0.0";
description = lib.mdDoc "IP address on which the web server will listen.";
};
port = mkOption {
type = types.int;
default = 5001;
description = lib.mdDoc "Port on which the web server will run.";
};
wikiTitle = mkOption {
type = types.str;
default = "Gitit!";
description = lib.mdDoc "The wiki title.";
};
repositoryType = mkOption {
type = types.enum ["git" "darcs" "mercurial"];
default = "git";
description = lib.mdDoc "Specifies the type of repository used for wiki content.";
};
repositoryPath = mkOption {
type = types.path;
default = homeDir + "/wiki";
description = lib.mdDoc ''
Specifies the path of the repository directory. If it does not
exist, gitit will create it on startup.
'';
};
requireAuthentication = mkOption {
type = types.enum [ "none" "modify" "read" ];
default = "modify";
description = lib.mdDoc ''
If 'none', login is never required, and pages can be edited
anonymously. If 'modify', login is required to modify the wiki
(edit, add, delete pages, upload files). If 'read', login is
required to see any wiki pages.
'';
};
authenticationMethod = mkOption {
type = types.enum [ "form" "http" "generic" "github" ];
default = "form";
description = lib.mdDoc ''
'form' means that users will be logged in and registered using forms
in the gitit web interface. 'http' means that gitit will assume that
HTTP authentication is in place and take the logged in username from
the "Authorization" field of the HTTP request header (in addition,
the login/logout and registration links will be suppressed).
'generic' means that gitit will assume that some form of
authentication is in place that directly sets REMOTE_USER to the name
of the authenticated user (e.g. mod_auth_cas on apache). 'rpx' means
that gitit will attempt to log in through https://rpxnow.com. This
requires that 'rpx-domain', 'rpx-key', and 'base-url' be set below,
and that 'curl' be in the system path.
'';
};
userFile = mkOption {
type = types.path;
default = homeDir + "/gitit-users";
description = lib.mdDoc ''
Specifies the path of the file containing user login information. If
it does not exist, gitit will create it (with an empty user list).
This file is not used if 'http' is selected for
authentication-method.
'';
};
sessionTimeout = mkOption {
type = types.int;
default = 60;
description = lib.mdDoc ''
Number of minutes of inactivity before a session expires.
'';
};
staticDir = mkOption {
type = types.path;
default = gititShared + "/data/static";
description = lib.mdDoc ''
Specifies the path of the static directory (containing javascript,
css, and images). If it does not exist, gitit will create it and
populate it with required scripts, stylesheets, and images.
'';
};
defaultPageType = mkOption {
type = types.enum [ "markdown" "rst" "latex" "html" "markdown+lhs" "rst+lhs" "latex+lhs" ];
default = "markdown";
description = lib.mdDoc ''
Specifies the type of markup used to interpret pages in the wiki.
Possible values are markdown, rst, latex, html, markdown+lhs,
rst+lhs, and latex+lhs. (the +lhs variants treat the input as
literate Haskell. See pandoc's documentation for more details.) If
Markdown is selected, pandoc's syntax extensions (for footnotes,
delimited code blocks, etc.) will be enabled. Note that pandoc's
restructuredtext parser is not complete, so some pages may not be
rendered correctly if rst is selected. The same goes for latex and
html.
'';
};
math = mkOption {
type = types.enum [ "mathml" "raw" "mathjax" "jsmath" "google" ];
default = "mathml";
description = lib.mdDoc ''
Specifies how LaTeX math is to be displayed. Possible values are
mathml, raw, mathjax, jsmath, and google. If mathml is selected,
gitit will convert LaTeX math to MathML and link in a script,
MathMLinHTML.js, that allows the MathML to be seen in Gecko browsers,
IE + mathplayer, and Opera. In other browsers you may get a jumble of
characters. If raw is selected, the LaTeX math will be displayed as
raw LaTeX math. If mathjax is selected, gitit will link to the
remote mathjax script. If jsMath is selected, gitit will link to the
script /js/jsMath/easy/load.js, and will assume that jsMath has been
installed into the js/jsMath directory. This is the most portable
solution. If google is selected, the google chart API is called to
render the formula as an image. This requires a connection to google,
and might raise a technical or a privacy problem.
'';
};
mathJaxScript = mkOption {
type = types.str;
default = "https://d3eoax9i5htok0.cloudfront.net/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML";
description = lib.mdDoc ''
Specifies the path to MathJax rendering script. You might want to
use your own MathJax script to render formulas without Internet
connection or if you want to use some special LaTeX packages. Note:
path specified there cannot be an absolute path to a script on your
hdd, instead you should run your (local if you wish) HTTP server
which will serve the MathJax.js script. You can easily (in four lines
of code) serve MathJax.js using
http://happstack.com/docs/crashcourse/FileServing.html Do not forget
the "http://" prefix (e.g. http://localhost:1234/MathJax.js).
'';
};
showLhsBirdTracks = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Specifies whether to show Haskell code blocks in "bird style", with
"> " at the beginning of each line.
'';
};
templatesDir = mkOption {
type = types.path;
default = gititShared + "/data/templates";
description = lib.mdDoc ''
Specifies the path of the directory containing page templates. If it
does not exist, gitit will create it with default templates. Users
may wish to edit the templates to customize the appearance of their
wiki. The template files are HStringTemplate templates. Variables to
be interpolated appear between $\'s. Literal $\'s must be
backslash-escaped.
'';
};
logFile = mkOption {
type = types.path;
default = homeDir + "/gitit.log";
description = lib.mdDoc ''
Specifies the path of gitit's log file. If it does not exist, gitit
will create it. The log is in Apache combined log format.
'';
};
logLevel = mkOption {
type = types.enum [ "DEBUG" "INFO" "NOTICE" "WARNING" "ERROR" "CRITICAL" "ALERT" "EMERGENCY" ];
default = "ERROR";
description = lib.mdDoc ''
Determines how much information is logged. Possible values (from
most to least verbose) are DEBUG, INFO, NOTICE, WARNING, ERROR,
CRITICAL, ALERT, EMERGENCY.
'';
};
frontPage = mkOption {
type = types.str;
default = "Front Page";
description = lib.mdDoc ''
Specifies which wiki page is to be used as the wiki's front page.
Gitit creates a default front page on startup, if one does not exist
already.
'';
};
noDelete = mkOption {
type = types.str;
default = "Front Page, Help";
description = lib.mdDoc ''
Specifies pages that cannot be deleted through the web interface.
(They can still be deleted directly using git or darcs.) A
comma-separated list of page names. Leave blank to allow every page
to be deleted.
'';
};
noEdit = mkOption {
type = types.str;
default = "Help";
description = lib.mdDoc ''
Specifies pages that cannot be edited through the web interface.
Leave blank to allow every page to be edited.
'';
};
defaultSummary = mkOption {
type = types.str;
default = "";
description = lib.mdDoc ''
Specifies text to be used in the change description if the author
leaves the "description" field blank. If default-summary is blank
(the default), the author will be required to fill in the description
field.
'';
};
tableOfContents = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Specifies whether to print a tables of contents (with links to
sections) on each wiki page.
'';
};
plugins = mkOption {
type = with types; listOf str;
default = [ (gititShared + "/plugins/Dot.hs") ];
description = lib.mdDoc ''
Specifies a list of plugins to load. Plugins may be specified either
by their path or by their module name. If the plugin name starts
with Gitit.Plugin., gitit will assume that the plugin is an installed
module and will not try to find a source file.
'';
};
useCache = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Specifies whether to cache rendered pages. Note that if use-feed is
selected, feeds will be cached regardless of the value of use-cache.
'';
};
cacheDir = mkOption {
type = types.path;
default = homeDir + "/cache";
description = lib.mdDoc "Path where rendered pages will be cached.";
};
maxUploadSize = mkOption {
type = types.str;
default = "1000K";
description = lib.mdDoc ''
Specifies an upper limit on the size (in bytes) of files uploaded
through the wiki's web interface. To disable uploads, set this to
0K. This will result in the uploads link disappearing and the
_upload url becoming inactive.
'';
};
maxPageSize = mkOption {
type = types.str;
default = "1000K";
description = lib.mdDoc "Specifies an upper limit on the size (in bytes) of pages.";
};
debugMode = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc "Causes debug information to be logged while gitit is running.";
};
compressResponses = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc "Specifies whether HTTP responses should be compressed.";
};
mimeTypesFile = mkOption {
type = types.path;
default = "/etc/mime/types.info";
description = lib.mdDoc ''
Specifies the path of a file containing mime type mappings. Each
line of the file should contain two fields, separated by whitespace.
The first field is the mime type, the second is a file extension.
For example:
```
video/x-ms-wmx wmx
```
If the file is not found, some simple defaults will be used.
'';
};
useReCaptcha = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
If true, causes gitit to use the reCAPTCHA service
(http://recaptcha.net) to prevent bots from creating accounts.
'';
};
reCaptchaPrivateKey = mkOption {
type = with types; nullOr str;
default = null;
description = lib.mdDoc ''
Specifies the private key for the reCAPTCHA service. To get
these, you need to create an account at http://recaptcha.net.
'';
};
reCaptchaPublicKey = mkOption {
type = with types; nullOr str;
default = null;
description = lib.mdDoc ''
Specifies the public key for the reCAPTCHA service. To get
these, you need to create an account at http://recaptcha.net.
'';
};
accessQuestion = mkOption {
type = types.str;
default = "What is the code given to you by Ms. X?";
description = lib.mdDoc ''
Specifies a question that users must answer when they attempt to
create an account
'';
};
accessQuestionAnswers = mkOption {
type = types.str;
default = "RED DOG, red dog";
description = lib.mdDoc ''
Specifies a question that users must answer when they attempt to
create an account, along with a comma-separated list of acceptable
answers. This can be used to institute a rudimentary password for
signing up as a user on the wiki, or as an alternative to reCAPTCHA.
Example:
access-question: What is the code given to you by Ms. X?
access-question-answers: RED DOG, red dog
'';
};
rpxDomain = mkOption {
type = with types; nullOr str;
default = null;
description = lib.mdDoc ''
Specifies the domain and key of your RPX account. The domain is just
the prefix of the complete RPX domain, so if your full domain is
'https://foo.rpxnow.com/', use 'foo' as the value of rpx-domain.
'';
};
rpxKey = mkOption {
type = with types; nullOr str;
default = null;
description = lib.mdDoc "RPX account access key.";
};
mailCommand = mkOption {
type = types.str;
default = "sendmail %s";
description = lib.mdDoc ''
Specifies the command to use to send notification emails. '%s' will
be replaced by the destination email address. The body of the
message will be read from stdin. If this field is left blank,
password reset will not be offered.
'';
};
resetPasswordMessage = mkOption {
type = types.lines;
default = ''
> From: gitit@$hostname$
> To: $useremail$
> Subject: Wiki password reset
>
> Hello $username$,
>
> To reset your password, please follow the link below:
> http://$hostname$:$port$$resetlink$
>
> Regards
'';
description = lib.mdDoc ''
Gives the text of the message that will be sent to the user should
she want to reset her password, or change other registration info.
The lines must be indented, and must begin with '>'. The initial
spaces and '> ' will be stripped off. $username$ will be replaced by
the user's username, $useremail$ by her email address, $hostname$ by
the hostname on which the wiki is running (as returned by the
hostname system call), $port$ by the port on which the wiki is
running, and $resetlink$ by the relative path of a reset link derived
from the user's existing hashed password. If your gitit wiki is being
proxied to a location other than the root path of $port$, you should
change the link to reflect this: for example, to
http://$hostname$/path/to/wiki$resetlink$ or
http://gitit.$hostname$$resetlink$
'';
};
useFeed = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Specifies whether an ATOM feed should be enabled (for the site and
for individual pages).
'';
};
baseUrl = mkOption {
type = with types; nullOr str;
default = null;
description = lib.mdDoc ''
The base URL of the wiki, to be used in constructing feed IDs and RPX
token_urls. Set this if useFeed is false or authentication-method
is 'rpx'.
'';
};
absoluteUrls = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Make wikilinks absolute with respect to the base-url. So, for
example, in a wiki served at the base URL '/wiki', on a page
Sub/Page, the wikilink `[Cactus]()` will produce a link to
'/wiki/Cactus' if absoluteUrls is true, and a relative link to
'Cactus' (referring to '/wiki/Sub/Cactus') if absolute-urls is 'no'.
'';
};
feedDays = mkOption {
type = types.int;
default = 14;
description = lib.mdDoc "Number of days to be included in feeds.";
};
feedRefreshTime = mkOption {
type = types.int;
default = 60;
description = lib.mdDoc "Number of minutes to cache feeds before refreshing.";
};
pdfExport = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
If true, PDF will appear in export options. PDF will be created using
pdflatex, which must be installed and in the path. Note that PDF
exports create significant additional server load.
'';
};
pandocUserData = mkOption {
type = with types; nullOr path;
default = null;
description = lib.mdDoc ''
If a directory is specified, this will be searched for pandoc
customizations. These can include a templates/ directory for custom
templates for various export formats, an S5 directory for custom S5
styles, and a reference.odt for ODT exports. If no directory is
specified, $HOME/.pandoc will be searched. See pandoc's README for
more information.
'';
};
xssSanitize = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
If true, all HTML (including that produced by pandoc) is filtered
through xss-sanitize. Set to no only if you trust all of your users.
'';
};
oauthClientId = mkOption {
type = with types; nullOr str;
default = null;
description = lib.mdDoc "OAuth client ID";
};
oauthClientSecret = mkOption {
type = with types; nullOr str;
default = null;
description = lib.mdDoc "OAuth client secret";
};
oauthCallback = mkOption {
type = with types; nullOr str;
default = null;
description = lib.mdDoc "OAuth callback URL";
};
oauthAuthorizeEndpoint = mkOption {
type = with types; nullOr str;
default = null;
description = lib.mdDoc "OAuth authorize endpoint";
};
oauthAccessTokenEndpoint = mkOption {
type = with types; nullOr str;
default = null;
description = lib.mdDoc "OAuth access token endpoint";
};
githubOrg = mkOption {
type = with types; nullOr str;
default = null;
description = lib.mdDoc "Github organization";
};
};
configFile = pkgs.writeText "gitit.conf" ''
address: ${cfg.address}
port: ${toString cfg.port}
wiki-title: ${cfg.wikiTitle}
repository-type: ${cfg.repositoryType}
repository-path: ${cfg.repositoryPath}
require-authentication: ${cfg.requireAuthentication}
authentication-method: ${cfg.authenticationMethod}
user-file: ${cfg.userFile}
session-timeout: ${toString cfg.sessionTimeout}
static-dir: ${cfg.staticDir}
default-page-type: ${cfg.defaultPageType}
math: ${cfg.math}
mathjax-script: ${cfg.mathJaxScript}
show-lhs-bird-tracks: ${toYesNo cfg.showLhsBirdTracks}
templates-dir: ${cfg.templatesDir}
log-file: ${cfg.logFile}
log-level: ${cfg.logLevel}
front-page: ${cfg.frontPage}
no-delete: ${cfg.noDelete}
no-edit: ${cfg.noEdit}
default-summary: ${cfg.defaultSummary}
table-of-contents: ${toYesNo cfg.tableOfContents}
plugins: ${concatStringsSep "," cfg.plugins}
use-cache: ${toYesNo cfg.useCache}
cache-dir: ${cfg.cacheDir}
max-upload-size: ${cfg.maxUploadSize}
max-page-size: ${cfg.maxPageSize}
debug-mode: ${toYesNo cfg.debugMode}
compress-responses: ${toYesNo cfg.compressResponses}
mime-types-file: ${cfg.mimeTypesFile}
use-recaptcha: ${toYesNo cfg.useReCaptcha}
recaptcha-private-key: ${toString cfg.reCaptchaPrivateKey}
recaptcha-public-key: ${toString cfg.reCaptchaPublicKey}
access-question: ${cfg.accessQuestion}
access-question-answers: ${cfg.accessQuestionAnswers}
rpx-domain: ${toString cfg.rpxDomain}
rpx-key: ${toString cfg.rpxKey}
mail-command: ${cfg.mailCommand}
reset-password-message: ${cfg.resetPasswordMessage}
use-feed: ${toYesNo cfg.useFeed}
base-url: ${toString cfg.baseUrl}
absolute-urls: ${toYesNo cfg.absoluteUrls}
feed-days: ${toString cfg.feedDays}
feed-refresh-time: ${toString cfg.feedRefreshTime}
pdf-export: ${toYesNo cfg.pdfExport}
pandoc-user-data: ${toString cfg.pandocUserData}
xss-sanitize: ${toYesNo cfg.xssSanitize}
[Github]
oauthclientid: ${toString cfg.oauthClientId}
oauthclientsecret: ${toString cfg.oauthClientSecret}
oauthcallback: ${toString cfg.oauthCallback}
oauthauthorizeendpoint: ${toString cfg.oauthAuthorizeEndpoint}
oauthaccesstokenendpoint: ${toString cfg.oauthAccessTokenEndpoint}
github-org: ${toString cfg.githubOrg}
'';
in
{
options.services.gitit = gititOptions;
config = mkIf cfg.enable {
users.users.gitit = {
group = config.users.groups.gitit.name;
description = "Gitit user";
home = homeDir;
createHome = true;
uid = config.ids.uids.gitit;
};
users.groups.gitit.gid = config.ids.gids.gitit;
systemd.services.gitit = let
uid = toString config.ids.uids.gitit;
gid = toString config.ids.gids.gitit;
in {
description = "Git and Pandoc Powered Wiki";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
path = with pkgs; [ curl ]
++ optional cfg.pdfExport texlive.combined.scheme-basic
++ optional (cfg.repositoryType == "darcs") darcs
++ optional (cfg.repositoryType == "mercurial") mercurial
++ optional (cfg.repositoryType == "git") git;
preStart = let
gm = "gitit@${config.networking.hostName}";
in
with cfg; ''
chown ${uid}:${gid} -R ${homeDir}
for dir in ${repositoryPath} ${staticDir} ${templatesDir} ${cacheDir}
do
if [ ! -d $dir ]
then
mkdir -p $dir
find $dir -type d -exec chmod 0750 {} +
find $dir -type f -exec chmod 0640 {} +
fi
done
cd ${repositoryPath}
${
if repositoryType == "darcs" then
''
if [ ! -d _darcs ]
then
darcs initialize
echo "${gm}" > _darcs/prefs/email
''
else if repositoryType == "mercurial" then
''
if [ ! -d .hg ]
then
hg init
cat >> .hg/hgrc <<NAMED
[ui]
username = gitit ${gm}
NAMED
''
else
''
if [ ! -d .git ]
then
git init
git config user.email "${gm}"
git config user.name "gitit"
''}
chown ${uid}:${gid} -R ${repositoryPath}
fi
cd -
'';
serviceConfig = {
User = config.users.users.gitit.name;
Group = config.users.groups.gitit.name;
ExecStart = with cfg; gititSh haskellPackages extraPackages;
};
};
};
}

View file

@ -1,4 +1,4 @@
{ config, lib, pkgs, ... }:
{ config, lib, pkgs, utils, ... }:
with lib;
@ -8,12 +8,15 @@ let
gid = config.ids.gids.gpsd;
cfg = config.services.gpsd;
in
{
in {
###### interface
imports = [
(lib.mkRemovedOptionModule [ "services" "gpsd" "device" ]
"Use `services.gpsd.devices` instead.")
];
options = {
services.gpsd = {
@ -26,13 +29,17 @@ in
'';
};
device = mkOption {
type = types.str;
default = "/dev/ttyUSB0";
devices = mkOption {
type = types.listOf types.str;
default = [ "/dev/ttyUSB0" ];
description = lib.mdDoc ''
A device may be a local serial device for GPS input, or a URL of the form:
`[{dgpsip|ntrip}://][user:passwd@]host[:port][/stream]`
in which case it specifies an input source for DGPS or ntrip data.
List of devices that `gpsd` should subscribe to.
A device may be a local serial device for GPS input, or a
URL of the form:
`[{dgpsip|ntrip}://][user:passwd@]host[:port][/stream]` in
which case it specifies an input source for DGPS or ntrip
data.
'';
};
@ -89,13 +96,12 @@ in
};
###### implementation
config = mkIf cfg.enable {
users.users.gpsd =
{ inherit uid;
users.users.gpsd = {
inherit uid;
group = "gpsd";
description = "gpsd daemon user";
home = "/var/empty";
@ -109,13 +115,15 @@ in
after = [ "network.target" ];
serviceConfig = {
Type = "forking";
ExecStart = ''
ExecStart = let
devices = utils.escapeSystemdExecArgs cfg.devices;
in ''
${pkgs.gpsd}/sbin/gpsd -D "${toString cfg.debugLevel}" \
-S "${toString cfg.port}" \
${optionalString cfg.readonly "-b"} \
${optionalString cfg.nowait "-n"} \
${optionalString cfg.listenany "-G"} \
"${cfg.device}"
${devices}
'';
};
};
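
A minimal sketch of the new list-valued option that replaces `services.gpsd.device`; the device path and ntrip URL are placeholders following the format given in the option description:

```nix
{
  services.gpsd = {
    enable = true;
    devices = [
      "/dev/ttyACM0"
      "ntrip://user:passwd@caster.example.org:2101/STREAM"
    ];
  };
}
```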

View file

@ -107,7 +107,7 @@ in {
];
services.grafana.settings.rendering = mkIf cfg.provisionGrafana {
url = "http://localhost:${toString cfg.settings.service.port}/render";
server_url = "http://localhost:${toString cfg.settings.service.port}/render";
callback_url = "http://localhost:${toString config.services.grafana.settings.server.http_port}";
};

View file

@ -4,7 +4,8 @@
with import ./lib.nix { inherit config lib pkgs; };
let
inherit (lib) concatStringsSep literalExpression mkIf mkOption optionalString types;
inherit (lib) concatStringsSep literalExpression mkIf mkOption mkEnableOption
optionalString types;
bosConfig = pkgs.writeText "BosConfig" (''
restrictmode 1
@ -24,9 +25,15 @@ let
parm ${openafsSrv}/libexec/openafs/salvageserver ${cfg.roles.fileserver.salvageserverArgs}
parm ${openafsSrv}/libexec/openafs/dasalvager ${cfg.roles.fileserver.salvagerArgs}
end
'') + (optionalString (cfg.roles.database.enable && cfg.roles.backup.enable) ''
'') + (optionalString (cfg.roles.database.enable && cfg.roles.backup.enable && (!cfg.roles.backup.enableFabs)) ''
bnode simple buserver 1
parm ${openafsSrv}/libexec/openafs/buserver ${cfg.roles.backup.buserverArgs} ${optionalString (cfg.roles.backup.cellServDB != []) "-cellservdb /etc/openafs/backup/"}
parm ${openafsSrv}/libexec/openafs/buserver ${cfg.roles.backup.buserverArgs} ${optionalString useBuCellServDB "-cellservdb /etc/openafs/backup/"}
end
'') + (optionalString (cfg.roles.database.enable &&
cfg.roles.backup.enable &&
cfg.roles.backup.enableFabs) ''
bnode simple buserver 1
parm ${lib.getBin pkgs.fabs}/bin/fabsys server --config ${fabsConfFile} ${cfg.roles.backup.fabsArgs}
end
''));
@ -34,12 +41,27 @@ let
pkgs.writeText "NetInfo" ((concatStringsSep "\nf " cfg.advertisedAddresses) + "\n")
else null;
buCellServDB = pkgs.writeText "backup-cellServDB-${cfg.cellName}" (mkCellServDB cfg.cellName cfg.roles.backup.cellServDB);
buCellServDB = pkgs.writeText "backup-cellServDB-${cfg.cellName}"
(mkCellServDB cfg.cellName cfg.roles.backup.cellServDB);
useBuCellServDB = (cfg.roles.backup.cellServDB != []) && (!cfg.roles.backup.enableFabs);
cfg = config.services.openafsServer;
udpSizeStr = toString cfg.udpPacketSize;
fabsConfFile = pkgs.writeText "fabs.yaml" (builtins.toJSON ({
afs = {
aklog = cfg.package + "/bin/aklog";
cell = cfg.cellName;
dumpscan = cfg.package + "/bin/afsdump_scan";
fs = cfg.package + "/bin/fs";
pts = cfg.package + "/bin/pts";
vos = cfg.package + "/bin/vos";
};
k5start.command = (lib.getBin pkgs.kstart) + "/bin/k5start";
} // cfg.roles.backup.fabsExtraConfig));
in {
options = {
@ -80,8 +102,8 @@ in {
};
package = mkOption {
default = pkgs.openafs.server or pkgs.openafs;
defaultText = literalExpression "pkgs.openafs.server or pkgs.openafs";
default = pkgs.openafs;
defaultText = literalExpression "pkgs.openafs";
type = types.package;
description = lib.mdDoc "OpenAFS package for the server binaries";
};
@ -154,16 +176,20 @@ in {
};
backup = {
enable = mkOption {
default = false;
type = types.bool;
description = lib.mdDoc ''
Backup server role. Use in conjunction with the
enable = mkEnableOption (lib.mdDoc ''
Backup server role. When using the OpenAFS built-in buserver, use it in conjunction with the
`database` role to maintain the Backup
Database. Normally only used in conjunction with tape storage
or IBM's Tivoli Storage Manager.
'';
};
For a modern backup server, enable this role and see
{option}`enableFabs`.
'');
enableFabs = mkEnableOption (lib.mdDoc ''
FABS, the flexible AFS backup system. It stores volumes as dump files, relying on other
pre-existing backup solutions for handling them.
'');
buserverArgs = mkOption {
default = "";
@ -181,6 +207,30 @@ in {
other database server machines.
'';
};
fabsArgs = mkOption {
default = "";
type = types.str;
description = lib.mdDoc ''
Arguments to the fabsys process. See
{manpage}`fabsys_server(1)` and
{manpage}`fabsys_config(1)`.
'';
};
fabsExtraConfig = mkOption {
default = {};
type = types.attrs;
description = lib.mdDoc ''
Additional configuration parameters for the FABS backup server.
'';
example = literalExpression ''
{
afs.localauth = true;
afs.keytab = config.sops.secrets.fabsKeytab.path;
}
'';
};
};
};
@ -239,7 +289,7 @@ in {
mode = "0644";
};
buCellServDB = {
enable = (cfg.roles.backup.cellServDB != []);
enable = useBuCellServDB;
text = mkCellServDB cfg.cellName cfg.roles.backup.cellServDB;
target = "openafs/backup/CellServDB";
};
@ -257,7 +307,7 @@ in {
preStart = ''
mkdir -m 0755 -p /var/openafs
${optionalString (netInfo != null) "cp ${netInfo} /var/openafs/netInfo"}
${optionalString (cfg.roles.backup.cellServDB != []) "cp ${buCellServDB}"}
${optionalString useBuCellServDB "cp ${buCellServDB}"}
'';
serviceConfig = {
ExecStart = "${openafsBin}/bin/bosserver -nofork";

View file

@ -36,6 +36,17 @@ let
description = lib.mdDoc "Addresses who may request zone transfers.";
default = [ ];
};
allowQuery = mkOption {
type = types.listOf types.str;
description = lib.mdDoc ''
List of address ranges allowed to query this zone. Instead of the address(es), this may
contain the single string "any".
NOTE: This overrides the global-level `allow-query` setting, which is set to the contents
of `cachenetworks`.
'';
default = [ "any" ];
};
extraConfig = mkOption {
type = types.str;
description = lib.mdDoc "Extra zone config to be appended at the end of the zone section.";
@ -69,7 +80,7 @@ let
${cfg.extraConfig}
${ concatMapStrings
({ name, file, master ? true, slaves ? [], masters ? [], extraConfig ? "" }:
({ name, file, master ? true, slaves ? [], masters ? [], allowQuery ? [], extraConfig ? "" }:
''
zone "${name}" {
type ${if master then "master" else "slave"};
@ -87,6 +98,7 @@ let
};
''
}
allow-query { ${concatMapStrings (ip: "${ip}; ") allowQuery}};
${extraConfig}
};
'')
@ -119,7 +131,9 @@ in
description = lib.mdDoc ''
What networks are allowed to use us as a resolver. Note
that this is for recursive queries -- all networks are
allowed to query zones configured with the `zones` option.
allowed to query zones configured with the `zones` option
by default (although this may be overridden within each
zone's configuration, via the `allowQuery` option).
It is recommended that you limit cacheNetworks to avoid your
server being used for DNS amplification attacks.
'';
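
A hedged sketch of a zone restricted with the new `allowQuery` option; the zone name, file path and address range are placeholders, and the attribute-set form of `services.bind.zones` is assumed:

```nix
{
  services.bind = {
    enable = true;
    zones."example.org" = {
      master = true;
      file = "/var/lib/bind/example.org.zone"; # illustrative path
      allowQuery = [ "192.168.0.0/24" ];       # overrides the global allow-query for this zone
    };
  };
}
```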

View file

@ -29,9 +29,9 @@ let
configFile = if (cfg.configFile != null) then cfg.configFile else configFile';
preStart = ''
install ${configFile} /run/${RuntimeDirectory}/ddclient.conf
install --mode=600 --owner=$USER ${configFile} /run/${RuntimeDirectory}/ddclient.conf
${lib.optionalString (cfg.configFile == null) (if (cfg.protocol == "nsupdate") then ''
install ${cfg.passwordFile} /run/${RuntimeDirectory}/ddclient.key
install --mode=600 --owner=$USER ${cfg.passwordFile} /run/${RuntimeDirectory}/ddclient.key
'' else if (cfg.passwordFile != null) then ''
"${pkgs.replace-secret}/bin/replace-secret" "@password_placeholder@" "${cfg.passwordFile}" "/run/${RuntimeDirectory}/ddclient.conf"
'' else ''
@ -218,6 +218,7 @@ with lib;
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
restartTriggers = optional (cfg.configFile != null) cfg.configFile;
path = lib.optional (lib.hasPrefix "if," cfg.use) pkgs.iproute2;
serviceConfig = {
DynamicUser = true;

View file

@ -81,9 +81,12 @@ let
noipv6
''}
${cfg.extraConfig}
${optionalString (config.networking.enableIPv6 && cfg.IPv6rs == null && staticIPv6Addresses != [ ]) noIPv6rs}
${optionalString (config.networking.enableIPv6 && cfg.IPv6rs == false) ''
noipv6rs
''}
${optionalString config.networking.enableIPv6 noIPv6rs}
${cfg.extraConfig}
'';
exitHook = pkgs.writeText "dhcpcd.exit-hook"
@ -160,6 +163,16 @@ in
'';
};
networking.dhcpcd.IPv6rs = mkOption {
type = types.nullOr types.bool;
default = null;
description = lib.mdDoc ''
Force enable or disable solicitation and receipt of IPv6 Router Advertisements.
This is required, for example, when using a static unique local IPv6 address (ULA)
and global IPv6 address auto-configuration with SLAAC.
'';
};
networking.dhcpcd.runHook = mkOption {
type = types.lines;
default = "";

View file

@ -218,6 +218,13 @@ in
systemd.services = dhcpdService "4" cfg4 // dhcpdService "6" cfg6;
warnings = [
''
The dhcpd4 and dhcpd6 modules will be removed from NixOS 23.11, because ISC DHCP reached its end of life.
See https://www.isc.org/blogs/isc-dhcp-eol/ for details.
Please switch to a different implementation like kea, systemd-networkd or dnsmasq.
''
];
};
}

View file

@ -42,7 +42,7 @@ let
configPath = pkgs.writeText "smokeping.conf" configFile;
cgiHome = pkgs.writeScript "smokeping.fcgi" ''
#!${pkgs.bash}/bin/bash
${cfg.package}/bin/smokeping_cgi ${configPath}
${cfg.package}/bin/smokeping_cgi /etc/smokeping.conf
'';
in
@ -307,6 +307,7 @@ in
source = "${pkgs.fping}/bin/fping";
};
};
environment.etc."smokeping.conf".source = configPath;
environment.systemPackages = [ pkgs.fping ];
users.users.${cfg.user} = {
isNormalUser = false;
@ -327,18 +328,23 @@ in
# Thus, we need to make `smokepingHome` (which is given to `thttpd -d` below) `755`.
homeMode = "755";
};
users.groups.${cfg.user} = {};
users.groups.${cfg.user} = { };
systemd.services.smokeping = {
requiredBy = [ "multi-user.target"];
reloadTriggers = [ configPath ];
requiredBy = [ "multi-user.target" ];
serviceConfig = {
User = cfg.user;
Restart = "on-failure";
ExecStart = "${cfg.package}/bin/smokeping --config=${configPath} --nodaemon";
ExecStart = "${cfg.package}/bin/smokeping --config=/etc/smokeping.conf --nodaemon";
};
preStart = ''
mkdir -m 0755 -p ${smokepingHome}/cache ${smokepingHome}/data
rm -f ${smokepingHome}/cropper
ln -s ${cfg.package}/htdocs/cropper ${smokepingHome}/cropper
rm -f ${smokepingHome}/css
ln -s ${cfg.package}/htdocs/css ${smokepingHome}/css
rm -f ${smokepingHome}/js
ln -s ${cfg.package}/htdocs/js ${smokepingHome}/js
rm -f ${smokepingHome}/smokeping.fcgi
ln -s ${cgiHome} ${smokepingHome}/smokeping.fcgi
${cfg.package}/bin/smokeping --check --config=${configPath}

View file

@ -194,5 +194,5 @@ in
(mkRenamedOptionModule [ "services" "unifi" "openPorts" ] [ "services" "unifi" "openFirewall" ])
];
meta.maintainers = with lib.maintainers; [ erictapen pennae ];
meta.maintainers = with lib.maintainers; [ pennae ];
}

View file

@ -0,0 +1,429 @@
{ config, lib, options, pkgs, utils, ... }:
with lib;
let
cfg = config.services.wstunnel;
attrsToArgs = attrs: utils.escapeSystemdExecArgs (
mapAttrsToList
(name: value: if value == true then "--${name}" else "--${name}=${value}")
attrs
);
hostPortSubmodule = {
options = {
host = mkOption {
description = mdDoc "The hostname.";
type = types.str;
};
port = mkOption {
description = mdDoc "The port.";
type = types.port;
};
};
};
localRemoteSubmodule = {
options = {
local = mkOption {
description = mdDoc "Local address and port to listen on.";
type = types.submodule hostPortSubmodule;
example = {
host = "127.0.0.1";
port = 51820;
};
};
remote = mkOption {
description = mdDoc "Address and port on remote to forward traffic to.";
type = types.submodule hostPortSubmodule;
example = {
host = "127.0.0.1";
port = 51820;
};
};
};
};
hostPortToString = { host, port }: "${host}:${builtins.toString port}";
localRemoteToString = { local, remote }: utils.escapeSystemdExecArg "${hostPortToString local}:${hostPortToString remote}";
commonOptions = {
enable = mkOption {
description = mdDoc "Whether to enable this `wstunnel` instance.";
type = types.bool;
default = true;
};
package = mkPackageOptionMD pkgs "wstunnel" {};
autoStart = mkOption {
description = mdDoc "Whether this tunnel server should be started automatically.";
type = types.bool;
default = true;
};
extraArgs = mkOption {
description = mdDoc "Extra command line arguments to pass to `wstunnel`. Attributes of the form `argName = true;` will be translated to `--argName`, and `argName = \"value\"` to `--argName=value`.";
type = with types; attrsOf (either str bool);
default = {};
example = {
"someNewOption" = true;
"someNewOptionWithValue" = "someValue";
};
};
verboseLogging = mkOption {
description = mdDoc "Enable verbose logging.";
type = types.bool;
default = false;
};
environmentFile = mkOption {
description = mdDoc "Environment file to be passed to the systemd service. Useful for passing secrets to the service to prevent them from being world-readable in the Nix store. Note however that the secrets are passed to `wstunnel` through the command line, which makes them locally readable for all users of the system at runtime.";
type = types.nullOr types.path;
default = null;
example = "/var/lib/secrets/wstunnelSecrets";
};
};
serverSubmodule = { config, ...}: {
options = commonOptions // {
listen = mkOption {
description = mdDoc "Address and port to listen on. Setting the port to a value below 1024 will also give the process the required `CAP_NET_BIND_SERVICE` capability.";
type = types.submodule hostPortSubmodule;
default = {
address = "0.0.0.0";
port = if config.enableHTTPS then 443 else 80;
};
defaultText = literalExpression ''
{
address = "0.0.0.0";
port = if enableHTTPS then 443 else 80;
}
'';
};
restrictTo = mkOption {
description = mdDoc "Accepted traffic will be forwarded only to this service. Set to `null` to allow forwarding to arbitrary addresses.";
type = types.nullOr (types.submodule hostPortSubmodule);
example = {
host = "127.0.0.1";
port = 51820;
};
};
enableHTTPS = mkOption {
description = mdDoc "Use HTTPS for the tunnel server.";
type = types.bool;
default = true;
};
tlsCertificate = mkOption {
description = mdDoc "TLS certificate to use instead of the hardcoded one in case of HTTPS connections. Use together with `tlsKey`.";
type = types.nullOr types.path;
default = null;
example = "/var/lib/secrets/cert.pem";
};
tlsKey = mkOption {
description = mdDoc "TLS key to use instead of the hardcoded on in case of HTTPS connections. Use together with `tlsCertificate`.";
type = types.nullOr types.path;
default = null;
example = "/var/lib/secrets/key.pem";
};
useACMEHost = mkOption {
description = mdDoc "Use a certificate generated by the NixOS ACME module for the given host. Note that this will not generate a new certificate - you will need to do so with `security.acme.certs`.";
type = types.nullOr types.str;
default = null;
example = "example.com";
};
};
};
clientSubmodule = { config, ... }: {
options = commonOptions // {
connectTo = mkOption {
description = mdDoc "Server address and port to connect to.";
type = types.submodule hostPortSubmodule;
example = {
host = "example.com";
};
};
enableHTTPS = mkOption {
description = mdDoc "Enable HTTPS when connecting to the server.";
type = types.bool;
default = true;
};
localToRemote = mkOption {
description = mdDoc "Local hosts and ports to listen on, plus the hosts and ports on remote to forward traffic to. Setting a local port to a value less than 1024 will additionally give the process the required CAP_NET_BIND_SERVICE capability.";
type = types.listOf (types.submodule localRemoteSubmodule);
default = [];
example = [ {
local = {
host = "127.0.0.1";
port = 8080;
};
remote = {
host = "127.0.0.1";
port = 8080;
};
} ];
};
dynamicToRemote = mkOption {
description = mdDoc "Host and port for the SOCKS5 proxy to dynamically forward traffic to. Leave this at `null` to disable the SOCKS5 proxy. Setting the port to a value less than 1024 will additionally give the service the required CAP_NET_BIND_SERVICE capability.";
type = types.nullOr (types.submodule hostPortSubmodule);
default = null;
example = {
host = "127.0.0.1";
port = 1080;
};
};
udp = mkOption {
description = mdDoc "Whether to forward UDP instead of TCP traffic.";
type = types.bool;
default = false;
};
udpTimeout = mkOption {
description = mdDoc "When using UDP forwarding, timeout in seconds after which the tunnel connection is closed. `-1` means no timeout.";
type = types.int;
default = 30;
};
httpProxy = mkOption {
description = mdDoc ''
Proxy to use to connect to the wstunnel server (`USER:PASS@HOST:PORT`).
::: {.warning}
Passwords specified here will be world-readable in the Nix store! To pass a password to the service, point the `environmentFile` option to a file containing `PROXY_PASSWORD=<your-password-here>` and set this option to `<user>:$PROXY_PASSWORD@<host>:<port>`. Note however that this will also locally leak the passwords at runtime via e.g. /proc/<pid>/cmdline.
:::
'';
type = types.nullOr types.str;
default = null;
};
soMark = mkOption {
description = mdDoc "Mark network packets with the SO_MARK sockoption with the specified value. Setting this option will also enable the required `CAP_NET_ADMIN` capability for the systemd service.";
type = types.nullOr types.int;
default = null;
};
upgradePathPrefix = mkOption {
description = mdDoc "Use a specific HTTP path prefix that will show up in the upgrade request to the `wstunnel` server. Useful when running `wstunnel` behind a reverse proxy.";
type = types.nullOr types.str;
default = null;
example = "wstunnel";
};
hostHeader = mkOption {
description = mdDoc "Use this as the HTTP host header instead of the real hostname. Useful for circumventing hostname-based firewalls.";
type = types.nullOr types.str;
default = null;
};
tlsSNI = mkOption {
description = mdDoc "Use this as the SNI while connecting via TLS. Useful for circumventing hostname-based firewalls.";
type = types.nullOr types.str;
default = null;
};
tlsVerifyCertificate = mkOption {
description = mdDoc "Whether to verify the TLS certificate of the server. It might be useful to set this to `false` when working with the `tlsSNI` option.";
type = types.bool;
default = true;
};
# The original argument name `websocketPingFrequency` is a misnomer, as the frequency is the inverse of the interval.
websocketPingInterval = mkOption {
description = mdDoc "Do a heartbeat ping every N seconds to keep up the websocket connection.";
type = types.nullOr types.ints.unsigned;
default = null;
};
upgradeCredentials = mkOption {
description = mdDoc ''
Use these credentials to authenticate during the HTTP upgrade request (Basic authorization type, `USER:[PASS]`).
::: {.warning}
Passwords specified here will be world-readable in the Nix store! To pass a password to the service, point the `environmentFile` option to a file containing `HTTP_PASSWORD=<your-password-here>` and set this option to `<user>:$HTTP_PASSWORD`. Note however that this will also locally leak the passwords at runtime via e.g. /proc/<pid>/cmdline.
:::
'';
type = types.nullOr types.str;
default = null;
};
customHeaders = mkOption {
description = mdDoc "Custom HTTP headers to send during the upgrade request.";
type = types.attrsOf types.str;
default = {};
example = {
"X-Some-Header" = "some-value";
};
};
};
};
generateServerUnit = name: serverCfg: {
name = "wstunnel-server-${name}";
value = {
description = "wstunnel server - ${name}";
requires = [ "network.target" "network-online.target" ];
after = [ "network.target" "network-online.target" ];
wantedBy = optional serverCfg.autoStart "multi-user.target";
serviceConfig = let
certConfig = config.security.acme.certs."${serverCfg.useACMEHost}";
in {
Type = "simple";
ExecStart = with serverCfg; let
resolvedTlsCertificate = if useACMEHost != null
then "${certConfig.directory}/fullchain.pem"
else tlsCertificate;
resolvedTlsKey = if useACMEHost != null
then "${certConfig.directory}/key.pem"
else tlsKey;
in ''
${package}/bin/wstunnel \
--server \
${optionalString (restrictTo != null) "--restrictTo=${utils.escapeSystemdExecArg (hostPortToString restrictTo)}"} \
${optionalString (resolvedTlsCertificate != null) "--tlsCertificate=${utils.escapeSystemdExecArg resolvedTlsCertificate}"} \
${optionalString (resolvedTlsKey != null) "--tlsKey=${utils.escapeSystemdExecArg resolvedTlsKey}"} \
${optionalString verboseLogging "--verbose"} \
${attrsToArgs extraArgs} \
${utils.escapeSystemdExecArg "${if enableHTTPS then "wss" else "ws"}://${hostPortToString listen}"}
'';
EnvironmentFile = optional (serverCfg.environmentFile != null) serverCfg.environmentFile;
DynamicUser = true;
SupplementaryGroups = optional (serverCfg.useACMEHost != null) certConfig.group;
PrivateTmp = true;
AmbientCapabilities = optional (serverCfg.listen.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
NoNewPrivileges = true;
RestrictNamespaces = "uts ipc pid user cgroup";
ProtectSystem = "strict";
ProtectHome = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectControlGroups = true;
PrivateDevices = true;
RestrictSUIDSGID = true;
};
};
};
generateClientUnit = name: clientCfg: {
name = "wstunnel-client-${name}";
value = {
description = "wstunnel client - ${name}";
requires = [ "network.target" "network-online.target" ];
after = [ "network.target" "network-online.target" ];
wantedBy = optional clientCfg.autoStart "multi-user.target";
serviceConfig = {
Type = "simple";
ExecStart = with clientCfg; ''
${package}/bin/wstunnel \
${concatStringsSep " " (builtins.map (x: "--localToRemote=${localRemoteToString x}") localToRemote)} \
${concatStringsSep " " (mapAttrsToList (n: v: "--customHeaders=\"${n}: ${v}\"") customHeaders)} \
${optionalString (dynamicToRemote != null) "--dynamicToRemote=${utils.escapeSystemdExecArg (hostPortToString dynamicToRemote)}"} \
${optionalString udp "--udp"} \
${optionalString (httpProxy != null) "--httpProxy=${httpProxy}"} \
${optionalString (soMark != null) "--soMark=${toString soMark}"} \
${optionalString (upgradePathPrefix != null) "--upgradePathPrefix=${upgradePathPrefix}"} \
${optionalString (hostHeader != null) "--hostHeader=${hostHeader}"} \
${optionalString (tlsSNI != null) "--tlsSNI=${tlsSNI}"} \
${optionalString tlsVerifyCertificate "--tlsVerifyCertificate"} \
${optionalString (websocketPingInterval != null) "--websocketPingFrequency=${toString websocketPingInterval}"} \
${optionalString (upgradeCredentials != null) "--upgradeCredentials=${upgradeCredentials}"} \
--udpTimeoutSec=${toString udpTimeout} \
${optionalString verboseLogging "--verbose"} \
${attrsToArgs extraArgs} \
${utils.escapeSystemdExecArg "${if enableHTTPS then "wss" else "ws"}://${hostPortToString connectTo}"}
'';
EnvironmentFile = optional (clientCfg.environmentFile != null) clientCfg.environmentFile;
DynamicUser = true;
PrivateTmp = true;
AmbientCapabilities = (optional (clientCfg.soMark != null) [ "CAP_NET_ADMIN" ]) ++ (optional ((clientCfg.dynamicToRemote.port or 1024) < 1024 || (any (x: x.local.port < 1024) clientCfg.localToRemote)) [ "CAP_NET_BIND_SERVICE" ]);
NoNewPrivileges = true;
RestrictNamespaces = "uts ipc pid user cgroup";
ProtectSystem = "strict";
ProtectHome = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectControlGroups = true;
PrivateDevices = true;
RestrictSUIDSGID = true;
};
};
};
in {
options.services.wstunnel = {
enable = mkEnableOption (mdDoc "wstunnel");
servers = mkOption {
description = mdDoc "`wstunnel` servers to set up.";
type = types.attrsOf (types.submodule serverSubmodule);
default = {};
example = {
"wg-tunnel" = {
listen.port = 8080;
enableHTTPS = true;
tlsCertificate = "/var/lib/secrets/fullchain.pem";
tlsKey = "/var/lib/secrets/key.pem";
restrictTo = {
host = "127.0.0.1";
port = 51820;
};
};
};
};
clients = mkOption {
description = mdDoc "`wstunnel` clients to set up.";
type = types.attrsOf (types.submodule clientSubmodule);
default = {};
example = {
"wg-tunnel" = {
connectTo = {
host = "example.com";
port = 8080;
};
enableHTTPS = true;
localToRemote = [ {
local = {
host = "127.0.0.1";
port = 51820;
};
remote = {
host = "127.0.0.1";
port = 51820;
};
} ];
udp = true;
};
};
};
};
config = mkIf cfg.enable {
systemd.services = (mapAttrs' generateServerUnit (filterAttrs (n: v: v.enable) cfg.servers)) // (mapAttrs' generateClientUnit (filterAttrs (n: v: v.enable) cfg.clients));
assertions = (mapAttrsToList (name: serverCfg: {
assertion = !(serverCfg.useACMEHost != null && (serverCfg.tlsCertificate != null || serverCfg.tlsKey != null));
message = ''
Options services.wstunnel.servers."${name}".useACMEHost and services.wstunnel.servers."${name}".{tlsCertificate, tlsKey} are mutually exclusive.
'';
}) cfg.servers) ++
(mapAttrsToList (name: serverCfg: {
assertion = !((serverCfg.tlsCertificate != null || serverCfg.tlsKey != null) && !(serverCfg.tlsCertificate != null && serverCfg.tlsKey != null));
message = ''
services.wstunnel.servers."${name}".tlsCertificate and services.wstunnel.servers."${name}".tlsKey need to be set together.
'';
}) cfg.servers) ++
(mapAttrsToList (name: clientCfg: {
assertion = !(clientCfg.localToRemote == [] && clientCfg.dynamicToRemote == null);
message = ''
Either one of services.wstunnel.clients."${name}".localToRemote or services.wstunnel.clients."${name}".dynamicToRemote must be set.
'';
}) cfg.clients);
};
meta.maintainers = with maintainers; [ alyaeanyx ];
}
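
To tie the server and client submodules above together, here is a hedged sketch of a matching pair forwarding WireGuard over websockets; in practice the two halves would live on different hosts, and the domain, ports and certificate are illustrative:

```nix
{ ... }:
{
  services.wstunnel = {
    enable = true;

    # On the public server: terminate wss:// on 443 and only allow forwarding
    # to the local WireGuard port (assumes an existing ACME cert for the host).
    servers.wireguard = {
      listen = { host = "0.0.0.0"; port = 443; };
      restrictTo = { host = "127.0.0.1"; port = 51820; };
      useACMEHost = "vpn.example.org";
    };

    # On the roaming client: expose a local UDP port that is tunnelled to the
    # server's WireGuard endpoint.
    clients.wireguard = {
      connectTo = { host = "vpn.example.org"; port = 443; };
      localToRemote = [{
        local = { host = "127.0.0.1"; port = 51820; };
        remote = { host = "127.0.0.1"; port = 51820; };
      }];
      udp = true;
    };
  };
}
```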

View file

@ -0,0 +1,199 @@
{ config, lib, pkgs, utils, ... }:
let
inherit (lib) attrValues concatStringsSep filterAttrs length listToAttrs literalExpression
makeSearchPathOutput mkEnableOption mkIf mkOption nameValuePair optionals types;
inherit (utils) escapeSystemdPath;
cfg = config.services.v4l2-relayd;
kernelPackages = config.boot.kernelPackages;
gst = (with pkgs.gst_all_1; [
gst-plugins-bad
gst-plugins-base
gst-plugins-good
gstreamer.out
]);
instanceOpts = { name, ... }: {
options = {
enable = mkEnableOption (lib.mdDoc "this v4l2-relayd instance");
name = mkOption {
type = types.str;
default = name;
description = lib.mdDoc ''
The name of the instance.
'';
};
cardLabel = mkOption {
type = types.str;
description = lib.mdDoc ''
The name the camera will show up as.
'';
};
extraPackages = mkOption {
type = with types; listOf package;
default = [ ];
description = lib.mdDoc ''
Extra packages to add to {env}`GST_PLUGIN_PATH` for the instance.
'';
};
input = {
pipeline = mkOption {
type = types.str;
description = lib.mdDoc ''
The gstreamer-pipeline to use for the input-stream.
'';
};
format = mkOption {
type = types.str;
default = "YUY2";
description = lib.mdDoc ''
The video-format to read from input-stream.
'';
};
width = mkOption {
type = types.ints.positive;
default = 1280;
description = lib.mdDoc ''
The width to read from input-stream.
'';
};
height = mkOption {
type = types.ints.positive;
default = 720;
description = lib.mdDoc ''
The height to read from input-stream.
'';
};
framerate = mkOption {
type = types.ints.positive;
default = 30;
description = lib.mdDoc ''
The framerate to read from input-stream.
'';
};
};
output = {
format = mkOption {
type = types.str;
default = "YUY2";
description = lib.mdDoc ''
The video-format to write to output-stream.
'';
};
};
};
};
in
{
options.services.v4l2-relayd = {
instances = mkOption {
type = with types; attrsOf (submodule instanceOpts);
default = { };
example = literalExpression ''
{
example = {
cardLabel = "Example card";
input.pipeline = "videotestsrc";
};
}
'';
description = lib.mdDoc ''
v4l2-relayd instances to be created.
'';
};
};
config =
let
mkInstanceService = instance: {
description = "Streaming relay for v4l2loopback using GStreamer";
after = [ "modprobe@v4l2loopback.service" "systemd-logind.service" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "simple";
Restart = "always";
PrivateNetwork = true;
PrivateTmp = true;
LimitNPROC = 1;
};
environment = {
GST_PLUGIN_PATH = makeSearchPathOutput "lib" "lib/gstreamer-1.0" (gst ++ instance.extraPackages);
V4L2_DEVICE_FILE = "/run/v4l2-relayd-${instance.name}/device";
};
script =
let
appsrcOptions = concatStringsSep "," [
"caps=video/x-raw"
"format=${instance.input.format}"
"width=${toString instance.input.width}"
"height=${toString instance.input.height}"
"framerate=${toString instance.input.framerate}/1"
];
outputPipeline = [
"appsrc name=appsrc ${appsrcOptions}"
"videoconvert"
] ++ optionals (instance.input.format != instance.output.format) [
"video/x-raw,format=${instance.output.format}"
"queue"
] ++ [ "v4l2sink name=v4l2sink device=$(cat $V4L2_DEVICE_FILE)" ];
in
''
exec ${pkgs.v4l2-relayd}/bin/v4l2-relayd -i "${instance.input.pipeline}" -o "${concatStringsSep " ! " outputPipeline}"
'';
preStart = ''
mkdir -p $(dirname $V4L2_DEVICE_FILE)
${kernelPackages.v4l2loopback.bin}/bin/v4l2loopback-ctl add -x 1 -n "${instance.cardLabel}" > $V4L2_DEVICE_FILE
'';
postStop = ''
${kernelPackages.v4l2loopback.bin}/bin/v4l2loopback-ctl delete $(cat $V4L2_DEVICE_FILE)
rm -rf $(dirname $V4L2_DEVICE_FILE)
'';
};
mkInstanceServices = instances: listToAttrs (map
(instance:
nameValuePair "v4l2-relayd-${escapeSystemdPath instance.name}" (mkInstanceService instance)
)
instances);
enabledInstances = attrValues (filterAttrs (n: v: v.enable) cfg.instances);
in
{
boot = mkIf ((length enabledInstances) > 0) {
extraModulePackages = [ kernelPackages.v4l2loopback ];
kernelModules = [ "v4l2loopback" ];
};
systemd.services = mkInstanceServices enabledInstances;
};
meta.maintainers = with lib.maintainers; [ betaboon ];
}
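
For illustration, a minimal sketch of one instance; `videotestsrc` is only a placeholder GStreamer source and any camera pipeline could be substituted:

```nix
{ ... }:
{
  services.v4l2-relayd.instances.relay-cam = {
    enable = true;
    cardLabel = "Relayed camera";     # name exposed by the v4l2loopback device
    input.pipeline = "videotestsrc";  # placeholder source pipeline
    # input.format/width/height/framerate keep their defaults (YUY2, 1280x720 @ 30).
  };
}
```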

View file

@ -0,0 +1,106 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.chatgpt-retrieval-plugin;
in
{
options.services.chatgpt-retrieval-plugin = {
enable = mkEnableOption (lib.mdDoc "chatgpt-retrieval-plugin service");
port = mkOption {
type = types.port;
default = 8080;
description = lib.mdDoc "Port the chatgpt-retrieval-plugin service listens on.";
};
host = mkOption {
type = types.str;
default = "127.0.0.1";
example = "0.0.0.0";
description = lib.mdDoc "The hostname or IP address for chatgpt-retrieval-plugin to bind to.";
};
bearerTokenPath = mkOption {
type = types.path;
description = lib.mdDoc ''
Path to the secret bearer token used for the http api authentication.
'';
default = "";
example = "config.age.secrets.CHATGPT_RETRIEVAL_PLUGIN_BEARER_TOKEN.path";
};
openaiApiKeyPath = mkOption {
type = types.path;
description = lib.mdDoc ''
Path to the secret openai api key used for embeddings.
'';
default = "";
example = "config.age.secrets.CHATGPT_RETRIEVAL_PLUGIN_OPENAI_API_KEY.path";
};
datastore = mkOption {
type = types.enum [ "pinecone" "weaviate" "zilliz" "milvus" "qdrant" "redis" ];
default = "qdrant";
description = lib.mdDoc "This specifies the vector database provider you want to use to store and query embeddings.";
};
qdrantCollection = mkOption {
type = types.str;
description = lib.mdDoc ''
Name of the qdrant collection used to store documents.
'';
default = "document_chunks";
};
};
config = mkIf cfg.enable {
assertions = [
{
assertion = cfg.bearerTokenPath != "";
message = "services.chatgpt-retrieval-plugin.bearerTokenPath should not be an empty string.";
}
{
assertion = cfg.openaiApiKeyPath != "";
message = "services.chatgpt-retrieval-plugin.openaiApiKeyPath should not be an empty string.";
}
];
systemd.services.chatgpt-retrieval-plugin = {
description = "ChatGPT Retrieval Plugin";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
DynamicUser = true;
Restart = "always";
LoadCredential = [
"BEARER_TOKEN:${cfg.bearerTokenPath}"
"OPENAI_API_KEY:${cfg.openaiApiKeyPath}"
];
StateDirectory = "chatgpt-retrieval-plugin";
StateDirectoryMode = "0755";
};
# It doesn't make sense to pass secrets as env vars; this is a hack until
# upstream has proper secret management.
script = ''
export BEARER_TOKEN=$(${pkgs.systemd}/bin/systemd-creds cat BEARER_TOKEN)
export OPENAI_API_KEY=$(${pkgs.systemd}/bin/systemd-creds cat OPENAI_API_KEY)
exec ${pkgs.chatgpt-retrieval-plugin}/bin/start --host ${cfg.host} --port ${toString cfg.port}
'';
environment = {
DATASTORE = cfg.datastore;
QDRANT_COLLECTION = mkIf (cfg.datastore == "qdrant") cfg.qdrantCollection;
};
};
systemd.tmpfiles.rules = [
# create the directory for static files for fastapi
"C /var/lib/chatgpt-retrieval-plugin/.well-known - - - - ${pkgs.chatgpt-retrieval-plugin}/${pkgs.python3Packages.python.sitePackages}/.well-known"
];
};
}
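
A hedged sketch of enabling the service with the default qdrant datastore; the secret paths are placeholders that would normally be provided by a secrets manager:

```nix
{ ... }:
{
  services.chatgpt-retrieval-plugin = {
    enable = true;
    host = "127.0.0.1";
    port = 8080;
    datastore = "qdrant";
    qdrantCollection = "document_chunks";
    # Placeholder secret files; both are loaded via systemd credentials.
    bearerTokenPath = "/run/secrets/retrieval-bearer-token";
    openaiApiKeyPath = "/run/secrets/openai-api-key";
  };
}
```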

View file

@ -36,7 +36,8 @@ let
);
'';
configFile = pkgs.writeText "config.php" ''
${strings.fileContents "${pkgs.cloudlog}/install/config/config.php"}
<?php
include('${pkgs.cloudlog}/install/config/config.php');
$config['datadir'] = "${cfg.dataDir}/";
$config['base_url'] = "${cfg.baseUrl}";
${cfg.extraConfig}

View file

@ -48,6 +48,8 @@ let
# User and group
User = cfg.user;
Group = cfg.group;
# Working directory
WorkingDirectory = cfg.package;
# State directory and mode
StateDirectory = "mastodon";
StateDirectoryMode = "0750";
@ -110,6 +112,37 @@ let
$sudo ${cfg.package}/bin/tootctl "$@"
'';
sidekiqUnits = lib.attrsets.mapAttrs' (name: processCfg:
lib.nameValuePair "mastodon-sidekiq-${name}" (let
jobClassArgs = toString (builtins.map (c: "-q ${c}") processCfg.jobClasses);
jobClassLabel = toString ([""] ++ processCfg.jobClasses);
threads = toString (if processCfg.threads == null then cfg.sidekiqThreads else processCfg.threads);
in {
after = [ "network.target" "mastodon-init-dirs.service" ]
++ lib.optional databaseActuallyCreateLocally "postgresql.service"
++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
requires = [ "mastodon-init-dirs.service" ]
++ lib.optional databaseActuallyCreateLocally "postgresql.service"
++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
description = "Mastodon sidekiq${jobClassLabel}";
wantedBy = [ "mastodon.target" ];
environment = env // {
PORT = toString(cfg.sidekiqPort);
DB_POOL = threads;
};
serviceConfig = {
ExecStart = "${cfg.package}/bin/sidekiq ${jobClassArgs} -c ${threads} -r ${cfg.package}";
Restart = "always";
RestartSec = 20;
EnvironmentFile = [ "/var/lib/mastodon/.secrets_env" ] ++ cfg.extraEnvFiles;
WorkingDirectory = cfg.package;
# System Call Filtering
SystemCallFilter = [ ("~" + lib.concatStringsSep " " systemCallsList) "@chown" "pipe" "pipe2" ];
} // cfgService;
path = with pkgs; [ file imagemagick ffmpeg ];
})
) cfg.sidekiqProcesses;
in {
options = {
@ -195,12 +228,53 @@ in {
type = lib.types.port;
default = 55002;
};
sidekiqThreads = lib.mkOption {
description = lib.mdDoc "Worker threads used by the mastodon-sidekiq service.";
description = lib.mdDoc "Worker threads used by the mastodon-sidekiq-all service. If `sidekiqProcesses` is configured and any processes specify null `threads`, this value is used.";
type = lib.types.int;
default = 25;
};
sidekiqProcesses = lib.mkOption {
description = lib.mdDoc "How many Sidekiq processes should be used to handle background jobs, and which job classes they handle. *Read the [upstream documentation](https://docs.joinmastodon.org/admin/scaling/#sidekiq) before configuring this!*";
type = with lib.types; attrsOf (submodule {
options = {
jobClasses = lib.mkOption {
type = listOf (enum [ "default" "push" "pull" "mailers" "scheduler" "ingress" ]);
description = lib.mdDoc "If not empty, which job classes should be executed by this process. *Only one process should handle the 'scheduler' class. If left empty, this process will handle the 'scheduler' class.*";
};
threads = lib.mkOption {
type = nullOr int;
description = lib.mdDoc "Number of threads this process should use for executing jobs. If null, the configured `sidekiqThreads` are used.";
};
};
});
default = {
all = {
jobClasses = [ ];
threads = null;
};
};
example = {
all = {
jobClasses = [ ];
threads = null;
};
ingress = {
jobClasses = [ "ingress" ];
threads = 5;
};
default = {
jobClasses = [ "default" ];
threads = 10;
};
push-pull = {
jobClasses = [ "push" "pull" ];
threads = 5;
};
};
};
vapidPublicKeyFile = lib.mkOption {
description = lib.mdDoc ''
Path to file containing the public key used for Web Push
@ -482,7 +556,7 @@ in {
};
};
config = lib.mkIf cfg.enable {
config = lib.mkIf cfg.enable (lib.mkMerge [{
assertions = [
{
assertion = databaseActuallyCreateLocally -> (cfg.user == cfg.database.user);
@ -517,6 +591,12 @@ in {
environment.systemPackages = [ mastodonTootctl ];
systemd.targets.mastodon = {
description = "Target for all Mastodon services";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
};
systemd.services.mastodon-init-dirs = {
script = ''
umask 077
@ -551,7 +631,7 @@ in {
environment = env;
serviceConfig = {
Type = "oneshot";
WorkingDirectory = cfg.package;
SyslogIdentifier = "mastodon-init-dirs";
# System Call Filtering
SystemCallFilter = [ ("~" + lib.concatStringsSep " " (systemCallsList ++ [ "@resources" ])) "@chown" "pipe" "pipe2" ];
} // cfgService;
@ -609,7 +689,7 @@ in {
requires = [ "mastodon-init-dirs.service" ]
++ lib.optional databaseActuallyCreateLocally "postgresql.service"
++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
wantedBy = [ "multi-user.target" ];
wantedBy = [ "mastodon.target" ];
description = "Mastodon streaming";
environment = env // (if cfg.enableUnixSocket
then { SOCKET = "/run/mastodon-streaming/streaming.socket"; }
@ -636,7 +716,7 @@ in {
requires = [ "mastodon-init-dirs.service" ]
++ lib.optional databaseActuallyCreateLocally "postgresql.service"
++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
wantedBy = [ "multi-user.target" ];
wantedBy = [ "mastodon.target" ];
description = "Mastodon web";
environment = env // (if cfg.enableUnixSocket
then { SOCKET = "/run/mastodon-web/web.socket"; }
@ -657,31 +737,6 @@ in {
path = with pkgs; [ file imagemagick ffmpeg ];
};
systemd.services.mastodon-sidekiq = {
after = [ "network.target" "mastodon-init-dirs.service" ]
++ lib.optional databaseActuallyCreateLocally "postgresql.service"
++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
requires = [ "mastodon-init-dirs.service" ]
++ lib.optional databaseActuallyCreateLocally "postgresql.service"
++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
wantedBy = [ "multi-user.target" ];
description = "Mastodon sidekiq";
environment = env // {
PORT = toString(cfg.sidekiqPort);
DB_POOL = toString cfg.sidekiqThreads;
};
serviceConfig = {
ExecStart = "${cfg.package}/bin/sidekiq -c ${toString cfg.sidekiqThreads} -r ${cfg.package}";
Restart = "always";
RestartSec = 20;
EnvironmentFile = [ "/var/lib/mastodon/.secrets_env" ] ++ cfg.extraEnvFiles;
WorkingDirectory = cfg.package;
# System Call Filtering
SystemCallFilter = [ ("~" + lib.concatStringsSep " " systemCallsList) "@chown" "pipe" "pipe2" ];
} // cfgService;
path = with pkgs; [ file imagemagick ffmpeg ];
};
systemd.services.mastodon-media-auto-remove = lib.mkIf cfg.mediaAutoRemove.enable {
description = "Mastodon media auto remove";
environment = env;
@ -757,7 +812,9 @@ in {
];
users.groups.${cfg.group}.members = lib.optional cfg.configureNginx config.services.nginx.user;
};
}
{ systemd.services = sidekiqUnits; }
]);
meta.maintainers = with lib.maintainers; [ happy-river erictapen ];
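
To make the unit generation above concrete, a hedged example of one `sidekiqProcesses` entry and the command line it should produce (names and numbers are illustrative):

```nix
{ ... }:
{
  services.mastodon.sidekiqProcesses.push-pull = {
    jobClasses = [ "push" "pull" ];
    threads = 5;
  };
  # This yields a unit named "mastodon-sidekiq-push-pull" whose ExecStart
  # contains "sidekiq -q push -q pull -c 5", since each job class maps to a
  # "-q <class>" flag and the per-process threads value overrides sidekiqThreads.
}
```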

View file

@ -46,6 +46,15 @@ let
done
'';
dbAddr = if cfg.database.socket == null then
"${cfg.database.host}:${toString cfg.database.port}"
else if cfg.database.type == "mysql" then
"${cfg.database.host}:${cfg.database.socket}"
else if cfg.database.type == "postgres" then
"${cfg.database.socket}"
else
throw "Unsupported database type: ${cfg.database.type} for socket: ${cfg.database.socket}";
mediawikiConfig = pkgs.writeText "LocalSettings.php" ''
<?php
# Protect against web entry
@ -87,7 +96,8 @@ let
## Database settings
$wgDBtype = "${cfg.database.type}";
$wgDBserver = "${cfg.database.host}:${if cfg.database.socket != null then cfg.database.socket else toString cfg.database.port}";
$wgDBserver = "${dbAddr}";
$wgDBport = "${toString cfg.database.port}";
$wgDBname = "${cfg.database.name}";
$wgDBuser = "${cfg.database.user}";
${optionalString (cfg.database.passwordFile != null) "$wgDBpassword = file_get_contents(\"${cfg.database.passwordFile}\");"}
@ -246,7 +256,8 @@ in
port = mkOption {
type = types.port;
default = 3306;
default = if cfg.database.type == "mysql" then 3306 else 5432;
defaultText = literalExpression "3306";
description = lib.mdDoc "Database host port.";
};
@ -286,14 +297,19 @@ in
socket = mkOption {
type = types.nullOr types.path;
default = if cfg.database.createLocally then "/run/mysqld/mysqld.sock" else null;
default = if (cfg.database.type == "mysql" && cfg.database.createLocally) then
"/run/mysqld/mysqld.sock"
else if (cfg.database.type == "postgres" && cfg.database.createLocally) then
"/run/postgresql"
else
null;
defaultText = literalExpression "/run/mysqld/mysqld.sock";
description = lib.mdDoc "Path to the unix socket file to use for authentication.";
};
createLocally = mkOption {
type = types.bool;
default = cfg.database.type == "mysql";
default = cfg.database.type == "mysql" || cfg.database.type == "postgres";
defaultText = literalExpression "true";
description = lib.mdDoc ''
Create the database and database user locally.
@ -354,8 +370,8 @@ in
config = mkIf cfg.enable {
assertions = [
{ assertion = cfg.database.createLocally -> cfg.database.type == "mysql";
message = "services.mediawiki.createLocally is currently only supported for database type 'mysql'";
{ assertion = cfg.database.createLocally -> (cfg.database.type == "mysql" || cfg.database.type == "postgres");
message = "services.mediawiki.createLocally is currently only supported for database type 'mysql' and 'postgres'";
}
{ assertion = cfg.database.createLocally -> cfg.database.user == user;
message = "services.mediawiki.database.user must be set to ${user} if services.mediawiki.database.createLocally is set true";
@ -374,15 +390,23 @@ in
Vector = "${cfg.package}/share/mediawiki/skins/Vector";
};
services.mysql = mkIf cfg.database.createLocally {
services.mysql = mkIf (cfg.database.type == "mysql" && cfg.database.createLocally) {
enable = true;
package = mkDefault pkgs.mariadb;
ensureDatabases = [ cfg.database.name ];
ensureUsers = [
{ name = cfg.database.user;
ensureUsers = [{
name = cfg.database.user;
ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
}
];
}];
};
services.postgresql = mkIf (cfg.database.type == "postgres" && cfg.database.createLocally) {
enable = true;
ensureDatabases = [ cfg.database.name ];
ensureUsers = [{
name = cfg.database.user;
ensurePermissions = { "DATABASE \"${cfg.database.name}\"" = "ALL PRIVILEGES"; };
}];
};
services.phpfpm.pools.mediawiki = {
@ -431,7 +455,8 @@ in
systemd.services.mediawiki-init = {
wantedBy = [ "multi-user.target" ];
before = [ "phpfpm-mediawiki.service" ];
after = optional cfg.database.createLocally "mysql.service";
after = optional (cfg.database.type == "mysql" && cfg.database.createLocally) "mysql.service"
++ optional (cfg.database.type == "postgres" && cfg.database.createLocally) "postgresql.service";
script = ''
if ! test -e "${stateDir}/secret.key"; then
tr -dc A-Za-z0-9 </dev/urandom 2>/dev/null | head -c 64 > ${stateDir}/secret.key
@ -442,7 +467,7 @@ in
${pkgs.php}/bin/php ${pkg}/share/mediawiki/maintenance/install.php \
--confpath /tmp \
--scriptpath / \
--dbserver ${cfg.database.host}${optionalString (cfg.database.socket != null) ":${cfg.database.socket}"} \
--dbserver "${dbAddr}" \
--dbport ${toString cfg.database.port} \
--dbname ${cfg.database.name} \
${optionalString (cfg.database.tablePrefix != null) "--dbprefix ${cfg.database.tablePrefix}"} \
@ -464,7 +489,8 @@ in
};
};
systemd.services.httpd.after = optional (cfg.database.createLocally && cfg.database.type == "mysql") "mysql.service";
systemd.services.httpd.after = optional (cfg.database.createLocally && cfg.database.type == "mysql") "mysql.service"
++ optional (cfg.database.createLocally && cfg.database.type == "postgres") "postgresql.service";
users.users.${user} = {
group = group;
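
A minimal sketch of the new PostgreSQL path; with `database.type = "postgres"` the socket, port and `createLocally` defaults derived above all switch to the PostgreSQL values (other required MediaWiki options are omitted for brevity):

```nix
{ ... }:
{
  services.mediawiki = {
    enable = true;
    # createLocally now defaults to true for postgres as well, so the database,
    # user and unix-socket authentication are provisioned automatically.
    database.type = "postgres";
    # Remaining required options (wiki name, admin password file, virtual host, ...)
    # are omitted here; see the module options above.
  };
}
```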

View file

@ -4,45 +4,17 @@ with lib;
let
cfg = config.services.netbox;
pythonFmt = pkgs.formats.pythonVars {};
staticDir = cfg.dataDir + "/static";
configFile = pkgs.writeTextFile {
name = "configuration.py";
text = ''
STATIC_ROOT = '${staticDir}'
MEDIA_ROOT = '${cfg.dataDir}/media'
REPORTS_ROOT = '${cfg.dataDir}/reports'
SCRIPTS_ROOT = '${cfg.dataDir}/scripts'
ALLOWED_HOSTS = ['*']
DATABASE = {
'NAME': 'netbox',
'USER': 'netbox',
'HOST': '/run/postgresql',
}
# Redis database settings. Redis is used for caching and for queuing background tasks such as webhook events. A separate
# configuration exists for each. Full connection details are required in both sections, and it is strongly recommended
# to use two separate database IDs.
REDIS = {
'tasks': {
'URL': 'unix://${config.services.redis.servers.netbox.unixSocket}?db=0',
'SSL': False,
},
'caching': {
'URL': 'unix://${config.services.redis.servers.netbox.unixSocket}?db=1',
'SSL': False,
}
}
with open("${cfg.secretKeyFile}", "r") as file:
SECRET_KEY = file.readline()
${optionalString cfg.enableLdap "REMOTE_AUTH_BACKEND = 'netbox.authentication.LDAPBackend'"}
${cfg.extraConfig}
'';
settingsFile = pythonFmt.generate "netbox-settings.py" cfg.settings;
extraConfigFile = pkgs.writeTextFile {
name = "netbox-extraConfig.py";
text = cfg.extraConfig;
};
pkg = (pkgs.netbox.overrideAttrs (old: {
configFile = pkgs.concatText "configuration.py" [ settingsFile extraConfigFile ];
pkg = (cfg.package.overrideAttrs (old: {
installPhase = old.installPhase + ''
ln -s ${configFile} $out/opt/netbox/netbox/netbox/configuration.py
'' + optionalString cfg.enableLdap ''
@ -70,6 +42,30 @@ in {
'';
};
settings = lib.mkOption {
description = lib.mdDoc ''
Configuration options to set in `configuration.py`.
See the [documentation](https://docs.netbox.dev/en/stable/configuration/) for more possible options.
'';
default = { };
type = lib.types.submodule {
freeformType = pythonFmt.type;
options = {
ALLOWED_HOSTS = lib.mkOption {
type = with lib.types; listOf str;
default = ["*"];
description = lib.mdDoc ''
A list of valid fully-qualified domain names (FQDNs) and/or IP
addresses that can be used to reach the NetBox service.
'';
};
};
};
};
listenAddress = mkOption {
type = types.str;
default = "[::1]";
@ -78,6 +74,17 @@ in {
'';
};
package = mkOption {
type = types.package;
default = if versionAtLeast config.system.stateVersion "23.05" then pkgs.netbox else pkgs.netbox_3_3;
defaultText = literalExpression ''
if versionAtLeast config.system.stateVersion "23.05" then pkgs.netbox else pkgs.netbox_3_3;
'';
description = lib.mdDoc ''
NetBox package to use.
'';
};
port = mkOption {
type = types.port;
default = 8001;
@ -117,7 +124,7 @@ in {
default = "";
description = lib.mdDoc ''
Additional lines of configuration appended to the `configuration.py`.
See the [documentation](https://netbox.readthedocs.io/en/stable/configuration/optional-settings/) for more possible options.
See the [documentation](https://docs.netbox.dev/en/stable/configuration/) for more possible options.
'';
};
@ -138,11 +145,90 @@ in {
Path to the Configuration-File for LDAP-Authentication, will be loaded as `ldap_config.py`.
See the [documentation](https://netbox.readthedocs.io/en/stable/installation/6-ldap/#configuration) for possible options.
'';
example = ''
import ldap
from django_auth_ldap.config import LDAPSearch, PosixGroupType
AUTH_LDAP_SERVER_URI = "ldaps://ldap.example.com/"
AUTH_LDAP_USER_SEARCH = LDAPSearch(
"ou=accounts,ou=posix,dc=example,dc=com",
ldap.SCOPE_SUBTREE,
"(uid=%(user)s)",
)
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(
"ou=groups,ou=posix,dc=example,dc=com",
ldap.SCOPE_SUBTREE,
"(objectClass=posixGroup)",
)
AUTH_LDAP_GROUP_TYPE = PosixGroupType()
# Mirror LDAP group assignments.
AUTH_LDAP_MIRROR_GROUPS = True
# For more granular permissions, we can map LDAP groups to Django groups.
AUTH_LDAP_FIND_GROUP_PERMS = True
'';
};
};
config = mkIf cfg.enable {
services.netbox.plugins = mkIf cfg.enableLdap (ps: [ ps.django-auth-ldap ]);
services.netbox = {
plugins = mkIf cfg.enableLdap (ps: [ ps.django-auth-ldap ]);
settings = {
STATIC_ROOT = staticDir;
MEDIA_ROOT = "${cfg.dataDir}/media";
REPORTS_ROOT = "${cfg.dataDir}/reports";
SCRIPTS_ROOT = "${cfg.dataDir}/scripts";
DATABASE = {
NAME = "netbox";
USER = "netbox";
HOST = "/run/postgresql";
};
# Redis database settings. Redis is used for caching and for queuing
# background tasks such as webhook events. A separate configuration
# exists for each. Full connection details are required in both
# sections, and it is strongly recommended to use two separate database
# IDs.
REDIS = {
tasks = {
URL = "unix://${config.services.redis.servers.netbox.unixSocket}?db=0";
SSL = false;
};
caching = {
URL = "unix://${config.services.redis.servers.netbox.unixSocket}?db=1";
SSL = false;
};
};
REMOTE_AUTH_BACKEND = lib.mkIf cfg.enableLdap "netbox.authentication.LDAPBackend";
LOGGING = lib.mkDefault {
version = 1;
formatters.precise.format = "[%(levelname)s@%(name)s] %(message)s";
handlers.console = {
class = "logging.StreamHandler";
formatter = "precise";
};
# log to console/systemd instead of file
root = {
level = "INFO";
handlers = [ "console" ];
};
};
};
extraConfig = ''
with open("${cfg.secretKeyFile}", "r") as file:
SECRET_KEY = file.readline()
'';
};
services.redis.servers.netbox.enable = true;
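
A hedged sketch of the new freeform `settings` option; keys are rendered into `configuration.py` by the pythonVars formatter, and the values below are placeholders:

```nix
{ ... }:
{
  services.netbox = {
    enable = true;
    secretKeyFile = "/run/secrets/netbox-secret-key";  # placeholder path
    settings = {
      ALLOWED_HOSTS = [ "netbox.example.org" ];
      # Any other upstream configuration key can be set here, e.g.:
      TIME_ZONE = "UTC";
    };
  };
}
```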

View file

@ -51,11 +51,11 @@ in
default = "none";
type = types.enum ([ "none" "1" "2" "3" 1 2 3 ]);
apply = v: toString v;
description = lib.mdDoc "Garage replication mode, defaults to none, see: <https://garagehq.deuxfleurs.fr/reference_manual/configuration.html#replication_mode> for reference.";
description = lib.mdDoc "Garage replication mode, defaults to none, see: <https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode> for reference.";
};
};
};
description = lib.mdDoc "Garage configuration, see <https://garagehq.deuxfleurs.fr/reference_manual/configuration.html> for reference.";
description = lib.mdDoc "Garage configuration, see <https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/> for reference.";
};
package = mkOption {

View file

@ -31,6 +31,7 @@ let
# Mime.types values are taken from brotli sample configuration - https://github.com/google/ngx_brotli
# and Nginx Server Configs - https://github.com/h5bp/server-configs-nginx
# "text/html" is implicitly included in {brotli,gzip,zstd}_types
compressMimeTypes = [
"application/atom+xml"
"application/geo+json"
@ -55,7 +56,6 @@ let
"text/calendar"
"text/css"
"text/csv"
"text/html"
"text/javascript"
"text/markdown"
"text/plain"
@ -102,6 +102,17 @@ let
proxy_set_header X-Forwarded-Server $host;
'';
proxyCachePathConfig = concatStringsSep "\n" (mapAttrsToList (name: proxyCachePath: ''
proxy_cache_path ${concatStringsSep " " [
"/var/cache/nginx/${name}"
"keys_zone=${proxyCachePath.keysZoneName}:${proxyCachePath.keysZoneSize}"
"levels=${proxyCachePath.levels}"
"use_temp_path=${if proxyCachePath.useTempPath then "on" else "off"}"
"inactive=${proxyCachePath.inactive}"
"max_size=${proxyCachePath.maxSize}"
]};
'') (filterAttrs (name: conf: conf.enable) cfg.proxyCachePath));
upstreamConfig = toString (flip mapAttrsToList cfg.upstreams (name: upstream: ''
upstream ${name} {
${toString (flip mapAttrsToList upstream.servers (name: server: ''
@ -184,8 +195,9 @@ let
brotli_types ${lib.concatStringsSep " " compressMimeTypes};
''}
${optionalString cfg.recommendedGzipSettings ''
${optionalString cfg.recommendedGzipSettings
# https://docs.nginx.com/nginx/admin-guide/web-server/compression/
''
gzip on;
gzip_static on;
gzip_vary on;
@ -240,16 +252,10 @@ let
server_tokens ${if cfg.serverTokens then "on" else "off"};
${optionalString cfg.proxyCache.enable ''
proxy_cache_path /var/cache/nginx keys_zone=${cfg.proxyCache.keysZoneName}:${cfg.proxyCache.keysZoneSize}
levels=${cfg.proxyCache.levels}
use_temp_path=${if cfg.proxyCache.useTempPath then "on" else "off"}
inactive=${cfg.proxyCache.inactive}
max_size=${cfg.proxyCache.maxSize};
''}
${cfg.commonHttpConfig}
${proxyCachePathConfig}
${vhosts}
${optionalString cfg.statusPage ''
@ -311,12 +317,15 @@ let
else defaultListen;
listenString = { addr, port, ssl, extraParameters ? [], ... }:
(if ssl && vhost.http3 then "
# UDP listener for **QUIC+HTTP/3
listen ${addr}:${toString port} http3 "
# UDP listener for QUIC transport protocol.
(if ssl && vhost.quic then "
listen ${addr}:${toString port} quic "
+ optionalString vhost.default "default_server "
+ optionalString vhost.reuseport "reuseport "
+ optionalString (extraParameters != []) (concatStringsSep " " extraParameters)
+ optionalString (extraParameters != []) (concatStringsSep " " (
let inCompatibleParameters = [ "ssl" "proxy_protocol" "http2" ];
isCompatibleParameter = param: !(any (p: p == param) inCompatibleParameters);
in filter isCompatibleParameter extraParameters))
+ ";" else "")
+ "
@ -363,6 +372,10 @@ let
server {
${concatMapStringsSep "\n" listenString hostListen}
server_name ${vhost.serverName} ${concatStringsSep " " vhost.serverAliases};
${optionalString (hasSSL && vhost.quic) ''
http3 ${if vhost.http3 then "on" else "off"};
http3_hq ${if vhost.http3_hq then "on" else "off"};
''}
${acmeLocation}
${optionalString (vhost.root != null) "root ${vhost.root};"}
${optionalString (vhost.globalRedirect != null) ''
@ -384,9 +397,10 @@ let
ssl_conf_command Options KTLS;
''}
${optionalString (hasSSL && vhost.http3) ''
${optionalString (hasSSL && vhost.quic && vhost.http3)
# Advertise that HTTP/3 is available
add_header Alt-Svc 'h3=":443"; ma=86400' always;
''
add_header Alt-Svc 'h3=":$server_port"; ma=86400';
''}
${mkBasicAuth vhostName vhost}
@ -476,7 +490,8 @@ in
default = false;
type = types.bool;
description = lib.mdDoc ''
Enable recommended brotli settings. Learn more about compression in Brotli format [here](https://github.com/google/ngx_brotli/blob/master/README.md).
Enable recommended brotli settings.
Learn more about compression in Brotli format [here](https://github.com/google/ngx_brotli/).
This adds `pkgs.nginxModules.brotli` to `services.nginx.additionalModules`.
'';
@ -487,6 +502,18 @@ in
type = types.bool;
description = lib.mdDoc ''
Enable recommended gzip settings.
Learn more about compression in Gzip format [here](https://docs.nginx.com/nginx/admin-guide/web-server/compression/).
'';
};
recommendedZstdSettings = mkOption {
default = false;
type = types.bool;
description = lib.mdDoc ''
Enable recommended zstd settings.
Learn more about compression in Zstd format [here](https://github.com/tokers/zstd-nginx-module).
This adds `pkgs.nginxModules.zstd` to `services.nginx.additionalModules`.
'';
};
@ -498,16 +525,6 @@ in
'';
};
recommendedZstdSettings = mkOption {
default = false;
type = types.bool;
description = lib.mdDoc ''
Enable recommended zstd settings. Learn more about compression in Zstd format [here](https://github.com/tokers/zstd-nginx-module).
This adds `pkgs.nginxModules.zstd` to `services.nginx.additionalModules`.
'';
};
proxyTimeout = mkOption {
type = types.str;
default = "60s";
@ -796,10 +813,10 @@ in
'';
};
proxyCache = mkOption {
type = types.submodule {
proxyCachePath = mkOption {
type = types.attrsOf (types.submodule ({ ... }: {
options = {
enable = mkEnableOption (lib.mdDoc "Enable proxy cache");
enable = mkEnableOption (lib.mdDoc "this proxy cache path entry");
keysZoneName = mkOption {
type = types.str;
@ -857,9 +874,12 @@ in
description = lib.mdDoc "Set maximum cache size";
};
};
};
}));
default = {};
description = lib.mdDoc "Configure proxy cache";
description = lib.mdDoc ''
Configure named proxy cache path entries.
See <http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path> for documentation.
'';
};
resolver = mkOption {
@ -970,6 +990,12 @@ in
The Nginx log directory has been moved to /var/log/nginx, the cache directory
to /var/cache/nginx. The option services.nginx.stateDir has been removed.
'')
(mkRenamedOptionModule [ "services" "nginx" "proxyCache" "inactive" ] [ "services" "nginx" "proxyCachePath" "" "inactive" ])
(mkRenamedOptionModule [ "services" "nginx" "proxyCache" "useTempPath" ] [ "services" "nginx" "proxyCachePath" "" "useTempPath" ])
(mkRenamedOptionModule [ "services" "nginx" "proxyCache" "levels" ] [ "services" "nginx" "proxyCachePath" "" "levels" ])
(mkRenamedOptionModule [ "services" "nginx" "proxyCache" "keysZoneSize" ] [ "services" "nginx" "proxyCachePath" "" "keysZoneSize" ])
(mkRenamedOptionModule [ "services" "nginx" "proxyCache" "keysZoneName" ] [ "services" "nginx" "proxyCachePath" "" "keysZoneName" ])
(mkRenamedOptionModule [ "services" "nginx" "proxyCache" "enable" ] [ "services" "nginx" "proxyCachePath" "" "enable" ])
];
config = mkIf cfg.enable {
@ -1027,6 +1053,14 @@ in
services.nginx.virtualHosts.<name>.useACMEHost are mutually exclusive.
'';
}
{
assertion = cfg.package.pname != "nginxQuic" -> all (host: !host.quic) (attrValues virtualHosts);
message = ''
services.nginx.virtualHosts.<name>.quic requires using the nginxQuic package,
which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;`.
'';
}
] ++ map (name: mkCertOwnershipAssertion {
inherit (cfg) group user;
cert = config.security.acme.certs.${name};
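
As an illustration of the renamed options: the former top-level `proxyCache` settings now live under named `proxyCachePath` entries (the automatic renames map onto the entry with the empty-string name). A sketch with placeholder sizes:

```nix
{ ... }:
{
  services.nginx.proxyCachePath."static" = {
    enable = true;
    keysZoneName = "static";
    keysZoneSize = "32m";
    maxSize = "1g";
    inactive = "10m";
    # Renders roughly as:
    #   proxy_cache_path /var/cache/nginx/static keys_zone=static:32m ... ;
  };
}
```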

View file

@ -188,24 +188,54 @@ with lib;
type = types.bool;
default = true;
description = lib.mdDoc ''
Whether to enable HTTP 2.
Whether to enable the HTTP/2 protocol.
Note that (as of writing) due to nginx's implementation, to disable
HTTP 2 you have to disable it on all vhosts that use a given
HTTP/2 you have to disable it on all vhosts that use a given
IP address / port.
If there is one server block configured to enable http2,then it is
If there is one server block configured to enable http2, then it is
enabled for all server blocks on this IP.
See https://stackoverflow.com/a/39466948/263061.
'';
};
http3 = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Whether to enable the HTTP/3 protocol.
This requires using the `pkgs.nginxQuic` package,
which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;`,
and enabling the QUIC transport protocol with
`services.nginx.virtualHosts.<name>.quic = true;`.
Note that HTTP/3 support is experimental and
*not* yet recommended for production.
Read more at https://quic.nginx.org/
'';
};
http3_hq = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Whether to enable HTTP 3.
Whether to enable the HTTP/0.9 protocol negotiation used in QUIC interoperability tests.
This requires using the `pkgs.nginxQuic` package,
which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;`,
and enabling the QUIC transport protocol with
`services.nginx.virtualHosts.<name>.quic = true;`.
Note that special application protocol support is experimental and
*not* yet recommended for production.
Read more at https://quic.nginx.org/
'';
};
quic = mkOption {
type = types.bool;
default = false;
description = lib.mdDoc ''
Whether to enable the QUIC transport protocol.
This requires using the `pkgs.nginxQuic` package,
which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;`.
Note that HTTP 3 support is experimental and
Note that QUIC support is experimental and
*not* yet recommended for production.
Read more at https://quic.nginx.org/
'';

View file

@ -0,0 +1,201 @@
{ lib, pkgs, config, utils, ... }:
let
inherit (lib) concatMapStrings literalExpression mdDoc mkDefault mkEnableOption mkIf mkOption types;
cfg = config.services.xserver.desktopManager.budgie;
nixos-background-light = pkgs.nixos-artwork.wallpapers.nineish;
nixos-background-dark = pkgs.nixos-artwork.wallpapers.nineish-dark-gray;
nixos-gsettings-overrides = pkgs.budgie.budgie-gsettings-overrides.override {
inherit (cfg) extraGSettingsOverrides extraGSettingsOverridePackages;
inherit nixos-background-dark nixos-background-light;
};
in {
options = {
services.xserver.desktopManager.budgie = {
enable = mkEnableOption (mdDoc "Budgie desktop");
sessionPath = mkOption {
description = mdDoc "Additional list of packages to be added to the session search path. Useful for GSettings-conditional autostart.";
type = with types; listOf package;
example = literalExpression "[ pkgs.budgie.budgie-desktop-view ]";
default = [];
};
extraGSettingsOverrides = mkOption {
description = mdDoc "Additional GSettings overrides.";
type = types.lines;
default = "";
};
extraGSettingsOverridePackages = mkOption {
description = mdDoc "List of packages for which GSettings are overridden.";
type = with types; listOf path;
default = [];
};
};
environment.budgie.excludePackages = mkOption {
description = mdDoc "Which packages Budgie should exclude from the default environment.";
type = with types; listOf package;
default = [];
example = literalExpression "[ pkgs.mate-terminal ]";
};
};
config = mkIf cfg.enable {
services.xserver.displayManager.sessionPackages = with pkgs; [
budgie.budgie-desktop
];
services.xserver.displayManager.lightdm.greeters.slick = {
enable = mkDefault true;
theme = mkDefault { name = "Qogir"; package = pkgs.qogir-theme; };
iconTheme = mkDefault { name = "Qogir"; package = pkgs.qogir-icon-theme; };
cursorTheme = mkDefault { name = "Qogir"; package = pkgs.qogir-icon-theme; };
};
services.xserver.desktopManager.budgie.sessionPath = [ pkgs.budgie.budgie-desktop-view ];
environment.extraInit = ''
${concatMapStrings (p: ''
if [ -d "${p}/share/gsettings-schemas/${p.name}" ]; then
export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}${p}/share/gsettings-schemas/${p.name}
fi
if [ -d "${p}/lib/girepository-1.0" ]; then
export GI_TYPELIB_PATH=$GI_TYPELIB_PATH''${GI_TYPELIB_PATH:+:}${p}/lib/girepository-1.0
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH''${LD_LIBRARY_PATH:+:}${p}/lib
fi
'') cfg.sessionPath}
'';
environment.systemPackages = with pkgs;
[
# Budgie Desktop.
budgie.budgie-backgrounds
budgie.budgie-control-center
budgie.budgie-desktop
budgie.budgie-desktop-view
budgie.budgie-screensaver
# Required by the Budgie Desktop session.
(gnome.gnome-session.override {gnomeShellSupport = false;})
# Required by Budgie Menu.
gnome-menus
# Provides `gsettings`.
glib
# Update user directories.
xdg-user-dirs
]
++ (utils.removePackagesByName [
cinnamon.nemo
mate.eom
mate.pluma
mate.atril
mate.engrampa
mate.mate-calc
mate.mate-terminal
mate.mate-system-monitor
vlc
# Desktop themes.
qogir-theme
qogir-icon-theme
# Default settings.
nixos-gsettings-overrides
] config.environment.budgie.excludePackages)
++ cfg.sessionPath;
# Fonts.
fonts.fonts = mkDefault [
pkgs.noto-fonts
pkgs.hack-font
];
fonts.fontconfig.defaultFonts = {
sansSerif = mkDefault ["Noto Sans"];
monospace = mkDefault ["Hack"];
};
# Qt application style.
qt = {
enable = mkDefault true;
style = mkDefault "gtk2";
platformTheme = mkDefault "gtk2";
};
environment.pathsToLink = [
"/share" # TODO: https://github.com/NixOS/nixpkgs/issues/47173
];
# GSettings overrides.
environment.sessionVariables.NIX_GSETTINGS_OVERRIDES_DIR = "${nixos-gsettings-overrides}/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas";
# Required by Budgie Desktop.
services.xserver.updateDbusEnvironment = true;
programs.dconf.enable = true;
# Required by Budgie Screensaver.
security.pam.services.budgie-screensaver = {};
# Required by Budgie's Polkit Dialog.
security.polkit.enable = mkDefault true;
# Required by Budgie Panel plugins and/or Budgie Control Center panels.
networking.networkmanager.enable = mkDefault true; # for BCC's Network panel.
programs.nm-applet.enable = config.networking.networkmanager.enable; # Budgie has no Network applet.
programs.nm-applet.indicator = false; # Budgie doesn't support AppIndicators.
hardware.bluetooth.enable = mkDefault true; # for Budgie's Status Indicator and BCC's Bluetooth panel.
hardware.pulseaudio.enable = mkDefault true; # for Budgie's Status Indicator and BCC's Sound panel.
xdg.portal.enable = mkDefault true; # for BCC's Applications panel.
xdg.portal.extraPortals = with pkgs; [
xdg-desktop-portal-gtk # provides a XDG Portals implementation.
];
services.geoclue2.enable = mkDefault true; # for BCC's Privacy > Location Services panel.
services.upower.enable = config.powerManagement.enable; # for Budgie's Status Indicator and BCC's Power panel.
services.xserver.libinput.enable = mkDefault true; # for BCC's Mouse panel.
services.colord.enable = mkDefault true; # for BCC's Color panel.
services.gnome.at-spi2-core.enable = mkDefault true; # for BCC's A11y panel.
services.accounts-daemon.enable = mkDefault true; # for BCC's Users panel.
services.fprintd.enable = mkDefault true; # for BCC's Users panel.
services.udisks2.enable = mkDefault true; # for BCC's Details panel.
# For BCC's Online Accounts panel.
services.gnome.gnome-online-accounts.enable = mkDefault true;
services.gnome.gnome-online-miners.enable = true;
# For BCC's Printers panel.
services.printing.enable = mkDefault true;
services.system-config-printer.enable = config.services.printing.enable;
# For BCC's Sharing panel.
services.dleyna-renderer.enable = mkDefault true;
services.dleyna-server.enable = mkDefault true;
services.gnome.gnome-user-share.enable = mkDefault true;
services.gnome.rygel.enable = mkDefault true;
# Other default services.
services.gnome.evolution-data-server.enable = mkDefault true;
services.gnome.glib-networking.enable = mkDefault true;
services.gnome.gnome-keyring.enable = mkDefault true;
services.gnome.gnome-settings-daemon.enable = mkDefault true;
services.gvfs.enable = mkDefault true;
# Register packages for DBus.
services.dbus.packages = with pkgs; [
budgie.budgie-control-center
];
# Shell integration for MATE Terminal.
programs.bash.vteIntegration = true;
programs.zsh.vteIntegration = true;
};
}
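
A short sketch of enabling the new Budgie module together with the LightDM greeter it defaults to; the excluded package is only an example:

```nix
{ pkgs, ... }:
{
  services.xserver.enable = true;
  services.xserver.displayManager.lightdm.enable = true;
  services.xserver.desktopManager.budgie.enable = true;

  # Optionally trim the default application set, e.g. drop MATE Terminal.
  environment.budgie.excludePackages = [ pkgs.mate-terminal ];
}
```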

View file

@ -21,7 +21,7 @@ in
./none.nix ./xterm.nix ./phosh.nix ./xfce.nix ./plasma5.nix ./lumina.nix
./lxqt.nix ./enlightenment.nix ./gnome.nix ./retroarch.nix ./kodi.nix
./mate.nix ./pantheon.nix ./surf-display.nix ./cde.nix
./cinnamon.nix
./cinnamon.nix ./budgie.nix
];
options = {

View file

@ -27,8 +27,7 @@ in
type = types.enum [ "x11" "wayland" ];
default = "x11";
description = lib.mdDoc ''
Backend to use in qtile:
<option>x11</option> or <option>wayland</option>.
Backend to use in qtile: `x11` or `wayland`.
'';
};

View file

@ -15,10 +15,10 @@ in
services.xserver.windowManager.session = singleton {
name = "stumpwm";
start = ''
${pkgs.lispPackages.stumpwm}/bin/stumpwm &
${pkgs.sbclPackages.stumpwm}/bin/stumpwm &
waitPID=$!
'';
};
environment.systemPackages = [ pkgs.lispPackages.stumpwm ];
environment.systemPackages = [ pkgs.sbclPackages.stumpwm ];
};
}

View file

@ -16,7 +16,9 @@ in
default = false;
type = types.bool;
description = lib.mdDoc ''
Whether to enable the systemd DNS resolver daemon.
Whether to enable the systemd DNS resolver daemon, `systemd-resolved`.
Search for `services.resolved` to see all options.
'';
};

View file

@ -62,5 +62,5 @@ in
};
};
meta.maintainers = with maintainers; [ houstdav000 ];
meta.maintainers = with maintainers; [ cyntheticfox ];
}

View file

@ -85,7 +85,8 @@ in rec {
stdenv
subversion
tarball
vim;
vim
tests-stdenv-gcc-stageCompare;
};
tested = let
@ -135,6 +136,7 @@ in rec {
"nixos.tests.proxy"
"nixos.tests.simple"
"nixpkgs.jdk"
"nixpkgs.tests-stdenv-gcc-stageCompare"
])
];
};

View file

@ -112,6 +112,7 @@ in {
btrbk-doas = handleTest ./btrbk-doas.nix {};
btrbk-no-timer = handleTest ./btrbk-no-timer.nix {};
btrbk-section-order = handleTest ./btrbk-section-order.nix {};
budgie = handleTest ./budgie.nix {};
buildbot = handleTest ./buildbot.nix {};
buildkite-agents = handleTest ./buildkite-agents.nix {};
caddy = handleTest ./caddy.nix {};
@ -122,9 +123,9 @@ in {
cassandra_3_0 = handleTest ./cassandra.nix { testPackage = pkgs.cassandra_3_0; };
cassandra_3_11 = handleTest ./cassandra.nix { testPackage = pkgs.cassandra_3_11; };
cassandra_4 = handleTest ./cassandra.nix { testPackage = pkgs.cassandra_4; };
ceph-multi-node = handleTestOn ["x86_64-linux"] ./ceph-multi-node.nix {};
ceph-single-node = handleTestOn ["x86_64-linux"] ./ceph-single-node.nix {};
ceph-single-node-bluestore = handleTestOn ["x86_64-linux"] ./ceph-single-node-bluestore.nix {};
ceph-multi-node = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./ceph-multi-node.nix {};
ceph-single-node = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./ceph-single-node.nix {};
ceph-single-node-bluestore = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./ceph-single-node-bluestore.nix {};
certmgr = handleTest ./certmgr.nix {};
cfssl = handleTestOn ["aarch64-linux" "x86_64-linux"] ./cfssl.nix {};
cgit = handleTest ./cgit.nix {};
@@ -459,7 +460,8 @@ in {
netdata = handleTest ./netdata.nix {};
networking.networkd = handleTest ./networking.nix { networkd = true; };
networking.scripted = handleTest ./networking.nix { networkd = false; };
netbox = handleTest ./web-apps/netbox.nix {};
netbox = handleTest ./web-apps/netbox.nix { inherit (pkgs) netbox; };
netbox_3_3 = handleTest ./web-apps/netbox.nix { netbox = pkgs.netbox_3_3; };
# TODO: put in networking.nix after the test becomes more complete
networkingProxy = handleTest ./networking-proxy.nix {};
nextcloud = handleTest ./nextcloud {};

View file

@@ -0,0 +1,51 @@
import ./make-test-python.nix ({ pkgs, lib, ... }: {
name = "budgie";
meta = with lib; {
maintainers = [ maintainers.federicoschonborn ];
};
nodes.machine = { ... }: {
imports = [
./common/user-account.nix
];
services.xserver.enable = true;
services.xserver.displayManager = {
lightdm.enable = true;
autoLogin = {
enable = true;
user = "alice";
};
};
services.xserver.desktopManager.budgie.enable = true;
};
testScript = { nodes, ... }:
let
user = nodes.machine.users.users.alice;
in
''
with subtest("Wait for login"):
machine.wait_for_x()
machine.wait_for_file("${user.home}/.Xauthority")
machine.succeed("xauth merge ${user.home}/.Xauthority")
with subtest("Check that logging in has given the user ownership of devices"):
machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
with subtest("Check if Budgie session components actually start"):
machine.wait_until_succeeds("pgrep budgie-daemon")
machine.wait_for_window("budgie-daemon")
machine.wait_until_succeeds("pgrep budgie-panel")
machine.wait_for_window("budgie-panel")
with subtest("Open MATE terminal"):
machine.succeed("su - ${user.name} -c 'DISPLAY=:0 mate-terminal >&2 &'")
machine.wait_for_window("Terminal")
machine.sleep(20)
machine.screenshot("screen")
'';
})
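
Since the test is registered in nixos/tests/all-tests.nix in the hunk further up, it can be built through the `nixosTests` attribute set; a small sketch (attribute path taken from that registration):

```nix
# Sketch: evaluate/build the Budgie VM test registered above via pkgs.nixosTests,
# e.g. with `nix-build` on this expression.
let
  pkgs = import <nixpkgs> { };
in
pkgs.nixosTests.budgie
```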

View file

@@ -23,7 +23,7 @@ import ./make-test-python.nix {
];
};
networking.firewall.allowedTCPPorts = [ 8010 8011 9989 ];
environment.systemPackages = with pkgs; [ git python3Packages.buildbot-full ];
environment.systemPackages = with pkgs; [ git buildbot-full ];
};
bbworker = { pkgs, ... }: {
@@ -31,7 +31,7 @@ import ./make-test-python.nix {
enable = true;
masterUrl = "bbmaster:9989";
};
environment.systemPackages = with pkgs; [ git python3Packages.buildbot-worker ];
environment.systemPackages = with pkgs; [ git buildbot-worker ];
};
gitrepo = { pkgs, ... }: {

View file

@@ -268,14 +268,6 @@ let
'';
}) { inherit pkgs system; };
in {
ELK-6 = mkElkTest "elk-6-oss" {
name = "elk-6-oss";
elasticsearch = pkgs.elasticsearch6-oss;
logstash = pkgs.logstash6-oss;
kibana = pkgs.kibana6-oss;
journalbeat = pkgs.journalbeat6;
metricbeat = pkgs.metricbeat6;
};
# We currently only package upstream binaries.
# Feel free to package an SSPL licensed source-based package!
# ELK-7 = mkElkTest "elk-7-oss" {
@@ -287,13 +279,6 @@ in {
# metricbeat = pkgs.metricbeat7;
# };
unfree = lib.dontRecurseIntoAttrs {
ELK-6 = mkElkTest "elk-6" {
elasticsearch = pkgs.elasticsearch6;
logstash = pkgs.logstash6;
kibana = pkgs.kibana6;
journalbeat = pkgs.journalbeat6;
metricbeat = pkgs.metricbeat6;
};
ELK-7 = mkElkTest "elk-7" {
elasticsearch = pkgs.elasticsearch7;
logstash = pkgs.logstash7;

View file

@@ -24,7 +24,6 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
services.xserver.desktopManager.gnome.enable = true;
services.xserver.desktopManager.gnome.debug = true;
services.xserver.displayManager.defaultSession = "gnome-xorg";
programs.gnome-terminal.enable = true;
systemd.user.services = {
"org.gnome.Shell@x11" = {
@@ -61,10 +60,10 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
# False when startup is done
startingUp = su "${gdbus} ${eval} Main.layoutManager._startingUp";
# Start gnome-terminal
gnomeTerminalCommand = su "gnome-terminal";
# Start Console
launchConsole = su "${bus} gapplication launch org.gnome.Console";
# Hopefully gnome-terminal's wm class
# Hopefully Console's wm class
wmClass = su "${gdbus} ${eval} global.display.focus_window.wm_class";
in ''
with subtest("Login to GNOME Xorg with GDM"):
@@ -82,13 +81,17 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
"${startingUp} | grep -q 'true,..false'"
)
with subtest("Open Gnome Terminal"):
with subtest("Open Console"):
# Close the Activities view so that Shell can correctly track the focused window.
machine.send_key("esc")
machine.succeed(
"${gnomeTerminalCommand}"
"${launchConsole}"
)
# correct output should be (true, '"Gnome-terminal"')
# correct output should be (true, '"kgx"')
# For some reason, this deviates from Wayland.
machine.wait_until_succeeds(
"${wmClass} | grep -q 'true,...Gnome-terminal'"
"${wmClass} | grep -q 'true,...kgx'"
)
machine.sleep(20)
machine.screenshot("screen")

View file

@@ -22,14 +22,6 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
services.xserver.desktopManager.gnome.enable = true;
services.xserver.desktopManager.gnome.debug = true;
programs.gnome-terminal.enable = true;
environment.systemPackages = [
(pkgs.makeAutostartItem {
name = "org.gnome.Terminal";
package = pkgs.gnome.gnome-terminal;
})
];
systemd.user.services = {
"org.gnome.Shell@wayland" = {
@@ -64,10 +56,10 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
# False when startup is done
startingUp = su "${gdbus} ${eval} Main.layoutManager._startingUp";
# Start gnome-terminal
gnomeTerminalCommand = su "${bus} gnome-terminal";
# Start Console
launchConsole = su "${bus} gapplication launch org.gnome.Console";
# Hopefully gnome-terminal's wm class
# Hopefully Console's wm class
wmClass = su "${gdbus} ${eval} global.display.focus_window.wm_class";
in ''
with subtest("Login to GNOME with GDM"):
@@ -86,10 +78,16 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : {
"${startingUp} | grep -q 'true,..false'"
)
with subtest("Open Gnome Terminal"):
# correct output should be (true, '"gnome-terminal-server"')
with subtest("Open Console"):
# Close the Activities view so that Shell can correctly track the focused window.
machine.send_key("esc")
machine.succeed(
"${launchConsole}"
)
# correct output should be (true, '"org.gnome.Console"')
machine.wait_until_succeeds(
"${wmClass} | grep -q 'gnome-terminal-server'"
"${wmClass} | grep -q 'true,...org.gnome.Console'"
)
machine.sleep(20)
machine.screenshot("screen")

View file

@@ -8,7 +8,6 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
services.mongodb.enable = true;
services.elasticsearch.enable = true;
services.elasticsearch.package = pkgs.elasticsearch-oss;
services.elasticsearch.extraConf = ''
network.publish_host: 127.0.0.1
network.bind_host: 127.0.0.1

View file

@@ -107,6 +107,8 @@ in
with subtest("Network is up"):
alice.wait_until_succeeds("ping -c1 bob")
alice.succeed("systemctl restart ipsec")
bob.succeed("systemctl restart ipsec")
with subtest("Eve can eavesdrop cleartext traffic"):
eavesdrop()

View file

@@ -1,10 +1,12 @@
import ./make-test-python.nix ({ pkgs, lib, ... }: {
name = "mediawiki";
meta.maintainers = [ lib.maintainers.aanderse ];
{
system ? builtins.currentSystem,
config ? {},
pkgs ? import ../.. { inherit system config; },
}:
nodes.machine =
{ ... }:
{ services.mediawiki.enable = true;
let
shared = {
services.mediawiki.enable = true;
services.mediawiki.virtualHost.hostName = "localhost";
services.mediawiki.virtualHost.adminAddr = "root@example.com";
services.mediawiki.passwordFile = pkgs.writeText "password" "correcthorsebatterystaple";
@@ -17,6 +19,17 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
};
};
testLib = import ../lib/testing-python.nix {
inherit system pkgs;
extraConfigurations = [ shared ];
};
in
{
mysql = testLib.makeTest {
name = "mediawiki-mysql";
nodes.machine = {
services.mediawiki.database.type = "mysql";
};
testScript = ''
start_all()
@@ -25,4 +38,20 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
page = machine.succeed("curl -fL http://localhost/")
assert "MediaWiki has been installed" in page
'';
})
};
postgresql = testLib.makeTest {
name = "mediawiki-postgres";
nodes.machine = {
services.mediawiki.database.type = "postgres";
};
testScript = ''
start_all()
machine.wait_for_unit("phpfpm-mediawiki.service")
page = machine.succeed("curl -fL http://localhost/")
assert "MediaWiki has been installed" in page
'';
};
}

View file

@@ -36,8 +36,10 @@ in
sslCertificateKey = ./common/acme/server/acme.test.key.pem;
http2 = true;
http3 = true;
http3_hq = false;
quic = true;
reuseport = true;
root = lib.mkForce (pkgs.runCommandLocal "testdir2" {} ''
root = lib.mkForce (pkgs.runCommandLocal "testdir" {} ''
mkdir "$out"
cat > "$out/index.html" <<EOF
<html><body>Hello World!</body></html>
@@ -82,6 +84,8 @@ in
# Check header reading
client.succeed("curl --verbose --http3 --head https://acme.test | grep 'content-type'")
client.succeed("curl --verbose --http3 --head https://acme.test | grep 'HTTP/3 200'")
client.succeed("curl --verbose --http3 --head https://acme.test/error | grep 'HTTP/3 404'")
# Check change User-Agent
client.succeed("curl --verbose --http3 --user-agent 'Curl test 3.0' https://acme.test")

View file

@@ -67,10 +67,10 @@ import ./make-test-python.nix ({ pkgs, ... }: {
};
testScript = { nodes, ... }: let
etagSystem = "${nodes.webserver.config.system.build.toplevel}/specialisation/etagSystem";
justReloadSystem = "${nodes.webserver.config.system.build.toplevel}/specialisation/justReloadSystem";
reloadRestartSystem = "${nodes.webserver.config.system.build.toplevel}/specialisation/reloadRestartSystem";
reloadWithErrorsSystem = "${nodes.webserver.config.system.build.toplevel}/specialisation/reloadWithErrorsSystem";
etagSystem = "${nodes.webserver.system.build.toplevel}/specialisation/etagSystem";
justReloadSystem = "${nodes.webserver.system.build.toplevel}/specialisation/justReloadSystem";
reloadRestartSystem = "${nodes.webserver.system.build.toplevel}/specialisation/reloadRestartSystem";
reloadWithErrorsSystem = "${nodes.webserver.system.build.toplevel}/specialisation/reloadWithErrorsSystem";
in ''
url = "http://localhost/index.html"

View file

@@ -84,8 +84,6 @@ in
};
};
services.elasticsearch.package = pkgs.elasticsearch-oss;
environment.systemPackages = [
(sendEmail "dmarc@localhost")
pkgs.jq
@@ -158,8 +156,6 @@ in
};
};
services.elasticsearch.package = pkgs.elasticsearch-oss;
environment.systemPackages = [
pkgs.jq
];

View file

@@ -1,5 +1,7 @@
import ./make-test-python.nix ({ pkgs, ... }: {
name = "tracee-integration";
meta.maintainers = pkgs.tracee.meta.maintainers;
nodes = {
machine = { config, pkgs, ... }: {
# EventFilters/trace_only_events_from_new_containers and
@@ -7,11 +9,11 @@ import ./make-test-python.nix ({ pkgs, ... }: {
# require docker/dockerd
virtualisation.docker.enable = true;
environment.systemPackages = [
environment.systemPackages = with pkgs; [
# required by Test_EventFilters/trace_events_from_ls_and_which_binary_in_separate_scopes
pkgs.which
which
# build the go integration tests as a binary
(pkgs.tracee.overrideAttrs (oa: {
(tracee.overrideAttrs (oa: {
pname = oa.pname + "-integration";
postPatch = oa.postPatch or "" + ''
# prepare tester.sh (which will be embedded in the test binary)
@@ -20,10 +22,11 @@ import ./make-test-python.nix ({ pkgs, ... }: {
# fix the test to look at nixos paths for running programs
substituteInPlace tests/integration/integration_test.go \
--replace "bin=/usr/bin/" "comm=" \
--replace "binary=/usr/bin/" "comm=" \
--replace "/usr/bin/dockerd" "dockerd" \
--replace "/usr/bin" "/run/current-system/sw/bin"
'';
nativeBuildInputs = oa.nativeBuildInputs or [ ] ++ [ pkgs.makeWrapper ];
nativeBuildInputs = oa.nativeBuildInputs or [ ] ++ [ makeWrapper ];
buildPhase = ''
runHook preBuild
# just build the static lib we need for the go test binary
@@ -34,6 +37,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
runHook postBuild
'';
doCheck = false;
outputs = [ "out" ];
installPhase = ''
mkdir -p $out/bin
mv $GOPATH/tracee-integration $out/bin/

View file

@@ -9,7 +9,7 @@
${extraInit}
server.wait_for_unit("redis-mastodon.service")
server.wait_for_unit("mastodon-sidekiq.service")
server.wait_for_unit("mastodon-sidekiq-all.service")
server.wait_for_unit("mastodon-streaming.service")
server.wait_for_unit("mastodon-web.service")
server.wait_for_open_port(55000)

View file

@@ -1,21 +1,146 @@
import ../make-test-python.nix ({ lib, pkgs, ... }: {
let
ldapDomain = "example.org";
ldapSuffix = "dc=example,dc=org";
ldapRootUser = "admin";
ldapRootPassword = "foobar";
testUser = "alice";
testPassword = "verySecure";
testGroup = "netbox-users";
in import ../make-test-python.nix ({ lib, pkgs, netbox, ... }: {
name = "netbox";
meta = with lib.maintainers; {
maintainers = [ n0emis ];
maintainers = [ minijackson n0emis ];
};
nodes.machine = { ... }: {
nodes.machine = { config, ... }: {
services.netbox = {
enable = true;
package = netbox;
secretKeyFile = pkgs.writeText "secret" ''
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789
'';
enableLdap = true;
ldapConfigPath = pkgs.writeText "ldap_config.py" ''
import ldap
from django_auth_ldap.config import LDAPSearch, PosixGroupType
AUTH_LDAP_SERVER_URI = "ldap://localhost/"
AUTH_LDAP_USER_SEARCH = LDAPSearch(
"ou=accounts,ou=posix,${ldapSuffix}",
ldap.SCOPE_SUBTREE,
"(uid=%(user)s)",
)
AUTH_LDAP_GROUP_SEARCH = LDAPSearch(
"ou=groups,ou=posix,${ldapSuffix}",
ldap.SCOPE_SUBTREE,
"(objectClass=posixGroup)",
)
AUTH_LDAP_GROUP_TYPE = PosixGroupType()
# Mirror LDAP group assignments.
AUTH_LDAP_MIRROR_GROUPS = True
# For more granular permissions, we can map LDAP groups to Django groups.
AUTH_LDAP_FIND_GROUP_PERMS = True
'';
};
services.nginx = {
enable = true;
recommendedProxySettings = true;
virtualHosts.netbox = {
default = true;
locations."/".proxyPass = "http://localhost:${toString config.services.netbox.port}";
locations."/static/".alias = "/var/lib/netbox/static/";
};
};
# Adapted from the sssd-ldap NixOS test
services.openldap = {
enable = true;
settings = {
children = {
"cn=schema".includes = [
"${pkgs.openldap}/etc/schema/core.ldif"
"${pkgs.openldap}/etc/schema/cosine.ldif"
"${pkgs.openldap}/etc/schema/inetorgperson.ldif"
"${pkgs.openldap}/etc/schema/nis.ldif"
];
"olcDatabase={1}mdb" = {
attrs = {
objectClass = [ "olcDatabaseConfig" "olcMdbConfig" ];
olcDatabase = "{1}mdb";
olcDbDirectory = "/var/lib/openldap/db";
olcSuffix = ldapSuffix;
olcRootDN = "cn=${ldapRootUser},${ldapSuffix}";
olcRootPW = ldapRootPassword;
};
};
};
};
declarativeContents = {
${ldapSuffix} = ''
dn: ${ldapSuffix}
objectClass: top
objectClass: dcObject
objectClass: organization
o: ${ldapDomain}
dn: ou=posix,${ldapSuffix}
objectClass: top
objectClass: organizationalUnit
dn: ou=accounts,ou=posix,${ldapSuffix}
objectClass: top
objectClass: organizationalUnit
dn: uid=${testUser},ou=accounts,ou=posix,${ldapSuffix}
objectClass: person
objectClass: posixAccount
userPassword: ${testPassword}
homeDirectory: /home/${testUser}
uidNumber: 1234
gidNumber: 1234
cn: ""
sn: ""
dn: ou=groups,ou=posix,${ldapSuffix}
objectClass: top
objectClass: organizationalUnit
dn: cn=${testGroup},ou=groups,ou=posix,${ldapSuffix}
objectClass: posixGroup
gidNumber: 2345
memberUid: ${testUser}
'';
};
};
testScript = ''
machine.start()
users.users.nginx.extraGroups = [ "netbox" ];
networking.firewall.allowedTCPPorts = [ 80 ];
};
testScript = let
changePassword = pkgs.writeText "change-password.py" ''
from django.contrib.auth.models import User
u = User.objects.get(username='netbox')
u.set_password('netbox')
u.save()
'';
in ''
from typing import Any, Dict
import json
start_all()
machine.wait_for_unit("netbox.target")
machine.wait_until_succeeds("journalctl --since -1m --unit netbox --grep Listening")
@@ -26,5 +151,167 @@ import ../make-test-python.nix ({ lib, pkgs, ... }: {
with subtest("Staticfiles are generated"):
machine.succeed("test -e /var/lib/netbox/static/netbox.js")
with subtest("Superuser can be created"):
machine.succeed(
"netbox-manage createsuperuser --noinput --username netbox --email netbox@example.com"
)
# Django doesn't have a "clean" way of inputting the password from the command line
machine.succeed("cat '${changePassword}' | netbox-manage shell")
machine.wait_for_unit("network.target")
with subtest("Home screen loads from nginx"):
machine.succeed(
"curl -sSfL http://localhost | grep '<title>Home | NetBox</title>'"
)
with subtest("Staticfiles can be fetched"):
machine.succeed("curl -sSfL http://localhost/static/netbox.js")
machine.succeed("curl -sSfL http://localhost/static/docs/")
with subtest("Can interact with API"):
json.loads(
machine.succeed("curl -sSfL -H 'Accept: application/json' 'http://localhost/api/'")
)
def login(username: str, password: str):
encoded_data = json.dumps({"username": username, "password": password})
uri = "/users/tokens/provision/"
result = json.loads(
machine.succeed(
"curl -sSfL "
"-X POST "
"-H 'Accept: application/json' "
"-H 'Content-Type: application/json' "
f"'http://localhost/api{uri}' "
f"--data '{encoded_data}'"
)
)
return result["key"]
with subtest("Can login"):
auth_token = login("netbox", "netbox")
def get(uri: str):
return json.loads(
machine.succeed(
"curl -sSfL "
"-H 'Accept: application/json' "
f"-H 'Authorization: Token {auth_token}' "
f"'http://localhost/api{uri}'"
)
)
def delete(uri: str):
return machine.succeed(
"curl -sSfL "
f"-X DELETE "
"-H 'Accept: application/json' "
f"-H 'Authorization: Token {auth_token}' "
f"'http://localhost/api{uri}'"
)
def data_request(uri: str, method: str, data: Dict[str, Any]):
encoded_data = json.dumps(data)
return json.loads(
machine.succeed(
"curl -sSfL "
f"-X {method} "
"-H 'Accept: application/json' "
"-H 'Content-Type: application/json' "
f"-H 'Authorization: Token {auth_token}' "
f"'http://localhost/api{uri}' "
f"--data '{encoded_data}'"
)
)
def post(uri: str, data: Dict[str, Any]):
return data_request(uri, "POST", data)
def patch(uri: str, data: Dict[str, Any]):
return data_request(uri, "PATCH", data)
with subtest("Can create objects"):
result = post("/dcim/sites/", {"name": "Test site", "slug": "test-site"})
site_id = result["id"]
# Example from:
# http://netbox.extra.cea.fr/static/docs/integrations/rest-api/#creating-a-new-object
post("/ipam/prefixes/", {"prefix": "192.0.2.0/24", "site": site_id})
result = post(
"/dcim/manufacturers/",
{"name": "Test manufacturer", "slug": "test-manufacturer"}
)
manufacturer_id = result["id"]
# Had an issue with device-types before NetBox 3.4.0
result = post(
"/dcim/device-types/",
{
"model": "Test device type",
"manufacturer": manufacturer_id,
"slug": "test-device-type",
},
)
device_type_id = result["id"]
with subtest("Can list objects"):
result = get("/dcim/sites/")
assert result["count"] == 1
assert result["results"][0]["id"] == site_id
assert result["results"][0]["name"] == "Test site"
assert result["results"][0]["description"] == ""
result = get("/dcim/device-types/")
assert result["count"] == 1
assert result["results"][0]["id"] == device_type_id
assert result["results"][0]["model"] == "Test device type"
with subtest("Can update objects"):
new_description = "Test site description"
patch(f"/dcim/sites/{site_id}/", {"description": new_description})
result = get(f"/dcim/sites/{site_id}/")
assert result["description"] == new_description
with subtest("Can delete objects"):
# Delete a device-type since no object depends on it
delete(f"/dcim/device-types/{device_type_id}/")
result = get("/dcim/device-types/")
assert result["count"] == 0
with subtest("Can use the GraphQL API"):
encoded_data = json.dumps({
"query": "query { prefix_list { prefix, site { id, description } } }",
})
result = json.loads(
machine.succeed(
"curl -sSfL "
"-H 'Accept: application/json' "
"-H 'Content-Type: application/json' "
f"-H 'Authorization: Token {auth_token}' "
"'http://localhost/graphql/' "
f"--data '{encoded_data}'"
)
)
assert len(result["data"]["prefix_list"]) == 1
assert result["data"]["prefix_list"][0]["prefix"] == "192.0.2.0/24"
assert result["data"]["prefix_list"][0]["site"]["id"] == str(site_id)
assert result["data"]["prefix_list"][0]["site"]["description"] == new_description
with subtest("Can login with LDAP"):
machine.wait_for_unit("openldap.service")
login("alice", "${testPassword}")
with subtest("Can associate LDAP groups"):
result = get("/users/users/?username=${testUser}")
assert result["count"] == 1
assert any(group["name"] == "${testGroup}" for group in result["results"][0]["groups"])
'';
})

View file

@@ -10,8 +10,13 @@ let
InterfacePeers = {
eth1 = [ "tcp://192.168.1.200:12345" ];
};
MulticastInterfaces = [ "eth1" ];
LinkLocalTCPPort = 54321;
MulticastInterfaces = [ {
Regex = ".*";
Beacon = true;
Listen = true;
Port = 54321;
Priority = 0;
} ];
PublicKey = "2b6f918b6c1a4b54d6bcde86cf74e074fb32ead4ee439b7930df2aa60c825186";
PrivateKey = "0c4a24acd3402722ce9277ed179f4a04b895b49586493c25fbaed60653d857d62b6f918b6c1a4b54d6bcde86cf74e074fb32ead4ee439b7930df2aa60c825186";
};
@@ -115,8 +120,12 @@ in import ./make-test-python.nix ({ pkgs, ...} : {
settings = {
IfTAPMode = true;
IfName = "ygg0";
MulticastInterfaces = [ "eth1" ];
LinkLocalTCPPort = 43210;
MulticastInterfaces = [
{
Port = 43210;
}
];
openMulticastPort = true;
};
persistentKeys = true;
};
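
Both hunks above switch MulticastInterfaces from a list of interface names to a list of attribute sets; a sketch of the equivalent user-facing configuration, with the `services.yggdrasil.settings` option path assumed since only the nested settings appear in the diff:

```nix
# Sketch of the new MulticastInterfaces format shown above; the option path
# services.yggdrasil.settings is an assumption.
{
  services.yggdrasil.settings.MulticastInterfaces = [
    {
      Regex = "eth.*";
      Beacon = true;
      Listen = true;
      Port = 54321;
      Priority = 0;
    }
  ];
}
```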

View file

@@ -1,5 +1,5 @@
{ lib, stdenv, fetchurl, libclthreads, zita-alsa-pcmi, alsa-lib, libjack2
, libclxclient, libX11, libXft, readline
, libclxclient, libX11, libXft, readline, aeolus-stops
}:
stdenv.mkDerivation rec {
@@ -16,17 +16,29 @@ stdenv.mkDerivation rec {
libX11 libXft readline
];
patchPhase = ''sed "s@ldconfig.*@@" -i source/Makefile'';
postPatch = ''
sed -i source/Makefile -e /ldconfig/d
substituteInPlace source/main.cc --replace /etc/ "$out/etc/"
'';
preBuild = "cd source";
makeFlags = [ "DESTDIR=" "PREFIX=$(out)" ];
meta = {
postInstall = let cfg = ''
# Aeolus system wide default options
# Ignored if ~/.aeolusrc with local options exists
-u -S ${aeolus-stops}/${aeolus-stops.subdir}
''; in ''
mkdir -p $out/etc
echo -n "${cfg}" > $out/etc/aeolus.conf
'';
meta = with lib; {
description = "Synthetized (not sampled) pipe organ emulator";
homepage = "http://kokkinizita.linuxaudio.org/linuxaudio/aeolus/index.html";
license = lib.licenses.lgpl3;
platforms = lib.platforms.linux;
maintainers = [ lib.maintainers.nico202 ];
license = licenses.lgpl3;
platforms = platforms.linux;
maintainers = with maintainers; [ nico202 orivej ];
};
}

View file

@@ -0,0 +1,33 @@
{ lib, stdenvNoCC, fetchurl }:
stdenvNoCC.mkDerivation rec {
pname = "stops";
version = "0.4.0";
src = fetchurl {
url = "https://kokkinizita.linuxaudio.org/linuxaudio/downloads/${pname}-${version}.tar.bz2";
hash = "sha256-DnmguOAGyw9nv88ekJfbC04Qwbsw5tXEAaKeiCQR/LA=";
};
outputHashMode = "recursive";
outputHash = "sha256-gGHowq7g7MZmnhrpqG+3wNLwQCtpiBB88euIKeQIpJ0=";
subdir = "share/Aeolus/stops";
installPhase = ''
runHook preInstall
mkdir -p $out/${subdir}
cp -r * $out/${subdir}
runHook postInstall
'';
meta = with lib; {
description = "aeolus synthesizer instrument definitions";
homepage = "http://kokkinizita.linuxaudio.org/linuxaudio/aeolus/index.html";
license = licenses.lgpl3;
platforms = platforms.linux;
maintainers = with maintainers; [ nico202 orivej ];
};
}

View file

@@ -18,6 +18,8 @@ stdenv.mkDerivation rec {
sha256 = "sha256-wzBOPTs8PTHzu5RpKwKhx552E7QnDx2Zn4OFaes8Q2I=";
};
NIX_CFLAGS_COMPILE = [ "-Wno-error=deprecated-declarations" ];
makeFlags = [ "DESTDIR=\${out}" "PREFIX=''" ];
sourceRoot = "source/src";
nativeBuildInputs = [ pkg-config wrapGAppsHook4 ];

View file

@@ -18,7 +18,7 @@
python3Packages.buildPythonApplication rec {
pname = "eartag";
version = "0.3.2";
version = "0.3.3";
format = "other";
src = fetchFromGitLab {
@@ -26,7 +26,7 @@ python3Packages.buildPythonApplication rec {
owner = "knuxify";
repo = pname;
rev = version;
sha256 = "sha256-XvbfQtE8LsztQ2VByG2jLYND3qVpH6owdAgh3b//lI4=";
sha256 = "sha256-120voKmlEDsVSxNfqmwBvTB90dQUwnf2CtxvOKqi8+U=";
};
postPatch = ''

View file

@@ -10,13 +10,13 @@
# gcc only supports objc on darwin
buildGoModule.override { stdenv = clangStdenv; } rec {
pname = "go-musicfox";
version = "3.7.7";
version = "4.0.1";
src = fetchFromGitHub {
owner = "anhoder";
repo = pname;
rev = "v${version}";
hash = "sha256-gQPr+mCZ7tnASs/ibow1b0Qj3ppZhdgP4U1Vxo+FfE4=";
hash = "sha256-eOcQWw5wXU59/EzDLk028mV/Ro6Ii0DYcp4wdDedhrA=";
};
deleteVendor = true;

View file

@@ -1,4 +1,4 @@
{ lib, fetchFromGitHub, cmake, pkg-config, alsa-lib ? null, fftwFloat, fltk13
{ lib, fetchFromGitHub, fetchpatch, cmake, pkg-config, alsa-lib ? null, carla ? null, fftwFloat, fltk13
, fluidsynth ? null, lame ? null, libgig ? null, libjack2 ? null, libpulseaudio ? null
, libsamplerate, libsoundio ? null, libsndfile, libvorbis ? null, portaudio ? null
, qtbase, qtx11extras, qttools, SDL ? null, mkDerivation }:
@@ -18,6 +18,7 @@ mkDerivation rec {
nativeBuildInputs = [ cmake qttools pkg-config ];
buildInputs = [
carla
alsa-lib
fftwFloat
fltk13
@@ -36,6 +37,13 @@ mkDerivation rec {
SDL # TODO: switch to SDL2 in the next version
];
patches = [
(fetchpatch {
url = "https://raw.githubusercontent.com/archlinux/svntogit-community/cf64acc45e3264c6923885867e2dbf8b7586a36b/trunk/lmms-carla-export.patch";
sha256 = "sha256-wlSewo93DYBN2PvrcV58dC9kpoo9Y587eCeya5OX+j4=";
})
];
cmakeFlags = [ "-DWANT_QT5=ON" ];
meta = with lib; {

View file

@@ -1,76 +1,66 @@
{ lib
, python3
, stdenv
, fetchFromGitHub
, substituteAll
, appstream-glib
, dbus
, desktop-file-utils
, gettext
, glib
, glib-networking
, gobject-introspection
, gst_all_1
, gtk4
, libadwaita
, librsvg
, libpulseaudio
, libsoup_3
, meson
, ninja
, pkg-config
, pulseaudio
, rustPlatform
, wrapGAppsHook4
}:
python3.pkgs.buildPythonApplication rec {
stdenv.mkDerivation rec {
pname = "mousai";
version = "0.6.6";
format = "other";
version = "0.7.0";
src = fetchFromGitHub {
owner = "SeaDve";
repo = "Mousai";
rev = "v${version}";
sha256 = "sha256-nCbFVFg+nVF8BOBfdzQVgdTRXR5UF18PJFC266yTFwg=";
hash = "sha256-dL+ZBv97T0sN7mPoOKsp5f6Dl9aarBYm2RRUfOclb+s=";
};
patches = [
(substituteAll {
src = ./paths.patch;
pactl = "${lib.getBin pulseaudio}/bin/pactl";
})
];
postPatch = ''
substituteInPlace build-aux/meson/postinstall.py \
--replace gtk-update-icon-cache gtk4-update-icon-cache
patchShebangs build-aux/meson
'';
cargoDeps = rustPlatform.fetchCargoTarball {
inherit src;
name = "${pname}-${version}";
hash = "sha256-qAtMpYVZwyay1KGYlH40T0HambrWh4CaZnwjvqev44g=";
};
nativeBuildInputs = [
appstream-glib
desktop-file-utils
gettext
glib
gobject-introspection
gtk4
meson
ninja
pkg-config
wrapGAppsHook4
];
] ++ (with rustPlatform; [
cargoSetupHook
rust.cargo
rust.rustc
]);
buildInputs = [
dbus
gst_all_1.gstreamer
gst_all_1.gst-plugins-base
gst_all_1.gst-plugins-good
gst_all_1.gst-plugins-bad
glib
glib-networking
gtk4
libadwaita
librsvg
pulseaudio
];
propagatedBuildInputs = with python3.pkgs; [
pygobject3
requests
libpulseaudio
libsoup_3
];
meta = with lib; {
@@ -78,5 +68,6 @@ python3.pkgs.buildPythonApplication rec {
homepage = "https://github.com/SeaDve/Mousai";
license = licenses.gpl3Plus;
maintainers = with maintainers; [ dotlambda ];
platforms = platforms.linux;
};
}

View file

@@ -0,0 +1,63 @@
{ lib
, stdenv
, fetchFromGitHub
, autoreconfHook
, pkg-config
, SDL2
, SDL2_ttf
, SDL2_image
, boost
, libmpdclient
, libwtk-sdl2
, icu
, libconfig
, dejavu_fonts
}:
stdenv.mkDerivation rec {
pname = "mpd-touch-screen-gui";
version = "unstable-2022-12-30";
src = fetchFromGitHub {
owner = "muesli4";
repo = pname;
rev = "156eaebede89da2b83a98d8f9dfa46af12282fb4";
sha256 = "sha256-vr/St4BghrndjUQ0nZI/uJq+F/MjEj6ulc4DYwQ/pgU=";
};
nativeBuildInputs = [
pkg-config
autoreconfHook
];
postPatch = ''
sed -i s#/usr/share/fonts/TTF#${dejavu_fonts}/share/fonts/truetype#g data/program.conf
'';
buildInputs = [
SDL2
SDL2_ttf
SDL2_image
boost
libmpdclient
libwtk-sdl2
icu
libconfig
];
# https://stackoverflow.com/questions/53089494/configure-error-could-not-find-a-version-of-the-library
configureFlags = [
"--with-boost-libdir=${boost.out}/lib"
];
doCheck = true;
meta = with lib; {
description = "A small MPD client that let's you view covers and has controls suitable for small touchscreens";
homepage = "https://github.com/muesli4/mpd-touch-screen-gui";
# See: https://github.com/muesli4/mpd-touch-screen-gui/tree/master/LICENSES
license = licenses.lgpl3Plus;
maintainers = with maintainers; [ doronbehar ];
platforms = platforms.all;
};
}

View file

@@ -3,7 +3,7 @@
, fetchurl
, makeWrapper
, pkg-config
, perl
, libOnly ? false # whether to build only the library
, withAlsa ? stdenv.hostPlatform.isLinux
, alsa-lib
, withPulse ? stdenv.hostPlatform.isLinux
@@ -14,29 +14,36 @@
, withJack ? stdenv.hostPlatform.isUnix
, jack
, withConplay ? !stdenv.hostPlatform.isWindows
, perl
}:
assert withConplay -> !libOnly;
stdenv.mkDerivation rec {
pname = "mpg123";
pname = "${lib.optionalString libOnly "lib"}mpg123";
version = "1.31.2";
src = fetchurl {
url = "mirror://sourceforge/${pname}/${pname}-${version}.tar.bz2";
url = "mirror://sourceforge/mpg123/mpg123-${version}.tar.bz2";
sha256 = "sha256-sX8ikF4x9DtrQB399qce0Ru30Fb2jbRJ1wufmug5x94=";
};
outputs = [ "out" ] ++ lib.optionals withConplay [ "conplay" ];
outputs = [ "out" ] ++ lib.optional withConplay "conplay";
nativeBuildInputs = lib.optionals withConplay [ makeWrapper ]
++ lib.optionals (withPulse || withJack) [ pkg-config ];
nativeBuildInputs = lib.optionals (!libOnly) (
lib.optionals withConplay [ makeWrapper ]
++ lib.optionals (withPulse || withJack) [ pkg-config ]
);
buildInputs = lib.optionals withConplay [ perl ]
buildInputs = lib.optionals (!libOnly) (
lib.optionals withConplay [ perl ]
++ lib.optionals withAlsa [ alsa-lib ]
++ lib.optionals withPulse [ libpulseaudio ]
++ lib.optionals withCoreAudio [ AudioUnit AudioToolbox ]
++ lib.optionals withJack [ jack ];
++ lib.optionals withJack [ jack ]
);
configureFlags = [
configureFlags = lib.optionals (!libOnly) [
"--with-audio=${lib.strings.concatStringsSep "," (
lib.optional withJack "jack"
++ lib.optional withPulse "pulse"

View file

@@ -16,13 +16,13 @@
stdenv.mkDerivation rec {
pname = "mympd";
version = "10.2.5";
version = "10.2.6";
src = fetchFromGitHub {
owner = "jcorporation";
repo = "myMPD";
rev = "v${version}";
sha256 = "sha256-ZxGMvbm9GKhhfCNZdeIYUh2FF4c3vXtvRdu24u3Zrtg=";
sha256 = "sha256-a/HjuBeq7ySDUcEcR6KKnwlvzUInjWmio/zI59sNsak=";
};
nativeBuildInputs = [

View file

@@ -2,13 +2,13 @@
stdenv.mkDerivation rec {
pname = "ncpamixer";
version = "unstable-2021-10-17";
version = "1.3.3.3";
src = fetchFromGitHub {
owner = "fulhax";
repo = "ncpamixer";
rev = "4faf8c27d4de55ddc244f372cbf5b2319d0634f7";
sha256 = "sha256-ElbxdAaXAY0pj0oo2IcxGT+K+7M5XdCgom0XbJ9BxW4=";
rev = version;
sha256 = "sha256-TxSfiBSsCAImzCXv6o64Jy7tSefpYCkU0xtuHx26Ss4=";
};
nativeBuildInputs = [ cmake pkg-config ];

Some files were not shown because too many files have changed in this diff.