Project import generated by Copybara.
GitOrigin-RevId: 3d1a7716d7f1fccbd7d30ab3b2ed3db831f43bde

commit bba55970ba (parent 13f2f79e6d)
1531 changed files with 28951 additions and 9714 deletions
third_party/nixpkgs/.github/CODEOWNERS (vendored, 4 changes)

@@ -94,10 +94,6 @@
 /pkgs/applications/science/math/R @peti
 /pkgs/development/r-modules @peti
 
-# Ruby
-/pkgs/development/interpreters/ruby @alyssais
-/pkgs/development/ruby-modules @alyssais
-
 # Rust
 /pkgs/development/compilers/rust @Mic92 @LnL7 @zowoq
 /pkgs/build-support/rust @andir @danieldk @zowoq
@@ -2,6 +2,7 @@ name: "Label PR"
 on:
   pull_request_target:
+    types: [edited, opened, synchronize, reopened]
 
 jobs:
   labels:
@@ -15,11 +15,11 @@ jobs:
       with:
         # pull_request_target checks out the base branch by default
         ref: refs/pull/${{ github.event.pull_request.number }}/merge
-    - uses: cachix/install-nix-action@v12
+    - uses: cachix/install-nix-action@v13
       with:
         # explicitly enable sandbox
         extra_nix_config: sandbox = true
-    - uses: cachix/cachix-action@v8
+    - uses: cachix/cachix-action@v9
       with:
         # This cache is for the nixos/nixpkgs manual builds and should not be trusted or used elsewhere.
         name: nixpkgs-ci
@@ -15,11 +15,11 @@ jobs:
       with:
         # pull_request_target checks out the base branch by default
         ref: refs/pull/${{ github.event.pull_request.number }}/merge
-    - uses: cachix/install-nix-action@v12
+    - uses: cachix/install-nix-action@v13
      with:
        # explicitly enable sandbox
        extra_nix_config: sandbox = true
-    - uses: cachix/cachix-action@v8
+    - uses: cachix/cachix-action@v9
      with:
        # This cache is for the nixos/nixpkgs manual builds and should not be trusted or used elsewhere.
        name: nixpkgs-ci
third_party/nixpkgs/doc/builders/images.xml (vendored, 2 changes)

@@ -7,6 +7,6 @@
  </para>
  <xi:include href="images/appimagetools.xml" />
  <xi:include href="images/dockertools.section.xml" />
- <xi:include href="images/ocitools.xml" />
+ <xi:include href="images/ocitools.section.xml" />
  <xi:include href="images/snaptools.xml" />
 </chapter>
third_party/nixpkgs/doc/builders/images/ocitools.section.md (vendored, new file, 37 lines)

@@ -0,0 +1,37 @@
# pkgs.ociTools {#sec-pkgs-ociTools}

`pkgs.ociTools` is a set of functions for creating containers according to the [OCI container specification v1.0.0](https://github.com/opencontainers/runtime-spec). Beyond that it makes no assumptions about the container runner you choose to use to run the created container.

## buildContainer {#ssec-pkgs-ociTools-buildContainer}

This function creates a simple OCI container that runs a single command inside of it. An OCI container consists of a `config.json` and a rootfs directory. The nix store of the container will contain all referenced dependencies of the given command.

The parameters of `buildContainer` with an example value are described below:

```nix
buildContainer {
  args = [
    (with pkgs;
      writeScript "run.sh" ''
        #!${bash}/bin/bash
        exec ${bash}/bin/bash
      '').outPath
  ];

  mounts = {
    "/data" = {
      type = "none";
      source = "/var/lib/mydata";
      options = [ "bind" ];
    };
  };

  readonly = false;
}
```

- `args` specifies a set of arguments to run inside the container. This is the only required argument for `buildContainer`. All referenced packages inside the derivation will be made available inside the container.

- `mounts` specifies additional mount points chosen by the user. By default only a minimal set of necessary filesystems are mounted into the container (e.g. procfs, cgroupfs).

- `readonly` makes the container's rootfs read-only if it is set to true. The default value is `false`.
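A minimal sketch of how the example above could actually be built, assuming a `<nixpkgs>` channel that exposes `pkgs.ociTools` (the attribute documented in the new section):

```nix
# Sketch only: build the buildContainer example from the section above.
# Assumes <nixpkgs> resolves to a nixpkgs checkout providing pkgs.ociTools.
with import <nixpkgs> { };

ociTools.buildContainer {
  # run an interactive bash as the container's single command
  args = [
    (writeScript "run.sh" ''
      #!${bash}/bin/bash
      exec ${bash}/bin/bash
    '').outPath
  ];
}
```

Running `nix-build` on such an expression would be expected to produce a `result` link containing the `config.json` and rootfs described above.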
@@ -1,61 +0,0 @@
<section xmlns="http://docbook.org/ns/docbook"
         xmlns:xlink="http://www.w3.org/1999/xlink"
         xmlns:xi="http://www.w3.org/2001/XInclude"
         xml:id="sec-pkgs-ociTools">
 <title>pkgs.ociTools</title>

 <para>
  <varname>pkgs.ociTools</varname> is a set of functions for creating containers according to the <link xlink:href="https://github.com/opencontainers/runtime-spec">OCI container specification v1.0.0</link>. Beyond that it makes no assumptions about the container runner you choose to use to run the created container.
 </para>

 <section xml:id="ssec-pkgs-ociTools-buildContainer">
  <title>buildContainer</title>

  <para>
   This function creates a simple OCI container that runs a single command inside of it. An OCI container consists of a <varname>config.json</varname> and a rootfs directory.The nix store of the container will contain all referenced dependencies of the given command.
  </para>

  <para>
   The parameters of <varname>buildContainer</varname> with an example value are described below:
  </para>

  <example xml:id='ex-ociTools-buildContainer'>
   <title>Build Container</title>
<programlisting>
buildContainer {
  args = [ (with pkgs; writeScript "run.sh" ''
    #!${bash}/bin/bash
    exec ${bash}/bin/bash
  '').outPath ]; <co xml:id='ex-ociTools-buildContainer-1' />

  mounts = {
    "/data" = {
      type = "none";
      source = "/var/lib/mydata";
      options = [ "bind" ];
    };
  };<co xml:id='ex-ociTools-buildContainer-2' />

  readonly = false; <co xml:id='ex-ociTools-buildContainer-3' />
}
</programlisting>
   <calloutlist>
    <callout arearefs='ex-ociTools-buildContainer-1'>
     <para>
      <varname>args</varname> specifies a set of arguments to run inside the container. This is the only required argument for <varname>buildContainer</varname>. All referenced packages inside the derivation will be made available inside the container
     </para>
    </callout>
    <callout arearefs='ex-ociTools-buildContainer-2'>
     <para>
      <varname>mounts</varname> specifies additional mount points chosen by the user. By default only a minimal set of necessary filesystems are mounted into the container (e.g procfs, cgroupfs)
     </para>
    </callout>
    <callout arearefs='ex-ociTools-buildContainer-3'>
     <para>
      <varname>readonly</varname> makes the container's rootfs read-only if it is set to true. The default value is false <literal>false</literal>.
     </para>
    </callout>
   </calloutlist>
  </example>
 </section>
</section>
third_party/nixpkgs/doc/builders/packages/citrix.section.md (vendored, new file, 32 lines)

@@ -0,0 +1,32 @@
# Citrix Workspace {#sec-citrix}

The [Citrix Workspace App](https://www.citrix.com/products/workspace-app/) is a remote desktop viewer which provides access to [XenDesktop](https://www.citrix.com/products/xenapp-xendesktop/) installations.

## Basic usage {#sec-citrix-base}

The tarball archive needs to be downloaded manually, as the vendor's license agreement for [Citrix Workspace](https://www.citrix.de/downloads/workspace-app/linux/workspace-app-for-linux-latest.html) needs to be accepted first. Then run `nix-prefetch-url file://$PWD/linuxx64-$version.tar.gz`. With the archive available in the store, the package can be built and installed with Nix.

## Citrix Selfservice {#sec-citrix-selfservice}

The [selfservice](https://support.citrix.com/article/CTX200337) is an application for managing Citrix desktops and applications. Please note that this feature only works with citrix_workspace_20_06_0 and later versions.

In order to set this up, you first have to [download the `.cr` file from the Netscaler Gateway](https://its.uiowa.edu/support/article/102186). After that you can configure the `selfservice` like this:

```ShellSession
$ storebrowse -C ~/Downloads/receiverconfig.cr
$ selfservice
```

## Custom certificates {#sec-citrix-custom-certs}

The `Citrix Workspace App` in `nixpkgs` trusts several certificates [from the Mozilla database](https://curl.haxx.se/docs/caextract.html) by default. However, several companies using Citrix might require their own corporate certificates. On distros with imperative packaging these certs can easily be stored in [`$ICAROOT`](https://developer-docs.citrix.com/projects/receiver-for-linux-command-reference/en/13.7/), but this directory is a store path in `nixpkgs`. To work around this, the package provides a simple mechanism, based on `symlinkJoin`, to add custom certificates without rebuilding the entire package:

```nix
with import <nixpkgs> { config.allowUnfree = true; };
let
  extraCerts = [
    ./custom-cert-1.pem
    ./custom-cert-2.pem # ...
  ];
in citrix_workspace.override { inherit extraCerts; }
```
@@ -1,48 +0,0 @@
<section xmlns="http://docbook.org/ns/docbook"
         xmlns:xlink="http://www.w3.org/1999/xlink"
         xml:id="sec-citrix">
 <title>Citrix Workspace</title>

 <para>
  The <link xlink:href="https://www.citrix.com/products/workspace-app/">Citrix Workspace App</link> is a remote desktop viewer which provides access to <link xlink:href="https://www.citrix.com/products/xenapp-xendesktop/">XenDesktop</link> installations.
 </para>

 <section xml:id="sec-citrix-base">
  <title>Basic usage</title>

  <para>
   The tarball archive needs to be downloaded manually as the license agreements of the vendor for <link xlink:href="https://www.citrix.de/downloads/workspace-app/linux/workspace-app-for-linux-latest.html">Citrix Workspace</link> needs to be accepted first. Then run <command>nix-prefetch-url file://$PWD/linuxx64-$version.tar.gz</command>. With the archive available in the store the package can be built and installed with Nix.
  </para>
 </section>

 <section xml:id="sec-citrix-selfservice">
  <title>Citrix Selfservice</title>

  <para>
   The <link xlink:href="https://support.citrix.com/article/CTX200337">selfservice</link> is an application managing Citrix desktops and applications. Please note that this feature only works with at least <package>citrix_workspace_20_06_0</package> and later versions.
  </para>

  <para>
   In order to set this up, you first have to <link xlink:href="https://its.uiowa.edu/support/article/102186">download the <literal>.cr</literal> file from the Netscaler Gateway</link>. After that you can configure the <command>selfservice</command> like this:
<screen>
<prompt>$ </prompt>storebrowse -C ~/Downloads/receiverconfig.cr
<prompt>$ </prompt>selfservice
</screen>
  </para>
 </section>

 <section xml:id="sec-citrix-custom-certs">
  <title>Custom certificates</title>

  <para>
   The <literal>Citrix Workspace App</literal> in <literal>nixpkgs</literal> trusts several certificates <link xlink:href="https://curl.haxx.se/docs/caextract.html">from the Mozilla database</link> by default. However several companies using Citrix might require their own corporate certificate. On distros with imperative packaging these certs can be stored easily in <link xlink:href="https://developer-docs.citrix.com/projects/receiver-for-linux-command-reference/en/13.7/"><literal>$ICAROOT</literal></link>, however this directory is a store path in <literal>nixpkgs</literal>. In order to work around this issue the package provides a simple mechanism to add custom certificates without rebuilding the entire package using <literal>symlinkJoin</literal>:
<programlisting>
<![CDATA[with import <nixpkgs> { config.allowUnfree = true; };
let extraCerts = [ ./custom-cert-1.pem ./custom-cert-2.pem /* ... */ ]; in
citrix_workspace.override {
  inherit extraCerts;
}]]>
</programlisting>
  </para>
 </section>
</section>
@@ -5,7 +5,7 @@
  <para>
   This chapter contains information about how to use and maintain the Nix expressions for a number of specific packages, such as the Linux kernel or X.org.
  </para>
- <xi:include href="citrix.xml" />
+ <xi:include href="citrix.section.xml" />
  <xi:include href="dlib.xml" />
  <xi:include href="eclipse.section.xml" />
  <xi:include href="elm.section.xml" />
@@ -3,7 +3,7 @@
 ## How to use Agda
 
 Agda can be installed from `agda`:
-```
+```ShellSession
 $ nix-env -iA agda
 ```
 

@@ -15,13 +15,13 @@ To use Agda with libraries, the `agda.withPackages` function can be used. This f
 
 For example, suppose we wanted a version of Agda which has access to the standard library. This can be obtained with the expressions:
 
-```
+```nix
 agda.withPackages [ agdaPackages.standard-library ]
 ```
 
 or
 
-```
+```nix
 agda.withPackages (p: [ p.standard-library ])
 ```
 

@@ -32,7 +32,7 @@ If you want to use a library in your home directory (for instance if it is a dev
 Agda will not by default use these libraries. To tell Agda to use the library we have some options:
 
 * Call `agda` with the library flag:
-```
+```ShellSession
 $ agda -l standard-library -i . MyFile.agda
 ```
 * Write a `my-library.agda-lib` file for the project you are working on which may look like:

@@ -49,7 +49,7 @@ More information can be found in the [official Agda documentation on library man
 Agda modules can be compiled with the `--compile` flag. A version of `ghc` with `ieee754` is made available to the Agda program via the `--with-compiler` flag.
 This can be overridden by a different version of `ghc` as follows:
 
-```
+```nix
 agda.withPackages {
   pkgs = [ ... ];
   ghc = haskell.compiler.ghcHEAD;

@@ -80,12 +80,12 @@ By default, Agda sources are files ending on `.agda`, or literate Agda files end
 ## Adding Agda packages to Nixpkgs
 
 To add an Agda package to `nixpkgs`, the derivation should be written to `pkgs/development/libraries/agda/${library-name}/` and an entry should be added to `pkgs/top-level/agda-packages.nix`. Here it is called in a scope with access to all other Agda libraries, so the top line of the `default.nix` can look like:
-```
+```nix
 { mkDerivation, standard-library, fetchFromGitHub }:
 ```
 and `mkDerivation` should be called instead of `agdaPackages.mkDerivation`. Here is an example skeleton derivation for iowa-stdlib:
 
-```
+```nix
 mkDerivation {
   version = "1.5.0";
   pname = "iowa-stdlib";
@@ -4,7 +4,7 @@
 
 For local development, it's recommended to use nix-shell to create a dotnet environment:
 
-```
+```nix
 # shell.nix
 with import <nixpkgs> {};
 

@@ -20,7 +20,7 @@ mkShell {
 
 It's very likely that more than one sdk will be needed on a given project. Dotnet provides several different frameworks (E.g dotnetcore, aspnetcore, etc.) as well as many versions for a given framework. Normally, dotnet is able to fetch a framework and install it relative to the executable. However, this would mean writing to the nix store in nixpkgs, which is read-only. To support the many-sdk use case, one can compose an environment using `dotnetCorePackages.combinePackages`:
 
-```
+```nix
 with import <nixpkgs> {};
 
 mkShell {

@@ -37,7 +37,7 @@ mkShell {
 
 This will produce a dotnet installation that has the dotnet 3.1, 3.0, and 2.1 sdk. The first sdk listed will have it's cli utility present in the resulting environment. Example info output:
 
-```
+```ShellSession
 $ dotnet --info
 .NET Core SDK (reflecting any global.json):
 Version:   3.1.101
@@ -4,7 +4,7 @@
 
 The easiest way to get a working idris version is to install the `idris` attribute:
 
-```
+```ShellSession
 $ # On NixOS
 $ nix-env -i nixos.idris
 $ # On non-NixOS

@@ -21,7 +21,7 @@ self: super: {
 
 And then:
 
-```
+```ShellSession
 $ # On NixOS
 $ nix-env -iA nixos.myIdris
 $ # On non-NixOS

@@ -29,7 +29,7 @@ $ nix-env -iA nixpkgs.myIdris
 ```
 
 To see all available Idris packages:
-```
+```ShellSession
 $ # On NixOS
 $ nix-env -qaPA nixos.idrisPackages
 $ # On non-NixOS

@@ -37,7 +37,7 @@ $ nix-env -qaPA nixpkgs.idrisPackages
 ```
 
 Similarly, entering a `nix-shell`:
-```
+```ShellSession
 $ nix-shell -p 'idrisPackages.with-packages (with idrisPackages; [ contrib pruviloj ])'
 ```
 

@@ -45,14 +45,14 @@ $ nix-shell -p 'idrisPackages.with-packages (with idrisPackages; [ contrib pruvi
 
 To have access to these libraries in idris, call it with an argument `-p <library name>` for each library:
 
-```
+```ShellSession
 $ nix-shell -p 'idrisPackages.with-packages (with idrisPackages; [ contrib pruviloj ])'
 [nix-shell:~]$ idris -p contrib -p pruviloj
 ```
 
 A listing of all available packages the Idris binary has access to is available via `--listlibs`:
 
-```
+```ShellSession
 $ idris --listlibs
 00prelude-idx.ibc
 pruviloj

@@ -105,7 +105,7 @@ build-idris-package {
 
 Assuming this file is saved as `yaml.nix`, it's buildable using
 
-```
+```ShellSession
 $ nix-build -E '(import <nixpkgs> {}).idrisPackages.callPackage ./yaml.nix {}'
 ```
 

@@ -121,7 +121,7 @@ with import <nixpkgs> {};
 
 in another file (say `default.nix`) to be able to build it with
 
-```
+```ShellSession
 $ nix-build -A yaml
 ```
 

@@ -133,7 +133,7 @@ Specifically, you can set `idrisBuildOptions`, `idrisTestOptions`, `idrisInstall
 
 For example you could set
 
-```
+```nix
 build-idris-package {
   idrisBuildOptions = [ "--log" "1" "--verbose" ]
 
@@ -7,8 +7,9 @@
 #### Overview
 
 Several versions of the Python interpreter are available on Nix, as well as a
-high amount of packages. The attribute `python` refers to the default
-interpreter, which is currently CPython 2.7. It is also possible to refer to
+high amount of packages. The attribute `python3` refers to the default
+interpreter, which is currently CPython 3.8. The attribute `python` refers to
+CPython 2.7 for backwards-compatibility. It is also possible to refer to
 specific versions, e.g. `python38` refers to CPython 3.8, and `pypy` refers to
 the default PyPy interpreter.
 

@@ -78,7 +79,7 @@ $ nix-shell -p 'python38.withPackages(ps: with ps; [ numpy toolz ])'
 By default `nix-shell` will start a `bash` session with this interpreter in our
 `PATH`, so if we then run:
 
-```
+```Python console
 [nix-shell:~/src/nixpkgs]$ python3
 Python 3.8.1 (default, Dec 18 2019, 19:06:26)
 [GCC 9.2.0] on linux

@@ -89,7 +90,7 @@ Type "help", "copyright", "credits" or "license" for more information.
 Note that no other modules are in scope, even if they were imperatively
 installed into our user environment as a dependency of a Python application:
 
-```
+```Python console
 >>> import requests
 Traceback (most recent call last):
   File "<stdin>", line 1, in <module>

@@ -145,8 +146,8 @@ print(f"The dot product of {a} and {b} is: {np.dot(a, b)}")
 Executing this script requires a `python3` that has `numpy`. Using what we learned
 in the previous section, we could startup a shell and just run it like so:
 
-```
+```ShellSession
-nix-shell -p 'python38.withPackages(ps: with ps; [ numpy ])' --run 'python3 foo.py'
+$ nix-shell -p 'python38.withPackages(ps: with ps; [ numpy ])' --run 'python3 foo.py'
 The dot product of [1 2] and [3 4] is: 11
 ```
 

@@ -762,10 +763,10 @@ and in this case the `python38` interpreter is automatically used.
 Versions 2.7, 3.6, 3.7, 3.8 and 3.9 of the CPython interpreter are available as
 respectively `python27`, `python36`, `python37`, `python38` and `python39`. The
 aliases `python2` and `python3` correspond to respectively `python27` and
-`python38`. The default interpreter, `python`, maps to `python2`. The PyPy
-interpreters compatible with Python 2.7 and 3 are available as `pypy27` and
-`pypy3`, with aliases `pypy2` mapping to `pypy27` and `pypy` mapping to `pypy2`.
-The Nix expressions for the interpreters can be found in
+`python39`. The attribute `python` maps to `python2`. The PyPy interpreters
+compatible with Python 2.7 and 3 are available as `pypy27` and `pypy3`, with
+aliases `pypy2` mapping to `pypy27` and `pypy` mapping to `pypy2`. The Nix
+expressions for the interpreters can be found in
 `pkgs/development/interpreters/python`.
 
 All packages depending on any Python interpreter get appended

@@ -788,6 +789,23 @@ Each interpreter has the following attributes:
 - `executable`. Name of the interpreter executable, e.g. `python3.8`.
 - `pkgs`. Set of Python packages for that specific interpreter. The package set can be modified by overriding the interpreter and passing `packageOverrides`.
 
+### Optimizations
+
+The Python interpreters are by default not built with optimizations enabled, because
+the builds are in that case not reproducible. To enable optimizations, override the
+interpreter of interest, e.g. using
+
+```
+let
+  pkgs = import ./. {};
+  mypython = pkgs.python3.override {
+    enableOptimizations = true;
+    reproducibleBuild = false;
+    self = mypython;
+  };
+in mypython
+```
+
 ### Building packages and applications
 
 Python libraries and applications that use `setuptools` or
@@ -103,7 +103,7 @@ supported Qt version.
 ### Example adding a Qt library {#qt-library-all-packages-nix}
 
 The following represents the contents of `qt5-packages.nix`.
-```
+```nix
 {
   # ...
 

@@ -133,7 +133,7 @@ to select the Qt 5 version used for the application.
 ### Example adding a Qt application {#qt-application-all-packages-nix}
 
 The following represents the contents of `qt5-packages.nix`.
-```
+```nix
 {
   # ...
 

@@ -144,7 +144,7 @@ The following represents the contents of `qt5-packages.nix`.
 ```
 
 The following represents the contents of `all-packages.nix`.
-```
+```nix
 {
   # ...
 
@@ -2,13 +2,14 @@
 
 To install the rust compiler and cargo put
 
-```
+```nix
-rustc
-cargo
+environment.systemPackages = [
+  rustc
+  cargo
+];
 ```
 
-into the `environment.systemPackages` or bring them into
-scope with `nix-shell -p rustc cargo`.
+into your `configuration.nix` or bring them into scope with `nix-shell -p rustc cargo`.
 
 For other versions such as daily builds (beta and nightly),
 use either `rustup` from nixpkgs (which will manage the rust installation in your home directory),

@@ -18,7 +19,7 @@ or use Mozilla's [Rust nightlies overlay](#using-the-rust-nightlies-overlay).
 
 Rust applications are packaged by using the `buildRustPackage` helper from `rustPlatform`:
 
-```
+```nix
 { lib, rustPlatform }:
 
 rustPlatform.buildRustPackage rec {

@@ -49,7 +50,7 @@ package. `cargoHash256` is used for traditional Nix SHA-256 hashes,
 such as the one in the example above. `cargoHash` should instead be
 used for [SRI](https://www.w3.org/TR/SRI/) hashes. For example:
 
-```
+```nix
 cargoHash = "sha256-l1vL2ZdtDRxSGvP0X/l3nMw8+6WF67KPutJEzUROjg8=";
 ```
 

@@ -59,13 +60,13 @@ expression and building the package once. The correct checksum can
 then be taken from the failed build. A fake hash can be used for
 `cargoSha256` as follows:
 
-```
+```nix
 cargoSha256 = lib.fakeSha256;
 ```
 
 For `cargoHash` you can use:
 
-```
+```nix
 cargoHash = lib.fakeHash;
 ```
 

@@ -262,7 +263,7 @@ Otherwise, some steps may fail because of the modified directory structure of `t
 source code in a reproducible way. If it is missing or out-of-date one can use
 the `cargoPatches` attribute to update or add it.
 
-```
+```nix
 rustPlatform.buildRustPackage rec {
   (...)
   cargoPatches = [

@@ -489,7 +490,7 @@ an example for a minimal `hello` crate:
 
 Now, the file produced by the call to `carnix`, called `hello.nix`, looks like:
 
-```
+```nix
 # Generated by carnix 0.6.5: carnix -o hello.nix --src ./. Cargo.lock --standalone
 { stdenv, buildRustCrate, fetchgit }:
 let kernel = stdenv.buildPlatform.parsed.kernel.name;

@@ -518,7 +519,7 @@ dependencies, for instance by adding a single line `libc="*"` to our
 `Cargo.lock`. Then, `carnix` needs to be run again, and produces the
 following nix file:
 
-```
+```nix
 # Generated by carnix 0.6.5: carnix -o hello.nix --src ./. Cargo.lock --standalone
 { stdenv, buildRustCrate, fetchgit }:
 let kernel = stdenv.buildPlatform.parsed.kernel.name;

@@ -573,7 +574,7 @@ Some crates require external libraries. For crates from
 Starting from that file, one can add more overrides, to add features
 or build inputs by overriding the hello crate in a seperate file.
 
-```
+```nix
 with import <nixpkgs> {};
 ((import ./hello.nix).hello {}).override {
   crateOverrides = defaultCrateOverrides // {

@@ -593,7 +594,7 @@ derivation depend on the crate's version, the `attrs` argument of
 the override above can be read, as in the following example, which
 patches the derivation:
 
-```
+```nix
 with import <nixpkgs> {};
 ((import ./hello.nix).hello {}).override {
   crateOverrides = defaultCrateOverrides // {

@@ -614,7 +615,7 @@ dependencies. For instance, to override the build inputs for crate
 `libc` in the example above, where `libc` is a dependency of the main
 crate, we could do:
 
-```
+```nix
 with import <nixpkgs> {};
 ((import hello.nix).hello {}).override {
   crateOverrides = defaultCrateOverrides // {

@@ -630,27 +631,27 @@ general. A number of other parameters can be overridden:
 
 - The version of rustc used to compile the crate:
 
-```
+```nix
 (hello {}).override { rust = pkgs.rust; };
 ```
 
 - Whether to build in release mode or debug mode (release mode by
   default):
 
-```
+```nix
 (hello {}).override { release = false; };
 ```
 
 - Whether to print the commands sent to rustc when building
   (equivalent to `--verbose` in cargo:
 
-```
+```nix
 (hello {}).override { verbose = false; };
 ```
 
 - Extra arguments to be passed to `rustc`:
 
-```
+```nix
 (hello {}).override { extraRustcOpts = "-Z debuginfo=2"; };
 ```
 

@@ -662,7 +663,7 @@ general. A number of other parameters can be overridden:
 `postInstall`. As an example, here is how to create a new module
 before running the build script:
 
-```
+```nix
 (hello {}).override {
   preConfigure = ''
     echo "pub const PATH=\"${hi.out}\";" >> src/path.rs"

@@ -676,7 +677,7 @@ One can also supply features switches. For example, if we want to
 compile `diesel_cli` only with the `postgres` feature, and no default
 features, we would write:
 
-```
+```nix
 (callPackage ./diesel.nix {}).diesel {
   default = false;
   postgres = true;

@@ -699,7 +700,7 @@ Using the example `hello` project above, we want to do the following:
 
 A typical `shell.nix` might look like:
 
-```
+```nix
 with import <nixpkgs> {};
 
 stdenv.mkDerivation {

@@ -721,7 +722,7 @@ stdenv.mkDerivation {
 ```
 
 You should now be able to run the following:
-```
+```ShellSession
 $ nix-shell --pure
 $ cargo build
 $ cargo test

@@ -731,7 +732,7 @@ $ cargo test
 To control your rust version (i.e. use nightly) from within `shell.nix` (or
 other nix expressions) you can use the following `shell.nix`
 
-```
+```nix
 # Latest Nightly
 with import <nixpkgs> {};
 let src = fetchFromGitHub {

@@ -759,7 +760,7 @@ stdenv.mkDerivation {
 ```
 
 Now run:
-```
+```ShellSession
 $ rustc --version
 rustc 1.26.0-nightly (188e693b3 2018-03-26)
 ```

@@ -794,7 +795,7 @@ in the `~/.config/nixpkgs/overlays` directory.
 
 Add the following to your `configuration.nix`, `home-configuration.nix`, `shell.nix`, or similar:
 
-```
+```nix
 { pkgs ? import <nixpkgs> {
     overlays = [
       (import (builtins.fetchTarball https://github.com/mozilla/nixpkgs-mozilla/archive/master.tar.gz))
@@ -156,7 +156,7 @@ assuming that "using latest version" is ok most of the time.
 
 First create a vim-scripts file having one plugin name per line. Example:
 
-```
+```vim
 "tlib"
 {'name': 'vim-addon-sql'}
 {'filetype_regex': '\%(vim)$', 'names': ['reload', 'vim-dev-plugin']}

@@ -197,7 +197,7 @@ nix-shell -p vimUtils.vim_with_vim2nix --command "vim -c 'source generate.vim'"
 You should get a Vim buffer with the nix derivations (output1) and vam.pluginDictionaries (output2).
 You can add your Vim to your system's configuration file like this and start it by "vim-my":
 
-```
+```nix
 my-vim =
   let plugins = let inherit (vimUtils) buildVimPluginFrom2Nix; in {
     copy paste output1 here

@@ -217,7 +217,7 @@ my-vim =
 
 Sample output1:
 
-```
+```nix
 "reload" = buildVimPluginFrom2Nix { # created by nix#NixDerivation
   name = "reload";
   src = fetchgit {

@@ -248,7 +248,7 @@ Nix expressions for Vim plugins are stored in [pkgs/misc/vim-plugins](/pkgs/misc
 
 Some plugins require overrides in order to function properly. Overrides are placed in [overrides.nix](/pkgs/misc/vim-plugins/overrides.nix). Overrides are most often required when a plugin requires some dependencies, or extra steps are required during the build process. For example `deoplete-fish` requires both `deoplete-nvim` and `vim-fish`, and so the following override was added:
 
-```
+```nix
 deoplete-fish = super.deoplete-fish.overrideAttrs(old: {
   dependencies = with super; [ deoplete-nvim vim-fish ];
 });
@@ -190,7 +190,7 @@ If one imagines the saturating self references at the end being replaced with in
 ```
 (native..., native, native, native, foreign, foreign, foreign...)
 ```
-On can then imagine any sequence of platforms such that there are bootstrap stages with their 3 platforms determined by "sliding a window" that is the 3 tuple through the sequence. This was the original model for bootstrapping. Without a target platform (assume a better world where all compilers are multi-target and all standard libraries are built in their own derivation), this is sufficient. Conversely if one wishes to cross compile "faster", with a "Canadian Cross" bootstrapping stage where `build != host != target`, more bootstrapping stages are needed since no sliding window provides the pesky `pkgsBuildTarget` package set since it skips the Canadian cross stage's "host".
+One can then imagine any sequence of platforms such that there are bootstrap stages with their 3 platforms determined by "sliding a window" that is the 3 tuple through the sequence. This was the original model for bootstrapping. Without a target platform (assume a better world where all compilers are multi-target and all standard libraries are built in their own derivation), this is sufficient. Conversely if one wishes to cross compile "faster", with a "Canadian Cross" bootstrapping stage where `build != host != target`, more bootstrapping stages are needed since no sliding window provides the pesky `pkgsBuildTarget` package set since it skips the Canadian cross stage's "host".
 
 
 ::: note
third_party/nixpkgs/lib/generators.nix (vendored, 24 changes)

@@ -307,4 +307,28 @@ rec {
     ${expr "" v}
 </plist>'';
 
+  /* Translate a simple Nix expression to Dhall notation.
+   * Note that integers are translated to Integer and never
+   * the Natural type.
+   */
+  toDhall = { }@args: v:
+    with builtins;
+    let concatItems = lib.strings.concatStringsSep ", ";
+    in if isAttrs v then
+      "{ ${
+        concatItems (lib.attrsets.mapAttrsToList
+          (key: value: "${key} = ${toDhall args value}") v)
+      } }"
+    else if isList v then
+      "[ ${concatItems (map (toDhall args) v)} ]"
+    else if isInt v then
+      "${if v < 0 then "" else "+"}${toString v}"
+    else if isBool v then
+      (if v then "True" else "False")
+    else if isFunction v then
+      abort "generators.toDhall: cannot convert a function to Dhall"
+    else if isNull v then
+      abort "generators.toDhall: cannot convert a null to Dhall"
+    else
+      builtins.toJSON v;
 }
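A quick sketch of how the new generator could be exercised from a nixpkgs checkout (the expected string is inferred from the conversion rules above, not a verified output):

```nix
# Sketch: exercise lib.generators.toDhall as added above.
# Assumes this expression is evaluated from the root of a nixpkgs checkout.
let
  lib = import ./lib;
in
  lib.generators.toDhall { } { foo = 1; bar = [ true "baz" ]; }
  # per the rules above, attrsets become records, ints become signed Integers,
  # bools become True/False, and everything else falls back to toJSON, so this
  # should evaluate to something like: "{ bar = [ True, \"baz\" ], foo = +1 }"
```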
third_party/nixpkgs/lib/trivial.nix (vendored, 2 changes)

@@ -158,7 +158,7 @@ rec {
     seq deepSeq genericClosure;
 
 
-  ## nixpks version strings
+  ## nixpkgs version strings
 
   /* Returns the current full nixpkgs version number. */
   version = release + versionSuffix;
@@ -984,6 +984,16 @@
     githubId = 12128029;
     name = "babariviere";
   };
+  babbaj = {
+    name = "babbaj";
+    email = "babbaj45@gmail.com";
+    github = "babbaj";
+    githubId = 12820770;
+    keys = [{
+      longkeyid = "rsa4096/0xF044309848A07CAC";
+      fingerprint = "6FBC A462 4EAF C69C A7C4 98C1 F044 3098 48A0 7CAC";
+    }];
+  };
   bachp = {
     email = "pascal.bach@nextrem.ch";
     github = "bachp";

@@ -1106,6 +1116,12 @@
     githubId = 14111;
     name = "Brandon Dimcheff";
   };
+  beardhatcode = {
+    name = "Robbert Gurdeep Singh";
+    email = "nixpkgs@beardhatcode.be";
+    github = "beardhatcode";
+    githubId = 662538;
+  };
   bendlas = {
     email = "herwig@bendlas.net";
     github = "bendlas";

@@ -1284,6 +1300,12 @@
     githubId = 50839;
     name = "Brian Jones";
   };
+  bootstrap-prime = {
+    email = "bootstrap.prime@gmail.com";
+    github = "bootstrap-prime";
+    githubId = 68566724;
+    name = "bootstrap-prime";
+  };
   commandodev = {
     email = "ben@perurbis.com";
     github = "commandodev";

@@ -2349,6 +2371,12 @@
     githubId = 15774340;
     name = "Thomas Depierre";
   };
+  diegolelis = {
+    email = "diego.o.lelis@gmail.com";
+    github = "diegolelis";
+    githubId = 8404455;
+    name = "Diego Lelis";
+  };
   dipinhora = {
     email = "dipinhora+github@gmail.com";
     github = "dipinhora";

@@ -2391,6 +2419,12 @@
     githubId = 10913120;
     name = "Dje4321";
   };
+  djwf = {
+    email = "dave@weller-fahy.com";
+    github = "djwf";
+    githubId = 73162;
+    name = "David J. Weller-Fahy";
+  };
   dkabot = {
     email = "dkabot@dkabot.com";
     github = "dkabot";

@@ -3609,6 +3643,12 @@
     githubId = 76716;
     name = "Graham Christensen";
   };
+  gravndal = {
+    email = "gaute.ravndal+nixos@gmail.com";
+    github = "gravndal";
+    githubId = 4656860;
+    name = "Gaute Ravndal";
+  };
   grburst = {
     email = "GRBurst@protonmail.com";
     github = "GRBurst";

@@ -4978,6 +5018,12 @@
     githubId = 16481032;
     name = "Kiba Fox";
   };
+  kidd = {
+    email = "raimonster@gmail.com";
+    github = "kidd";
+    githubId = 25607;
+    name = "Raimon Grau";
+  };
   kierdavis = {
     email = "kierdavis@gmail.com";
     github = "kierdavis";

@@ -5048,6 +5094,12 @@
       fingerprint = "8992 44FC D291 5CA2 0A97 802C 156C 88A5 B0A0 4B2A";
     }];
   };
+  kiyengar = {
+    email = "hello@kiyengar.net";
+    github = "karthikiyengar";
+    githubId = 8260207;
+    name = "Karthik Iyengar";
+  };
   kkallio = {
     email = "tierpluspluslists@gmail.com";
     name = "Karn Kallio";

@@ -6186,6 +6238,12 @@
     github = "meutraa";
     githubId = 68550871;
   };
+  mephistophiles = {
+    email = "mussitantesmortem@gmail.com";
+    name = "Maxim Zhukov";
+    github = "Mephistophiles";
+    githubId = 4850908;
+  };
   mfossen = {
     email = "msfossen@gmail.com";
     github = "mfossen";

@@ -7473,6 +7531,12 @@
     githubId = 627831;
     name = "Hoang Xuan Phu";
   };
+  piegames = {
+    name = "piegames";
+    email = "nix@piegames.de";
+    github = "piegamesde";
+    githubId = 14054505;
+  };
   pierrechevalier83 = {
     email = "pierrechevalier83@gmail.com";
     github = "pierrechevalier83";

@@ -9187,6 +9251,12 @@
     githubId = 65870;
     name = "Сухарик";
   };
+  sumnerevans = {
+    email = "me@sumnerevans.com";
+    github = "sumnerevans";
+    githubId = 16734772;
+    name = "Sumner Evans";
+  };
   superbo = {
     email = "supernbo@gmail.com";
     github = "SuperBo";

@@ -9779,6 +9849,12 @@
     githubId = 27586264;
     name = "Tobias Schmidt";
   };
+  totoroot = {
+    name = "Matthias Thym";
+    email = "git@thym.at";
+    github = "totoroot";
+    githubId = 39650930;
+  };
   travisbhartwell = {
     email = "nafai@travishartwell.net";
     github = "travisbhartwell";

@@ -10374,6 +10450,12 @@
     githubId = 1322287;
     name = "William O'Hanley";
   };
+  woky = {
+    email = "pampu.andrei@pm.me";
+    github = "andreisergiu98";
+    githubId = 11740700;
+    name = "Andrei Pampu";
+  };
   wolfangaukang = {
     email = "liquid.query960@4wrd.cc";
     github = "wolfangaukang";

@@ -10434,6 +10516,12 @@
     githubId = 13489144;
     name = "Calle Rosenquist";
   };
+  xdhampus = {
+    name = "Hampus";
+    email = "16954508+xdHampus@users.noreply.github.com";
+    github = "xdHampus";
+    githubId = 16954508;
+  };
   xe = {
     email = "me@christine.website";
     github = "Xe";
@ -1017,6 +1017,14 @@ systemd.services.nginx.serviceConfig.ProtectHome = "read-only";
|
||||||
will have changed.
|
will have changed.
|
||||||
</para>
|
</para>
|
||||||
</listitem>
|
</listitem>
|
||||||
|
<listitem>
|
||||||
|
<para>
|
||||||
|
The syntax of the PostgreSQL configuration file is now checked at build
|
||||||
|
time. If your configuration includes a file inaccessible inside the build
|
||||||
|
sandbox, set <varname>services.postgresql.checkConfig</varname> to
|
||||||
|
<literal>false</literal>.
|
||||||
|
</para>
|
||||||
|
</listitem>
|
||||||
<listitem>
|
<listitem>
|
||||||
<para>
|
<para>
|
||||||
The rkt module has been removed, it was archived by upstream.
|
The rkt module has been removed, it was archived by upstream.
|
||||||
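For reference, a minimal sketch of the workaround described in the PostgreSQL note above, assuming a configuration whose postgresql.conf points at files that are not visible inside the build sandbox (the certificate path is hypothetical):
  # configuration.nix (sketch)
  services.postgresql = {
    enable = true;
    # references a path outside the Nix store, so the build-time check cannot read it
    settings.ssl_cert_file = "/var/lib/postgresql/server.crt";  # hypothetical path
    checkConfig = false;  # skip the new build-time syntax check
  };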
|
|
|
@ -23,6 +23,9 @@
|
||||||
Support is planned until the end of December 2021, handing over to 21.11.
|
Support is planned until the end of December 2021, handing over to 21.11.
|
||||||
</para>
|
</para>
|
||||||
</listitem>
|
</listitem>
|
||||||
|
<listitem>
|
||||||
|
<para>The default Linux kernel was updated to the 5.10 LTS series, coming from the 5.4 LTS series.</para>
|
||||||
|
</listitem>
|
||||||
<listitem>
|
<listitem>
|
||||||
<para>GNOME desktop environment was upgraded to 3.38, see its <link xlink:href="https://help.gnome.org/misc/release-notes/3.38/">release notes</link>.</para>
|
<para>GNOME desktop environment was upgraded to 3.38, see its <link xlink:href="https://help.gnome.org/misc/release-notes/3.38/">release notes</link>.</para>
|
||||||
</listitem>
|
</listitem>
|
||||||
|
@ -67,6 +70,12 @@
|
||||||
for the motivation).
|
for the motivation).
|
||||||
</para>
|
</para>
|
||||||
</listitem>
|
</listitem>
|
||||||
|
<listitem>
|
||||||
|
<para>
|
||||||
|
Python optimizations were disabled again. Builds with optimizations enabled
|
||||||
|
are not reproducible. Optimizations can now be enabled with an option.
|
||||||
|
</para>
|
||||||
|
</listitem>
|
||||||
<listitem>
|
<listitem>
|
||||||
<para>
|
<para>
|
||||||
<link xlink:href="https://kodi.tv/">Kodi</link> has been updated to version 19.0 "Matrix". See
|
<link xlink:href="https://kodi.tv/">Kodi</link> has been updated to version 19.0 "Matrix". See
|
||||||
|
@ -235,7 +244,7 @@
|
||||||
</listitem>
|
</listitem>
|
||||||
<listitem>
|
<listitem>
|
||||||
<para>
|
<para>
|
||||||
xfsprogs was update from 4.19 to 5.10. It now enables reflink support by default on filesystem creation.
|
xfsprogs was updated from 4.19 to 5.11. It now enables reflink support by default on filesystem creation.
|
||||||
Support for reflinks was added with an experimental status to kernel 4.9 and deemed stable in kernel 4.16.
|
Support for reflinks was added with an experimental status to kernel 4.9 and deemed stable in kernel 4.16.
|
||||||
If you want to be able to mount XFS filesystems created with this release of xfsprogs on kernel releases older than those, you need to format them
|
If you want to be able to mount XFS filesystems created with this release of xfsprogs on kernel releases older than those, you need to format them
|
||||||
with <literal>mkfs.xfs -m reflink=0</literal>.
|
with <literal>mkfs.xfs -m reflink=0</literal>.
|
||||||
|
@ -579,6 +588,11 @@ self: super:
|
||||||
<xref linkend="opt-services.xserver.videoDrivers" /> no longer uses the deprecated <literal>cirrus</literal> and <literal>vesa</literal> device dependent X drivers by default. It also enables both <literal>amdgpu</literal> and <literal>nouveau</literal> drivers by default now.
|
<xref linkend="opt-services.xserver.videoDrivers" /> no longer uses the deprecated <literal>cirrus</literal> and <literal>vesa</literal> device dependent X drivers by default. It also enables both <literal>amdgpu</literal> and <literal>nouveau</literal> drivers by default now.
|
||||||
</para>
|
</para>
|
||||||
</listitem>
|
</listitem>
|
||||||
|
<listitem>
|
||||||
|
<para>
|
||||||
|
The <literal>kindlegen</literal> package is gone, because it is no longer supported or hosted by Amazon. Sadly, its replacement, Kindle Previewer, has no Linux support. However, there are other ways to generate MOBI files. See <link xlink:href="https://github.com/NixOS/nixpkgs/issues/96439">the discussion</link> for more info.
|
||||||
|
</para>
|
||||||
|
</listitem>
|
||||||
<listitem>
|
<listitem>
|
||||||
<para>
|
<para>
|
||||||
The <package>apacheKafka</package> packages are now built with
|
The <package>apacheKafka</package> packages are now built with
|
||||||
|
@ -625,6 +639,15 @@ environment.systemPackages = [
|
||||||
</programlisting>
|
</programlisting>
|
||||||
</para>
|
</para>
|
||||||
</listitem>
|
</listitem>
|
||||||
|
<listitem>
|
||||||
|
<para>
|
||||||
|
<option>environment.defaultPackages</option> now includes the nano package.
|
||||||
|
If <package>pkgs.nano</package> is not added to the list,
|
||||||
|
make sure another editor is installed and the <literal>EDITOR</literal>
|
||||||
|
environment variable is set to it.
|
||||||
|
Environment variables can be set using <option>environment.variables</option>.
|
||||||
|
</para>
|
||||||
|
</listitem>
|
||||||
</itemizedlist>
|
</itemizedlist>
|
||||||
</section>
|
</section>
|
||||||
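A minimal sketch of the editor note above, assuming vim as the replacement editor; plain assignment to environment.defaultPackages overrides its default list:
  # configuration.nix (sketch): drop the default set (including nano) and keep a usable editor
  environment.defaultPackages = [ pkgs.perl pkgs.rsync pkgs.strace ];
  environment.systemPackages = [ pkgs.vim ];
  environment.variables.EDITOR = "vim";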
|
|
||||||
|
@ -883,6 +906,14 @@ environment.systemPackages = [
|
||||||
Please test your setup and container images with containerd prior to upgrading.
|
Please test your setup and container images with containerd prior to upgrading.
|
||||||
</para>
|
</para>
|
||||||
</listitem>
|
</listitem>
|
||||||
|
<listitem>
|
||||||
|
<para>
|
||||||
|
The GitLab module now has support for automatic backups. A
|
||||||
|
schedule can be set with the
|
||||||
|
<link linkend="opt-services.gitlab.backup.startAt">services.gitlab.backup.startAt</link>
|
||||||
|
option.
|
||||||
|
</para>
|
||||||
|
</listitem>
|
||||||
</itemizedlist>
|
</itemizedlist>
|
||||||
</section>
|
</section>
|
||||||
</section>
|
</section>
|
||||||
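A hedged sketch of the new automatic-backup support mentioned above, using only option names introduced later in this commit; bucket name, region and key paths are made up:
  # configuration.nix (sketch)
  services.gitlab.backup = {
    startAt = "03:00";          # nightly backup
    keepTime = 48;              # prune archives older than 48 hours
    skip = [ "artifacts" "lfs" ];
    uploadOptions = {
      connection = {
        provider = "AWS";
        region = "eu-north-1";
        aws_access_key_id = "AKIAXXXXXXXXXXXXXXXX";
        aws_secret_access_key = { _secret = "/run/keys/aws_secret_access_key"; };
      };
      remote_directory = "my-gitlab-backups";
    };
  };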
|
|
|
@ -29,7 +29,6 @@ let
|
||||||
pkgs.xz
|
pkgs.xz
|
||||||
pkgs.less
|
pkgs.less
|
||||||
pkgs.libcap
|
pkgs.libcap
|
||||||
pkgs.nano
|
|
||||||
pkgs.ncurses
|
pkgs.ncurses
|
||||||
pkgs.netcat
|
pkgs.netcat
|
||||||
config.programs.ssh.package
|
config.programs.ssh.package
|
||||||
|
@ -43,7 +42,8 @@ let
|
||||||
];
|
];
|
||||||
|
|
||||||
defaultPackages = map (pkg: setPrio ((pkg.meta.priority or 5) + 3) pkg)
|
defaultPackages = map (pkg: setPrio ((pkg.meta.priority or 5) + 3) pkg)
|
||||||
[ pkgs.perl
|
[ pkgs.nano
|
||||||
|
pkgs.perl
|
||||||
pkgs.rsync
|
pkgs.rsync
|
||||||
pkgs.strace
|
pkgs.strace
|
||||||
];
|
];
|
||||||
|
@ -75,13 +75,21 @@ in
|
||||||
default = defaultPackages;
|
default = defaultPackages;
|
||||||
example = literalExample "[]";
|
example = literalExample "[]";
|
||||||
description = ''
|
description = ''
|
||||||
Set of packages users expect from a minimal linux istall.
|
Set of default packages that aren't strictly necessary
|
||||||
Like systemPackages, they appear in
|
for a running system; entries can be removed for a more
|
||||||
/run/current-system/sw. These packages are
|
minimal NixOS installation.
|
||||||
|
|
||||||
|
Note: If <package>pkgs.nano</package> is removed from this list,
|
||||||
|
make sure another editor is installed and the
|
||||||
|
<literal>EDITOR</literal> environment variable is set to it.
|
||||||
|
Environment variables can be set using
|
||||||
|
<option>environment.variables</option>.
|
||||||
|
|
||||||
|
Like with systemPackages, packages are installed to
|
||||||
|
<filename>/run/current-system/sw</filename>. They are
|
||||||
automatically available to all users, and are
|
automatically available to all users, and are
|
||||||
automatically updated every time you rebuild the system
|
automatically updated every time you rebuild the system
|
||||||
configuration.
|
configuration.
|
||||||
If you want a more minimal system, set it to an empty list.
|
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -593,8 +593,8 @@ in {
|
||||||
# password or an SSH authorized key. Privileged accounts are
|
# password or an SSH authorized key. Privileged accounts are
|
||||||
# root and users in the wheel group.
|
# root and users in the wheel group.
|
||||||
assertion = !cfg.mutableUsers ->
|
assertion = !cfg.mutableUsers ->
|
||||||
any id ((mapAttrsToList (name: cfg:
|
any id ((mapAttrsToList (_: cfg:
|
||||||
(name == "root"
|
(cfg.name == "root"
|
||||||
|| cfg.group == "wheel"
|
|| cfg.group == "wheel"
|
||||||
|| elem "wheel" cfg.extraGroups)
|
|| elem "wheel" cfg.extraGroups)
|
||||||
&&
|
&&
|
||||||
|
@ -615,16 +615,16 @@ in {
|
||||||
assertion = (user.hashedPassword != null)
|
assertion = (user.hashedPassword != null)
|
||||||
-> (builtins.match ".*:.*" user.hashedPassword == null);
|
-> (builtins.match ".*:.*" user.hashedPassword == null);
|
||||||
message = ''
|
message = ''
|
||||||
The password hash of user "${name}" contains a ":" character.
|
The password hash of user "${user.name}" contains a ":" character.
|
||||||
This is invalid and would break the login system because the fields
|
This is invalid and would break the login system because the fields
|
||||||
of /etc/shadow (file where hashes are stored) are colon-separated.
|
of /etc/shadow (file where hashes are stored) are colon-separated.
|
||||||
Please check the value of option `users.users."${name}".hashedPassword`.'';
|
Please check the value of option `users.users."${user.name}".hashedPassword`.'';
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
|
|
||||||
warnings =
|
warnings =
|
||||||
builtins.filter (x: x != null) (
|
builtins.filter (x: x != null) (
|
||||||
flip mapAttrsToList cfg.users (name: user:
|
flip mapAttrsToList cfg.users (_: user:
|
||||||
# This regex matches a subset of the Modular Crypto Format (MCF)[1]
|
# This regex matches a subset of the Modular Crypto Format (MCF)[1]
|
||||||
# informal standard. Since this depends largely on the OS or the
|
# informal standard. Since this depends largely on the OS or the
|
||||||
# specific implementation of crypt(3) we only support the (sane)
|
# specific implementation of crypt(3) we only support the (sane)
|
||||||
|
@ -647,9 +647,9 @@ in {
|
||||||
&& user.hashedPassword != "" # login without password
|
&& user.hashedPassword != "" # login without password
|
||||||
&& builtins.match mcf user.hashedPassword == null)
|
&& builtins.match mcf user.hashedPassword == null)
|
||||||
then ''
|
then ''
|
||||||
The password hash of user "${name}" may be invalid. You must set a
|
The password hash of user "${user.name}" may be invalid. You must set a
|
||||||
valid hash or the user will be locked out of their account. Please
|
valid hash or the user will be locked out of their account. Please
|
||||||
check the value of option `users.users."${name}".hashedPassword`.''
|
check the value of option `users.users."${user.name}".hashedPassword`.''
|
||||||
else null
|
else null
|
||||||
));
|
));
|
||||||
|
|
||||||
|
|
|
@ -49,7 +49,7 @@ in {
|
||||||
rt5677-firmware
|
rt5677-firmware
|
||||||
rtl8723bs-firmware
|
rtl8723bs-firmware
|
||||||
rtl8761b-firmware
|
rtl8761b-firmware
|
||||||
rtlwifi_new-firmware
|
rtw88-firmware
|
||||||
zd1211fw
|
zd1211fw
|
||||||
alsa-firmware
|
alsa-firmware
|
||||||
sof-firmware
|
sof-firmware
|
||||||
|
|
100
third_party/nixpkgs/nixos/modules/hardware/sata.nix
vendored
Normal file
100
third_party/nixpkgs/nixos/modules/hardware/sata.nix
vendored
Normal file
|
@ -0,0 +1,100 @@
|
||||||
|
{ config, lib, pkgs, ... }:
|
||||||
|
let
|
||||||
|
inherit (lib) mkEnableOption mkIf mkOption types;
|
||||||
|
|
||||||
|
cfg = config.hardware.sata.timeout;
|
||||||
|
|
||||||
|
buildRule = d:
|
||||||
|
lib.concatStringsSep ", " [
|
||||||
|
''ACTION=="add"''
|
||||||
|
''SUBSYSTEM=="block"''
|
||||||
|
''ENV{ID_${lib.toUpper d.idBy}}=="${d.name}"''
|
||||||
|
''TAG+="systemd"''
|
||||||
|
''ENV{SYSTEMD_WANTS}="${unitName d}"''
|
||||||
|
];
|
||||||
|
|
||||||
|
devicePath = device:
|
||||||
|
"/dev/disk/by-${device.idBy}/${device.name}";
|
||||||
|
|
||||||
|
unitName = device:
|
||||||
|
"sata-timeout-${lib.strings.sanitizeDerivationName device.name}";
|
||||||
|
|
||||||
|
startScript =
|
||||||
|
pkgs.writeShellScript "sata-timeout.sh" ''
|
||||||
|
set -eEuo pipefail
|
||||||
|
|
||||||
|
device="$1"
|
||||||
|
|
||||||
|
${pkgs.smartmontools}/bin/smartctl \
|
||||||
|
-l scterc,${toString cfg.deciSeconds},${toString cfg.deciSeconds} \
|
||||||
|
--quietmode errorsonly \
|
||||||
|
"$device"
|
||||||
|
'';
|
||||||
|
|
||||||
|
in
|
||||||
|
{
|
||||||
|
meta.maintainers = with lib.maintainers; [ peterhoeg ];
|
||||||
|
|
||||||
|
options.hardware.sata.timeout = {
|
||||||
|
enable = mkEnableOption "SATA drive timeouts";
|
||||||
|
|
||||||
|
deciSeconds = mkOption {
|
||||||
|
example = "70";
|
||||||
|
type = types.int;
|
||||||
|
description = ''
|
||||||
|
Set SCT Error Recovery Control timeout in deciseconds for use in RAID configurations.
|
||||||
|
|
||||||
|
Values are as follows:
|
||||||
|
0 = disable SCT ERT
|
||||||
|
70 = default in consumer drives (7 seconds)
|
||||||
|
|
||||||
|
Maximum is disk dependent but probably 60 seconds.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
drives = mkOption {
|
||||||
|
description = "List of drives for which to configure the timeout.";
|
||||||
|
type = types.listOf
|
||||||
|
(types.submodule {
|
||||||
|
options = {
|
||||||
|
name = mkOption {
|
||||||
|
description = "Drive name without the full path.";
|
||||||
|
type = types.str;
|
||||||
|
};
|
||||||
|
|
||||||
|
idBy = mkOption {
|
||||||
|
description = "The method to identify the drive.";
|
||||||
|
type = types.enum [ "path" "wwn" ];
|
||||||
|
default = "path";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
});
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config = mkIf cfg.enable {
|
||||||
|
services.udev.extraRules = lib.concatMapStringsSep "\n" buildRule cfg.drives;
|
||||||
|
|
||||||
|
systemd.services = lib.listToAttrs (map
|
||||||
|
(e:
|
||||||
|
lib.nameValuePair (unitName e) {
|
||||||
|
description = "SATA timeout for ${e.name}";
|
||||||
|
wantedBy = [ "sata-timeout.target" ];
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "oneshot";
|
||||||
|
ExecStart = "${startScript} '${devicePath e}'";
|
||||||
|
PrivateTmp = true;
|
||||||
|
PrivateNetwork = true;
|
||||||
|
ProtectHome = "tmpfs";
|
||||||
|
ProtectSystem = "strict";
|
||||||
|
};
|
||||||
|
}
|
||||||
|
)
|
||||||
|
cfg.drives);
|
||||||
|
|
||||||
|
systemd.targets.sata-timeout = {
|
||||||
|
description = "SATA timeout";
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
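A short usage sketch for the new hardware/sata.nix module above; the drive identifier is hypothetical and must match a link under /dev/disk/by-path (or by-wwn with idBy = "wwn"):
  # configuration.nix (sketch): 7 s SCT error recovery for one RAID member
  hardware.sata.timeout = {
    enable = true;
    deciSeconds = 70;
    drives = [
      { name = "pci-0000:00:17.0-ata-1"; idBy = "path"; }  # hypothetical drive
    ];
  };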
29
third_party/nixpkgs/nixos/modules/hardware/ubertooth.nix
vendored
Normal file
29
third_party/nixpkgs/nixos/modules/hardware/ubertooth.nix
vendored
Normal file
|
@ -0,0 +1,29 @@
|
||||||
|
{ config, lib, pkgs, ... }:
|
||||||
|
|
||||||
|
with lib;
|
||||||
|
|
||||||
|
let
|
||||||
|
cfg = config.hardware.ubertooth;
|
||||||
|
|
||||||
|
ubertoothPkg = pkgs.ubertooth.override {
|
||||||
|
udevGroup = cfg.group;
|
||||||
|
};
|
||||||
|
in {
|
||||||
|
options.hardware.ubertooth = {
|
||||||
|
enable = mkEnableOption "Enable the Ubertooth software and its udev rules.";
|
||||||
|
|
||||||
|
group = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "ubertooth";
|
||||||
|
example = "wheel";
|
||||||
|
description = "Group for Ubertooth's udev rules.";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config = mkIf cfg.enable {
|
||||||
|
environment.systemPackages = [ ubertoothPkg ];
|
||||||
|
|
||||||
|
services.udev.packages = [ ubertoothPkg ];
|
||||||
|
users.groups.${cfg.group} = {};
|
||||||
|
};
|
||||||
|
}
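Usage of the new hardware/ubertooth.nix module above is a one-liner; the group override is shown only to illustrate the option (sketch):
  # configuration.nix (sketch)
  hardware.ubertooth.enable = true;
  hardware.ubertooth.group = "wheel";  # optional, defaults to "ubertooth"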
|
|
@ -67,9 +67,11 @@
|
||||||
./hardware/steam-hardware.nix
|
./hardware/steam-hardware.nix
|
||||||
./hardware/system-76.nix
|
./hardware/system-76.nix
|
||||||
./hardware/tuxedo-keyboard.nix
|
./hardware/tuxedo-keyboard.nix
|
||||||
|
./hardware/ubertooth.nix
|
||||||
./hardware/usb-wwan.nix
|
./hardware/usb-wwan.nix
|
||||||
./hardware/onlykey.nix
|
./hardware/onlykey.nix
|
||||||
./hardware/opentabletdriver.nix
|
./hardware/opentabletdriver.nix
|
||||||
|
./hardware/sata.nix
|
||||||
./hardware/wooting.nix
|
./hardware/wooting.nix
|
||||||
./hardware/uinput.nix
|
./hardware/uinput.nix
|
||||||
./hardware/video/amdgpu.nix
|
./hardware/video/amdgpu.nix
|
||||||
|
@ -231,6 +233,7 @@
|
||||||
./services/audio/alsa.nix
|
./services/audio/alsa.nix
|
||||||
./services/audio/jack.nix
|
./services/audio/jack.nix
|
||||||
./services/audio/icecast.nix
|
./services/audio/icecast.nix
|
||||||
|
./services/audio/jmusicbot.nix
|
||||||
./services/audio/liquidsoap.nix
|
./services/audio/liquidsoap.nix
|
||||||
./services/audio/mpd.nix
|
./services/audio/mpd.nix
|
||||||
./services/audio/mpdscribble.nix
|
./services/audio/mpdscribble.nix
|
||||||
|
@ -498,6 +501,7 @@
|
||||||
./services/misc/lifecycled.nix
|
./services/misc/lifecycled.nix
|
||||||
./services/misc/mame.nix
|
./services/misc/mame.nix
|
||||||
./services/misc/matrix-appservice-discord.nix
|
./services/misc/matrix-appservice-discord.nix
|
||||||
|
./services/misc/matrix-appservice-irc.nix
|
||||||
./services/misc/matrix-synapse.nix
|
./services/misc/matrix-synapse.nix
|
||||||
./services/misc/mautrix-telegram.nix
|
./services/misc/mautrix-telegram.nix
|
||||||
./services/misc/mbpfan.nix
|
./services/misc/mbpfan.nix
|
||||||
|
@ -513,6 +517,7 @@
|
||||||
./services/misc/nzbget.nix
|
./services/misc/nzbget.nix
|
||||||
./services/misc/nzbhydra2.nix
|
./services/misc/nzbhydra2.nix
|
||||||
./services/misc/octoprint.nix
|
./services/misc/octoprint.nix
|
||||||
|
./services/misc/ombi.nix
|
||||||
./services/misc/osrm.nix
|
./services/misc/osrm.nix
|
||||||
./services/misc/packagekit.nix
|
./services/misc/packagekit.nix
|
||||||
./services/misc/paperless.nix
|
./services/misc/paperless.nix
|
||||||
|
@ -888,6 +893,7 @@
|
||||||
./services/web-apps/atlassian/crowd.nix
|
./services/web-apps/atlassian/crowd.nix
|
||||||
./services/web-apps/atlassian/jira.nix
|
./services/web-apps/atlassian/jira.nix
|
||||||
./services/web-apps/bookstack.nix
|
./services/web-apps/bookstack.nix
|
||||||
|
./services/web-apps/calibre-web.nix
|
||||||
./services/web-apps/convos.nix
|
./services/web-apps/convos.nix
|
||||||
./services/web-apps/cryptpad.nix
|
./services/web-apps/cryptpad.nix
|
||||||
./services/web-apps/documize.nix
|
./services/web-apps/documize.nix
|
||||||
|
@ -948,6 +954,7 @@
|
||||||
./services/web-servers/nginx/default.nix
|
./services/web-servers/nginx/default.nix
|
||||||
./services/web-servers/nginx/gitweb.nix
|
./services/web-servers/nginx/gitweb.nix
|
||||||
./services/web-servers/phpfpm/default.nix
|
./services/web-servers/phpfpm/default.nix
|
||||||
|
./services/web-servers/pomerium.nix
|
||||||
./services/web-servers/unit/default.nix
|
./services/web-servers/unit/default.nix
|
||||||
./services/web-servers/shellinabox.nix
|
./services/web-servers/shellinabox.nix
|
||||||
./services/web-servers/tomcat.nix
|
./services/web-servers/tomcat.nix
|
||||||
|
|
|
@ -8,7 +8,7 @@ let
|
||||||
cfg = config.programs.mininet;
|
cfg = config.programs.mininet;
|
||||||
|
|
||||||
generatedPath = with pkgs; makeSearchPath "bin" [
|
generatedPath = with pkgs; makeSearchPath "bin" [
|
||||||
iperf ethtool iproute socat
|
iperf ethtool iproute2 socat
|
||||||
];
|
];
|
||||||
|
|
||||||
pyEnv = pkgs.python.withPackages(ps: [ ps.mininet-python ]);
|
pyEnv = pkgs.python.withPackages(ps: [ ps.mininet-python ]);
|
||||||
|
|
41
third_party/nixpkgs/nixos/modules/services/audio/jmusicbot.nix
vendored
Normal file
41
third_party/nixpkgs/nixos/modules/services/audio/jmusicbot.nix
vendored
Normal file
|
@ -0,0 +1,41 @@
|
||||||
|
{ config, lib, pkgs, ... }:
|
||||||
|
|
||||||
|
with lib;
|
||||||
|
let
|
||||||
|
cfg = config.services.jmusicbot;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
options = {
|
||||||
|
services.jmusicbot = {
|
||||||
|
enable = mkEnableOption "jmusicbot, a Discord music bot that's easy to set up and run yourself";
|
||||||
|
|
||||||
|
stateDir = mkOption {
|
||||||
|
type = types.path;
|
||||||
|
description = ''
|
||||||
|
The directory where config.txt and serversettings.json are saved.
|
||||||
|
If left as the default value, this directory will automatically be created before JMusicBot starts; otherwise the sysadmin is responsible for ensuring the directory exists with appropriate ownership and permissions.
|
||||||
|
Regardless of the value of this option, config.txt needs to be placed manually in this directory.
|
||||||
|
'';
|
||||||
|
default = "/var/lib/jmusicbot/";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config = mkIf cfg.enable {
|
||||||
|
systemd.services.jmusicbot = {
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
after = [ "network-online.target" ];
|
||||||
|
description = "Discord music bot that's easy to set up and run yourself!";
|
||||||
|
serviceConfig = mkMerge [{
|
||||||
|
ExecStart = "${pkgs.jmusicbot}/bin/JMusicBot";
|
||||||
|
WorkingDirectory = cfg.stateDir;
|
||||||
|
Restart = "always";
|
||||||
|
RestartSec = 20;
|
||||||
|
DynamicUser = true;
|
||||||
|
}
|
||||||
|
(mkIf (cfg.stateDir == "/var/lib/jmusicbot") { StateDirectory = "jmusicbot"; })];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
meta.maintainers = with maintainers; [ SuperSandro2000 ];
|
||||||
|
}
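A minimal sketch of enabling the new services/audio/jmusicbot.nix module above; the bot still expects a hand-written config.txt (with the Discord token) inside stateDir:
  # configuration.nix (sketch)
  services.jmusicbot.enable = true;
  services.jmusicbot.stateDir = "/var/lib/jmusicbot/";  # optional, this is the default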
|
|
@ -18,7 +18,12 @@ let
|
||||||
else toString value;
|
else toString value;
|
||||||
|
|
||||||
# The main PostgreSQL configuration file.
|
# The main PostgreSQL configuration file.
|
||||||
configFile = pkgs.writeText "postgresql.conf" (concatStringsSep "\n" (mapAttrsToList (n: v: "${n} = ${toStr v}") cfg.settings));
|
configFile = pkgs.writeTextDir "postgresql.conf" (concatStringsSep "\n" (mapAttrsToList (n: v: "${n} = ${toStr v}") cfg.settings));
|
||||||
|
|
||||||
|
configFileCheck = pkgs.runCommand "postgresql-configfile-check" {} ''
|
||||||
|
${cfg.package}/bin/postgres -D${configFile} -C config_file >/dev/null
|
||||||
|
touch $out
|
||||||
|
'';
|
||||||
|
|
||||||
groupAccessAvailable = versionAtLeast postgresql.version "11.0";
|
groupAccessAvailable = versionAtLeast postgresql.version "11.0";
|
||||||
|
|
||||||
|
@ -53,6 +58,12 @@ in
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
|
||||||
|
checkConfig = mkOption {
|
||||||
|
type = types.bool;
|
||||||
|
default = true;
|
||||||
|
description = "Check the syntax of the configuration file at compile time";
|
||||||
|
};
|
||||||
|
|
||||||
dataDir = mkOption {
|
dataDir = mkOption {
|
||||||
type = types.path;
|
type = types.path;
|
||||||
defaultText = "/var/lib/postgresql/\${config.services.postgresql.package.psqlSchema}";
|
defaultText = "/var/lib/postgresql/\${config.services.postgresql.package.psqlSchema}";
|
||||||
|
@ -314,6 +325,8 @@ in
|
||||||
"/share/postgresql"
|
"/share/postgresql"
|
||||||
];
|
];
|
||||||
|
|
||||||
|
system.extraDependencies = lib.optional (cfg.checkConfig && pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) configFileCheck;
|
||||||
|
|
||||||
systemd.services.postgresql =
|
systemd.services.postgresql =
|
||||||
{ description = "PostgreSQL Server";
|
{ description = "PostgreSQL Server";
|
||||||
|
|
||||||
|
@ -337,7 +350,7 @@ in
|
||||||
touch "${cfg.dataDir}/.first_startup"
|
touch "${cfg.dataDir}/.first_startup"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
ln -sfn "${configFile}" "${cfg.dataDir}/postgresql.conf"
|
ln -sfn "${configFile}/postgresql.conf" "${cfg.dataDir}/postgresql.conf"
|
||||||
${optionalString (cfg.recoveryConfig != null) ''
|
${optionalString (cfg.recoveryConfig != null) ''
|
||||||
ln -sfn "${pkgs.writeText "recovery.conf" cfg.recoveryConfig}" \
|
ln -sfn "${pkgs.writeText "recovery.conf" cfg.recoveryConfig}" \
|
||||||
"${cfg.dataDir}/recovery.conf"
|
"${cfg.dataDir}/recovery.conf"
|
||||||
|
|
|
@ -4,6 +4,7 @@
|
||||||
with lib;
|
with lib;
|
||||||
|
|
||||||
let
|
let
|
||||||
|
json = pkgs.formats.json {};
|
||||||
cfg = config.services.pipewire.media-session;
|
cfg = config.services.pipewire.media-session;
|
||||||
enable32BitAlsaPlugins = cfg.alsa.support32Bit
|
enable32BitAlsaPlugins = cfg.alsa.support32Bit
|
||||||
&& pkgs.stdenv.isx86_64
|
&& pkgs.stdenv.isx86_64
|
||||||
|
@ -17,24 +18,13 @@ let
|
||||||
media-session = (builtins.fromJSON (builtins.readFile ./media-session.conf.json));
|
media-session = (builtins.fromJSON (builtins.readFile ./media-session.conf.json));
|
||||||
v4l2-monitor = (builtins.fromJSON (builtins.readFile ./v4l2-monitor.conf.json));
|
v4l2-monitor = (builtins.fromJSON (builtins.readFile ./v4l2-monitor.conf.json));
|
||||||
};
|
};
|
||||||
# Helpers for generating the pipewire JSON config file
|
|
||||||
mkSPAValueString = v:
|
|
||||||
if builtins.isList v then "[${lib.concatMapStringsSep " " mkSPAValueString v}]"
|
|
||||||
else if lib.types.attrs.check v then
|
|
||||||
"{${lib.concatStringsSep " " (mkSPAKeyValue v)}}"
|
|
||||||
else if builtins.isString v then "\"${lib.generators.mkValueStringDefault { } v}\""
|
|
||||||
else lib.generators.mkValueStringDefault { } v;
|
|
||||||
|
|
||||||
mkSPAKeyValue = attrs: map (def: def.content) (
|
configs = {
|
||||||
lib.sortProperties
|
alsa-monitor = recursiveUpdate defaults.alsa-monitor cfg.config.alsa-monitor;
|
||||||
(
|
bluez-monitor = recursiveUpdate defaults.bluez-monitor cfg.config.bluez-monitor;
|
||||||
lib.mapAttrsToList
|
media-session = recursiveUpdate defaults.media-session cfg.config.media-session;
|
||||||
(k: v: lib.mkOrder (v._priority or 1000) "${lib.escape [ "=" ":" ] k} = ${mkSPAValueString (v._content or v)}")
|
v4l2-monitor = recursiveUpdate defaults.v4l2-monitor cfg.config.v4l2-monitor;
|
||||||
attrs
|
};
|
||||||
)
|
|
||||||
);
|
|
||||||
|
|
||||||
toSPAJSON = attrs: lib.concatStringsSep "\n" (mkSPAKeyValue attrs);
|
|
||||||
in {
|
in {
|
||||||
|
|
||||||
meta = {
|
meta = {
|
||||||
|
@ -62,7 +52,7 @@ in {
|
||||||
|
|
||||||
config = {
|
config = {
|
||||||
media-session = mkOption {
|
media-session = mkOption {
|
||||||
type = types.attrs;
|
type = json.type;
|
||||||
description = ''
|
description = ''
|
||||||
Configuration for the media session core. For details see
|
Configuration for the media session core. For details see
|
||||||
https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/${cfg.package.version}/src/daemon/media-session.d/media-session.conf
|
https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/${cfg.package.version}/src/daemon/media-session.d/media-session.conf
|
||||||
|
@ -71,7 +61,7 @@ in {
|
||||||
};
|
};
|
||||||
|
|
||||||
alsa-monitor = mkOption {
|
alsa-monitor = mkOption {
|
||||||
type = types.attrs;
|
type = json.type;
|
||||||
description = ''
|
description = ''
|
||||||
Configuration for the alsa monitor. For details see
|
Configuration for the alsa monitor. For details see
|
||||||
https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/${cfg.package.version}/src/daemon/media-session.d/alsa-monitor.conf
|
https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/${cfg.package.version}/src/daemon/media-session.d/alsa-monitor.conf
|
||||||
|
@ -80,7 +70,7 @@ in {
|
||||||
};
|
};
|
||||||
|
|
||||||
bluez-monitor = mkOption {
|
bluez-monitor = mkOption {
|
||||||
type = types.attrs;
|
type = json.type;
|
||||||
description = ''
|
description = ''
|
||||||
Configuration for the bluez5 monitor. For details see
|
Configuration for the bluez5 monitor. For details see
|
||||||
https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/${cfg.package.version}/src/daemon/media-session.d/bluez-monitor.conf
|
https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/${cfg.package.version}/src/daemon/media-session.d/bluez-monitor.conf
|
||||||
|
@ -89,7 +79,7 @@ in {
|
||||||
};
|
};
|
||||||
|
|
||||||
v4l2-monitor = mkOption {
|
v4l2-monitor = mkOption {
|
||||||
type = types.attrs;
|
type = json.type;
|
||||||
description = ''
|
description = ''
|
||||||
Configuration for the V4L2 monitor. For details see
|
Configuration for the V4L2 monitor. For details see
|
||||||
https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/${cfg.package.version}/src/daemon/media-session.d/v4l2-monitor.conf
|
https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/${cfg.package.version}/src/daemon/media-session.d/v4l2-monitor.conf
|
||||||
|
@ -106,15 +96,34 @@ in {
|
||||||
systemd.packages = [ cfg.package ];
|
systemd.packages = [ cfg.package ];
|
||||||
systemd.user.services.pipewire-media-session.wantedBy = [ "pipewire.service" ];
|
systemd.user.services.pipewire-media-session.wantedBy = [ "pipewire.service" ];
|
||||||
|
|
||||||
environment.etc."pipewire/media-session.d/media-session.conf" = { text = toSPAJSON (recursiveUpdate defaults.media-session cfg.config.media-session); };
|
environment.etc."pipewire/media-session.d/media-session.conf" = {
|
||||||
environment.etc."pipewire/media-session.d/v4l2-monitor.conf" = { text = toSPAJSON (recursiveUpdate defaults.v4l2-monitor cfg.config.v4l2-monitor); };
|
source = json.generate "media-session.conf" configs.media-session;
|
||||||
|
};
|
||||||
|
environment.etc."pipewire/media-session.d/v4l2-monitor.conf" = {
|
||||||
|
source = json.generate "v4l2-monitor.conf" configs.v4l2-monitor;
|
||||||
|
};
|
||||||
|
|
||||||
environment.etc."pipewire/media-session.d/with-alsa" = mkIf config.services.pipewire.alsa.enable { text = ""; };
|
environment.etc."pipewire/media-session.d/with-alsa" =
|
||||||
environment.etc."pipewire/media-session.d/alsa-monitor.conf" = mkIf config.services.pipewire.alsa.enable { text = toSPAJSON (recursiveUpdate defaults.alsa-monitor cfg.config.alsa-monitor); };
|
mkIf config.services.pipewire.alsa.enable {
|
||||||
|
text = "";
|
||||||
|
};
|
||||||
|
environment.etc."pipewire/media-session.d/alsa-monitor.conf" =
|
||||||
|
mkIf config.services.pipewire.alsa.enable {
|
||||||
|
source = json.generate "alsa-monitor.conf" configs.alsa-monitor;
|
||||||
|
};
|
||||||
|
|
||||||
environment.etc."pipewire/media-session.d/with-pulseaudio" = mkIf config.services.pipewire.pulse.enable { text = ""; };
|
environment.etc."pipewire/media-session.d/with-pulseaudio" =
|
||||||
environment.etc."pipewire/media-session.d/bluez-monitor.conf" = mkIf config.services.pipewire.pulse.enable { text = toSPAJSON (recursiveUpdate defaults.bluez-monitor cfg.config.bluez-monitor); };
|
mkIf config.services.pipewire.pulse.enable {
|
||||||
|
text = "";
|
||||||
|
};
|
||||||
|
environment.etc."pipewire/media-session.d/bluez-monitor.conf" =
|
||||||
|
mkIf config.services.pipewire.pulse.enable {
|
||||||
|
source = json.generate "bluez-monitor.conf" configs.bluez-monitor;
|
||||||
|
};
|
||||||
|
|
||||||
environment.etc."pipewire/media-session.d/with-jack" = mkIf config.services.pipewire.jack.enable { text = ""; };
|
environment.etc."pipewire/media-session.d/with-jack" =
|
||||||
|
mkIf config.services.pipewire.jack.enable {
|
||||||
|
text = "";
|
||||||
|
};
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
|
@ -4,6 +4,7 @@
|
||||||
with lib;
|
with lib;
|
||||||
|
|
||||||
let
|
let
|
||||||
|
json = pkgs.formats.json {};
|
||||||
cfg = config.services.pipewire;
|
cfg = config.services.pipewire;
|
||||||
enable32BitAlsaPlugins = cfg.alsa.support32Bit
|
enable32BitAlsaPlugins = cfg.alsa.support32Bit
|
||||||
&& pkgs.stdenv.isx86_64
|
&& pkgs.stdenv.isx86_64
|
||||||
|
@ -29,24 +30,13 @@ let
|
||||||
pipewire-pulse = builtins.fromJSON (builtins.readFile ./pipewire-pulse.conf.json);
|
pipewire-pulse = builtins.fromJSON (builtins.readFile ./pipewire-pulse.conf.json);
|
||||||
};
|
};
|
||||||
|
|
||||||
# Helpers for generating the pipewire JSON config file
|
configs = {
|
||||||
mkSPAValueString = v:
|
client = recursiveUpdate defaults.client cfg.config.client;
|
||||||
if builtins.isList v then "[${lib.concatMapStringsSep " " mkSPAValueString v}]"
|
client-rt = recursiveUpdate defaults.client-rt cfg.config.client-rt;
|
||||||
else if lib.types.attrs.check v then
|
jack = recursiveUpdate defaults.jack cfg.config.jack;
|
||||||
"{${lib.concatStringsSep " " (mkSPAKeyValue v)}}"
|
pipewire = recursiveUpdate defaults.pipewire cfg.config.pipewire;
|
||||||
else if builtins.isString v then "\"${lib.generators.mkValueStringDefault { } v}\""
|
pipewire-pulse = recursiveUpdate defaults.pipewire-pulse cfg.config.pipewire-pulse;
|
||||||
else lib.generators.mkValueStringDefault { } v;
|
};
|
||||||
|
|
||||||
mkSPAKeyValue = attrs: map (def: def.content) (
|
|
||||||
lib.sortProperties
|
|
||||||
(
|
|
||||||
lib.mapAttrsToList
|
|
||||||
(k: v: lib.mkOrder (v._priority or 1000) "${lib.escape [ "=" ] k} = ${mkSPAValueString (v._content or v)}")
|
|
||||||
attrs
|
|
||||||
)
|
|
||||||
);
|
|
||||||
|
|
||||||
toSPAJSON = attrs: lib.concatStringsSep "\n" (mkSPAKeyValue attrs);
|
|
||||||
in {
|
in {
|
||||||
|
|
||||||
meta = {
|
meta = {
|
||||||
|
@ -78,7 +68,7 @@ in {
|
||||||
|
|
||||||
config = {
|
config = {
|
||||||
client = mkOption {
|
client = mkOption {
|
||||||
type = types.attrs;
|
type = json.type;
|
||||||
default = {};
|
default = {};
|
||||||
description = ''
|
description = ''
|
||||||
Configuration for pipewire clients. For details see
|
Configuration for pipewire clients. For details see
|
||||||
|
@ -87,7 +77,7 @@ in {
|
||||||
};
|
};
|
||||||
|
|
||||||
client-rt = mkOption {
|
client-rt = mkOption {
|
||||||
type = types.attrs;
|
type = json.type;
|
||||||
default = {};
|
default = {};
|
||||||
description = ''
|
description = ''
|
||||||
Configuration for realtime pipewire clients. For details see
|
Configuration for realtime pipewire clients. For details see
|
||||||
|
@ -96,7 +86,7 @@ in {
|
||||||
};
|
};
|
||||||
|
|
||||||
jack = mkOption {
|
jack = mkOption {
|
||||||
type = types.attrs;
|
type = json.type;
|
||||||
default = {};
|
default = {};
|
||||||
description = ''
|
description = ''
|
||||||
Configuration for the pipewire daemon's jack module. For details see
|
Configuration for the pipewire daemon's jack module. For details see
|
||||||
|
@ -105,7 +95,7 @@ in {
|
||||||
};
|
};
|
||||||
|
|
||||||
pipewire = mkOption {
|
pipewire = mkOption {
|
||||||
type = types.attrs;
|
type = json.type;
|
||||||
default = {};
|
default = {};
|
||||||
description = ''
|
description = ''
|
||||||
Configuration for the pipewire daemon. For details see
|
Configuration for the pipewire daemon. For details see
|
||||||
|
@ -114,7 +104,7 @@ in {
|
||||||
};
|
};
|
||||||
|
|
||||||
pipewire-pulse = mkOption {
|
pipewire-pulse = mkOption {
|
||||||
type = types.attrs;
|
type = json.type;
|
||||||
default = {};
|
default = {};
|
||||||
description = ''
|
description = ''
|
||||||
Configuration for the pipewire-pulse daemon. For details see
|
Configuration for the pipewire-pulse daemon. For details see
|
||||||
|
@ -187,11 +177,21 @@ in {
|
||||||
source = "${cfg.package}/share/alsa/alsa.conf.d/99-pipewire-default.conf";
|
source = "${cfg.package}/share/alsa/alsa.conf.d/99-pipewire-default.conf";
|
||||||
};
|
};
|
||||||
|
|
||||||
environment.etc."pipewire/client.conf" = { text = toSPAJSON (recursiveUpdate defaults.client cfg.config.client); };
|
environment.etc."pipewire/client.conf" = {
|
||||||
environment.etc."pipewire/client-rt.conf" = { text = toSPAJSON (recursiveUpdate defaults.client-rt cfg.config.client-rt); };
|
source = json.generate "client.conf" configs.client;
|
||||||
environment.etc."pipewire/jack.conf" = { text = toSPAJSON (recursiveUpdate defaults.jack cfg.config.jack); };
|
};
|
||||||
environment.etc."pipewire/pipewire.conf" = { text = toSPAJSON (recursiveUpdate defaults.pipewire cfg.config.pipewire); };
|
environment.etc."pipewire/client-rt.conf" = {
|
||||||
environment.etc."pipewire/pipewire-pulse.conf" = { text = toSPAJSON (recursiveUpdate defaults.pipewire-pulse cfg.config.pipewire-pulse); };
|
source = json.generate "client-rt.conf" configs.client-rt;
|
||||||
|
};
|
||||||
|
environment.etc."pipewire/jack.conf" = {
|
||||||
|
source = json.generate "jack.conf" configs.jack;
|
||||||
|
};
|
||||||
|
environment.etc."pipewire/pipewire.conf" = {
|
||||||
|
source = json.generate "pipewire.conf" configs.pipewire;
|
||||||
|
};
|
||||||
|
environment.etc."pipewire/pipewire-pulse.conf" = {
|
||||||
|
source = json.generate "pipewire-pulse.conf" configs.pipewire-pulse;
|
||||||
|
};
|
||||||
|
|
||||||
environment.sessionVariables.LD_LIBRARY_PATH =
|
environment.sessionVariables.LD_LIBRARY_PATH =
|
||||||
lib.optional cfg.jack.enable "/run/current-system/sw/lib/pipewire";
|
lib.optional cfg.jack.enable "/run/current-system/sw/lib/pipewire";
|
||||||
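With the switch to pkgs.formats.json above, user overrides keep the same attribute-set shape; a hedged sketch of overriding one daemon property (the property names come from upstream PipeWire documentation, not from this commit):
  # configuration.nix (sketch)
  services.pipewire.enable = true;
  services.pipewire.config.pipewire = {
    "context.properties" = {
      "default.clock.quantum" = 256;  # assumed upstream property name
    };
  };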
|
|
|
@ -30,7 +30,7 @@ let
|
||||||
};
|
};
|
||||||
|
|
||||||
backends = [ pkg netConf ] ++ optional config.services.saned.enable sanedConf ++ config.hardware.sane.extraBackends;
|
backends = [ pkg netConf ] ++ optional config.services.saned.enable sanedConf ++ config.hardware.sane.extraBackends;
|
||||||
saneConfig = pkgs.mkSaneConfig { paths = backends; };
|
saneConfig = pkgs.mkSaneConfig { paths = backends; inherit (config.hardware.sane) disabledDefaultBackends; };
|
||||||
|
|
||||||
enabled = config.hardware.sane.enable || config.services.saned.enable;
|
enabled = config.hardware.sane.enable || config.services.saned.enable;
|
||||||
|
|
||||||
|
@ -73,6 +73,16 @@ in
|
||||||
example = literalExample "[ pkgs.hplipWithPlugin ]";
|
example = literalExample "[ pkgs.hplipWithPlugin ]";
|
||||||
};
|
};
|
||||||
|
|
||||||
|
hardware.sane.disabledDefaultBackends = mkOption {
|
||||||
|
type = types.listOf types.str;
|
||||||
|
default = [];
|
||||||
|
example = [ "v4l" ];
|
||||||
|
description = ''
|
||||||
|
Names of backends which are enabled by default but should be disabled.
|
||||||
|
See <literal>$SANE_CONFIG_DIR/dll.conf</literal> for the list of possible names.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
hardware.sane.configDir = mkOption {
|
hardware.sane.configDir = mkOption {
|
||||||
type = types.str;
|
type = types.str;
|
||||||
internal = true;
|
internal = true;
|
||||||
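For reference, a usage sketch of the hardware.sane.disabledDefaultBackends option added above; "v4l" is just the example backend name from the option itself:
  # configuration.nix (sketch)
  hardware.sane.enable = true;
  hardware.sane.disabledDefaultBackends = [ "v4l" ];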
|
|
|
@ -13,7 +13,7 @@ in {
|
||||||
};
|
};
|
||||||
|
|
||||||
config = mkIf cfg.enable {
|
config = mkIf cfg.enable {
|
||||||
systemd.user.services.spacenavd = {
|
systemd.services.spacenavd = {
|
||||||
description = "Daemon for the Spacenavigator 6DOF mice by 3Dconnexion";
|
description = "Daemon for the Spacenavigator 6DOF mice by 3Dconnexion";
|
||||||
after = [ "syslog.target" ];
|
after = [ "syslog.target" ];
|
||||||
wantedBy = [ "graphical.target" ];
|
wantedBy = [ "graphical.target" ];
|
||||||
|
|
|
@ -159,10 +159,9 @@ in
|
||||||
###### implementation
|
###### implementation
|
||||||
|
|
||||||
config = mkIf cfg.enable {
|
config = mkIf cfg.enable {
|
||||||
systemd.services.logstash = with pkgs; {
|
systemd.services.logstash = {
|
||||||
description = "Logstash Daemon";
|
description = "Logstash Daemon";
|
||||||
wantedBy = [ "multi-user.target" ];
|
wantedBy = [ "multi-user.target" ];
|
||||||
environment = { JAVA_HOME = jre; };
|
|
||||||
path = [ pkgs.bash ];
|
path = [ pkgs.bash ];
|
||||||
serviceConfig = {
|
serviceConfig = {
|
||||||
ExecStartPre = ''${pkgs.coreutils}/bin/mkdir -p "${cfg.dataDir}" ; ${pkgs.coreutils}/bin/chmod 700 "${cfg.dataDir}"'';
|
ExecStartPre = ''${pkgs.coreutils}/bin/mkdir -p "${cfg.dataDir}" ; ${pkgs.coreutils}/bin/chmod 700 "${cfg.dataDir}"'';
|
||||||
|
|
|
@ -3,7 +3,8 @@
|
||||||
with lib;
|
with lib;
|
||||||
let cfg = config.services.vector;
|
let cfg = config.services.vector;
|
||||||
|
|
||||||
in {
|
in
|
||||||
|
{
|
||||||
options.services.vector = {
|
options.services.vector = {
|
||||||
enable = mkEnableOption "Vector";
|
enable = mkEnableOption "Vector";
|
||||||
|
|
||||||
|
@ -37,15 +38,17 @@ in {
|
||||||
wantedBy = [ "multi-user.target" ];
|
wantedBy = [ "multi-user.target" ];
|
||||||
after = [ "network-online.target" ];
|
after = [ "network-online.target" ];
|
||||||
requires = [ "network-online.target" ];
|
requires = [ "network-online.target" ];
|
||||||
serviceConfig = let
|
serviceConfig =
|
||||||
|
let
|
||||||
format = pkgs.formats.toml { };
|
format = pkgs.formats.toml { };
|
||||||
conf = format.generate "vector.toml" cfg.settings;
|
conf = format.generate "vector.toml" cfg.settings;
|
||||||
validateConfig = file:
|
validateConfig = file:
|
||||||
pkgs.runCommand "validate-vector-conf" { } ''
|
pkgs.runCommand "validate-vector-conf" { } ''
|
||||||
${pkgs.vector}/bin/vector validate --no-topology --no-environment "${file}"
|
${pkgs.vector}/bin/vector validate --no-environment "${file}"
|
||||||
ln -s "${file}" "$out"
|
ln -s "${file}" "$out"
|
||||||
'';
|
'';
|
||||||
in {
|
in
|
||||||
|
{
|
||||||
ExecStart = "${pkgs.vector}/bin/vector --config ${validateConfig conf}";
|
ExecStart = "${pkgs.vector}/bin/vector --config ${validateConfig conf}";
|
||||||
User = "vector";
|
User = "vector";
|
||||||
Group = "vector";
|
Group = "vector";
|
||||||
|
|
|
@ -8,31 +8,28 @@ let
|
||||||
pythonEnv = pkgs.python3.withPackages (ps: with ps;
|
pythonEnv = pkgs.python3.withPackages (ps: with ps;
|
||||||
[ etebase-server daphne ]);
|
[ etebase-server daphne ]);
|
||||||
|
|
||||||
dbConfig = {
|
iniFmt = pkgs.formats.ini {};
|
||||||
sqlite3 = ''
|
|
||||||
engine = django.db.backends.sqlite3
|
|
||||||
name = ${cfg.dataDir}/db.sqlite3
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
defaultConfigIni = toString (pkgs.writeText "etebase-server.ini" ''
|
configIni = iniFmt.generate "etebase-server.ini" cfg.settings;
|
||||||
[global]
|
|
||||||
debug = false
|
|
||||||
secret_file = ${if cfg.secretFile != null then cfg.secretFile else ""}
|
|
||||||
media_root = ${cfg.dataDir}/media
|
|
||||||
|
|
||||||
[allowed_hosts]
|
|
||||||
allowed_host1 = ${cfg.host}
|
|
||||||
|
|
||||||
[database]
|
|
||||||
${dbConfig."${cfg.database.type}"}
|
|
||||||
'');
|
|
||||||
|
|
||||||
configIni = if cfg.customIni != null then cfg.customIni else defaultConfigIni;
|
|
||||||
|
|
||||||
defaultUser = "etebase-server";
|
defaultUser = "etebase-server";
|
||||||
in
|
in
|
||||||
{
|
{
|
||||||
|
imports = [
|
||||||
|
(mkRemovedOptionModule
|
||||||
|
[ "services" "etebase-server" "customIni" ]
|
||||||
|
"Set the option `services.etebase-server.settings' instead.")
|
||||||
|
(mkRemovedOptionModule
|
||||||
|
[ "services" "etebase-server" "database" ]
|
||||||
|
"Set the option `services.etebase-server.settings.database' instead.")
|
||||||
|
(mkRenamedOptionModule
|
||||||
|
[ "services" "etebase-server" "secretFile" ]
|
||||||
|
[ "services" "etebase-server" "settings" "secret_file" ])
|
||||||
|
(mkRenamedOptionModule
|
||||||
|
[ "services" "etebase-server" "host" ]
|
||||||
|
[ "services" "etebase-server" "settings" "allowed_hosts" "allowed_host1" ])
|
||||||
|
];
|
||||||
|
|
||||||
options = {
|
options = {
|
||||||
services.etebase-server = {
|
services.etebase-server = {
|
||||||
enable = mkOption {
|
enable = mkOption {
|
||||||
|
@ -42,21 +39,13 @@ in
|
||||||
description = ''
|
description = ''
|
||||||
Whether to enable the Etebase server.
|
Whether to enable the Etebase server.
|
||||||
|
|
||||||
Once enabled you need to create an admin user using the
|
Once enabled you need to create an admin user by invoking the
|
||||||
shell command <literal>etebase-server createsuperuser</literal>.
|
shell command <literal>etebase-server createsuperuser</literal> with
|
||||||
|
the user specified by the <literal>user</literal> option or a superuser.
|
||||||
Then you can login and create accounts on your-etebase-server.com/admin
|
Then you can login and create accounts on your-etebase-server.com/admin
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
|
||||||
secretFile = mkOption {
|
|
||||||
default = null;
|
|
||||||
type = with types; nullOr str;
|
|
||||||
description = ''
|
|
||||||
The path to a file containing the secret
|
|
||||||
used as django's SECRET_KEY.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
dataDir = mkOption {
|
dataDir = mkOption {
|
||||||
type = types.str;
|
type = types.str;
|
||||||
default = "/var/lib/etebase-server";
|
default = "/var/lib/etebase-server";
|
||||||
|
@ -77,15 +66,6 @@ in
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
|
||||||
host = mkOption {
|
|
||||||
type = types.str;
|
|
||||||
default = "0.0.0.0";
|
|
||||||
example = "localhost";
|
|
||||||
description = ''
|
|
||||||
Host to listen on.
|
|
||||||
'';
|
|
||||||
};
|
|
||||||
|
|
||||||
unixSocket = mkOption {
|
unixSocket = mkOption {
|
||||||
type = with types; nullOr str;
|
type = with types; nullOr str;
|
||||||
default = null;
|
default = null;
|
||||||
|
@ -93,43 +73,82 @@ in
|
||||||
example = "/run/etebase-server/etebase-server.sock";
|
example = "/run/etebase-server/etebase-server.sock";
|
||||||
};
|
};
|
||||||
|
|
||||||
database = {
|
settings = mkOption {
|
||||||
type = mkOption {
|
type = lib.types.submodule {
|
||||||
type = types.enum [ "sqlite3" ];
|
freeformType = iniFmt.type;
|
||||||
default = "sqlite3";
|
|
||||||
|
options = {
|
||||||
|
global = {
|
||||||
|
debug = mkOption {
|
||||||
|
type = types.bool;
|
||||||
|
default = false;
|
||||||
description = ''
|
description = ''
|
||||||
Database engine to use.
|
Whether to set django's DEBUG flag.
|
||||||
Currently only sqlite3 is supported.
|
|
||||||
Other options can be configured using <literal>extraConfig</literal>.
|
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
};
|
secret_file = mkOption {
|
||||||
|
|
||||||
customIni = mkOption {
|
|
||||||
type = with types; nullOr str;
|
type = with types; nullOr str;
|
||||||
default = null;
|
default = null;
|
||||||
description = ''
|
description = ''
|
||||||
Custom etebase-server.ini.
|
The path to a file containing the secret
|
||||||
|
used as django's SECRET_KEY.
|
||||||
See <literal>etebase-src/etebase-server.ini.example</literal> for available options.
|
|
||||||
|
|
||||||
Setting this option overrides the default config which is generated from the options
|
|
||||||
<literal>secretFile</literal>, <literal>host</literal> and <literal>database</literal>.
|
|
||||||
'';
|
'';
|
||||||
example = literalExample ''
|
};
|
||||||
[global]
|
static_root = mkOption {
|
||||||
debug = false
|
type = types.str;
|
||||||
secret_file = /path/to/secret
|
default = "${cfg.dataDir}/static";
|
||||||
media_root = /path/to/media
|
defaultText = "\${config.services.etebase-server.dataDir}/static";
|
||||||
|
description = "The directory for static files.";
|
||||||
[allowed_hosts]
|
};
|
||||||
allowed_host1 = example.com
|
media_root = mkOption {
|
||||||
|
type = types.str;
|
||||||
[database]
|
default = "${cfg.dataDir}/media";
|
||||||
engine = django.db.backends.sqlite3
|
defaultText = "\${config.services.etebase-server.dataDir}/media";
|
||||||
name = db.sqlite3
|
description = "The media directory.";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
allowed_hosts = {
|
||||||
|
allowed_host1 = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "0.0.0.0";
|
||||||
|
example = "localhost";
|
||||||
|
description = ''
|
||||||
|
The main host that is allowed access.
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
};
|
||||||
|
database = {
|
||||||
|
engine = mkOption {
|
||||||
|
type = types.enum [ "django.db.backends.sqlite3" "django.db.backends.postgresql" ];
|
||||||
|
default = "django.db.backends.sqlite3";
|
||||||
|
description = "The database engine to use.";
|
||||||
|
};
|
||||||
|
name = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "${cfg.dataDir}/db.sqlite3";
|
||||||
|
defaultText = "\${config.services.etebase-server.dataDir}/db.sqlite3";
|
||||||
|
description = "The database name.";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
default = {};
|
||||||
|
description = ''
|
||||||
|
Configuration for <package>etebase-server</package>. Refer to
|
||||||
|
<link xlink:href="https://github.com/etesync/server/blob/master/etebase-server.ini.example" />
|
||||||
|
and <link xlink:href="https://github.com/etesync/server/wiki" />
|
||||||
|
for details on supported values.
|
||||||
|
'';
|
||||||
|
example = {
|
||||||
|
global = {
|
||||||
|
debug = true;
|
||||||
|
media_root = "/path/to/media";
|
||||||
|
};
|
||||||
|
allowed_hosts = {
|
||||||
|
allowed_host2 = "localhost";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
user = mkOption {
|
user = mkOption {
|
||||||
type = types.str;
|
type = types.str;
|
||||||
|
@ -166,14 +185,15 @@ in
|
||||||
WorkingDirectory = cfg.dataDir;
|
WorkingDirectory = cfg.dataDir;
|
||||||
};
|
};
|
||||||
environment = {
|
environment = {
|
||||||
PYTHONPATH="${pythonEnv}/${pkgs.python3.sitePackages}";
|
PYTHONPATH = "${pythonEnv}/${pkgs.python3.sitePackages}";
|
||||||
ETEBASE_EASY_CONFIG_PATH="${configIni}";
|
ETEBASE_EASY_CONFIG_PATH = configIni;
|
||||||
};
|
};
|
||||||
preStart = ''
|
preStart = ''
|
||||||
# Auto-migrate on first run or if the package has changed
|
# Auto-migrate on first run or if the package has changed
|
||||||
versionFile="${cfg.dataDir}/src-version"
|
versionFile="${cfg.dataDir}/src-version"
|
||||||
if [[ $(cat "$versionFile" 2>/dev/null) != ${pkgs.etebase-server} ]]; then
|
if [[ $(cat "$versionFile" 2>/dev/null) != ${pkgs.etebase-server} ]]; then
|
||||||
${pythonEnv}/bin/etebase-server migrate
|
${pythonEnv}/bin/etebase-server migrate
|
||||||
|
${pythonEnv}/bin/etebase-server collectstatic
|
||||||
echo ${pkgs.etebase-server} > "$versionFile"
|
echo ${pkgs.etebase-server} > "$versionFile"
|
||||||
fi
|
fi
|
||||||
'';
|
'';
|
||||||
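Given the migration from customIni/host/database to the freeform settings option above, an equivalent configuration now looks roughly like this (host name and secret path are examples):
  # configuration.nix (sketch)
  services.etebase-server = {
    enable = true;
    settings = {
      global.secret_file = "/run/keys/etebase-secret";       # hypothetical path
      allowed_hosts.allowed_host1 = "etebase.example.com";   # example host
      database = {
        engine = "django.db.backends.sqlite3";
        name = "/var/lib/etebase-server/db.sqlite3";
      };
    };
  };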
|
|
|
@ -116,7 +116,11 @@ let
|
||||||
omniauth.enabled = false;
|
omniauth.enabled = false;
|
||||||
shared.path = "${cfg.statePath}/shared";
|
shared.path = "${cfg.statePath}/shared";
|
||||||
gitaly.client_path = "${cfg.packages.gitaly}/bin";
|
gitaly.client_path = "${cfg.packages.gitaly}/bin";
|
||||||
backup.path = "${cfg.backupPath}";
|
backup = {
|
||||||
|
path = cfg.backup.path;
|
||||||
|
keep_time = cfg.backup.keepTime;
|
||||||
|
upload = cfg.backup.uploadOptions;
|
||||||
|
};
|
||||||
gitlab_shell = {
|
gitlab_shell = {
|
||||||
path = "${cfg.packages.gitlab-shell}";
|
path = "${cfg.packages.gitlab-shell}";
|
||||||
hooks_path = "${cfg.statePath}/shell/hooks";
|
hooks_path = "${cfg.statePath}/shell/hooks";
|
||||||
|
@ -207,6 +211,7 @@ in {
|
||||||
|
|
||||||
imports = [
|
imports = [
|
||||||
(mkRenamedOptionModule [ "services" "gitlab" "stateDir" ] [ "services" "gitlab" "statePath" ])
|
(mkRenamedOptionModule [ "services" "gitlab" "stateDir" ] [ "services" "gitlab" "statePath" ])
|
||||||
|
(mkRenamedOptionModule [ "services" "gitlab" "backupPath" ] [ "services" "gitlab" "backup" "path" ])
|
||||||
(mkRemovedOptionModule [ "services" "gitlab" "satelliteDir" ] "")
|
(mkRemovedOptionModule [ "services" "gitlab" "satelliteDir" ] "")
|
||||||
];
|
];
|
||||||
|
|
||||||
|
@ -260,7 +265,7 @@ in {
|
||||||
type = types.str;
|
type = types.str;
|
||||||
default = "/var/gitlab/state";
|
default = "/var/gitlab/state";
|
||||||
description = ''
|
description = ''
|
||||||
Gitlab state directory. Configuration, repositories and
|
GitLab state directory. Configuration, repositories and
|
||||||
logs, among other things, are stored here.
|
logs, among other things, are stored here.
|
||||||
|
|
||||||
The directory will be created automatically if it doesn't
|
The directory will be created automatically if it doesn't
|
||||||
|
@ -270,17 +275,108 @@ in {
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
|
||||||
backupPath = mkOption {
|
backup.startAt = mkOption {
|
||||||
|
type = with types; either str (listOf str);
|
||||||
|
default = [];
|
||||||
|
example = "03:00";
|
||||||
|
description = ''
|
||||||
|
The time(s) to run automatic backup of GitLab
|
||||||
|
state. Specified in systemd's time format; see
|
||||||
|
<citerefentry><refentrytitle>systemd.time</refentrytitle>
|
||||||
|
<manvolnum>7</manvolnum></citerefentry>.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
backup.path = mkOption {
|
||||||
type = types.str;
|
type = types.str;
|
||||||
default = cfg.statePath + "/backup";
|
default = cfg.statePath + "/backup";
|
||||||
description = "Gitlab path for backups.";
|
description = "GitLab path for backups.";
|
||||||
|
};
|
||||||
|
|
||||||
|
backup.keepTime = mkOption {
|
||||||
|
type = types.int;
|
||||||
|
default = 0;
|
||||||
|
example = 48;
|
||||||
|
apply = x: x * 60 * 60;
|
||||||
|
description = ''
|
||||||
|
How long to keep the backups around, in
|
||||||
|
hours. <literal>0</literal> means <quote>keep
|
||||||
|
forever</quote>.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
backup.skip = mkOption {
|
||||||
|
type = with types;
|
||||||
|
let value = enum [
|
||||||
|
"db"
|
||||||
|
"uploads"
|
||||||
|
"builds"
|
||||||
|
"artifacts"
|
||||||
|
"lfs"
|
||||||
|
"registry"
|
||||||
|
"pages"
|
||||||
|
"repositories"
|
||||||
|
"tar"
|
||||||
|
];
|
||||||
|
in
|
||||||
|
either value (listOf value);
|
||||||
|
default = [];
|
||||||
|
example = [ "artifacts" "lfs" ];
|
||||||
|
apply = x: if isString x then x else concatStringsSep "," x;
|
||||||
|
description = ''
|
||||||
|
Directories to exclude from the backup. The example excludes
|
||||||
|
CI artifacts and LFS objects from the backups. The
|
||||||
|
<literal>tar</literal> option skips the creation of a tar
|
||||||
|
file.
|
||||||
|
|
||||||
|
Refer to <link xlink:href="https://docs.gitlab.com/ee/raketasks/backup_restore.html#excluding-specific-directories-from-the-backup"/>
|
||||||
|
for more information.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
backup.uploadOptions = mkOption {
|
||||||
|
type = types.attrs;
|
||||||
|
default = {};
|
||||||
|
example = literalExample ''
|
||||||
|
{
|
||||||
|
# Fog storage connection settings, see http://fog.io/storage/
|
||||||
|
connection = {
|
||||||
|
provider = "AWS";
|
||||||
|
region = "eu-north-1";
|
||||||
|
aws_access_key_id = "AKIAXXXXXXXXXXXXXXXX";
|
||||||
|
aws_secret_access_key = { _secret = config.deployment.keys.aws_access_key.path; };
|
||||||
|
};
|
||||||
|
|
||||||
|
# The remote 'directory' to store your backups in.
|
||||||
|
# For S3, this would be the bucket name.
|
||||||
|
remote_directory = "my-gitlab-backups";
|
||||||
|
|
||||||
|
# Use multipart uploads when file size reaches 100MB, see
|
||||||
|
# http://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html
|
||||||
|
multipart_chunk_size = 104857600;
|
||||||
|
|
||||||
|
# Turns on AWS Server-Side Encryption with Amazon S3-Managed Keys for backups, this is optional
|
||||||
|
encryption = "AES256";
|
||||||
|
|
||||||
|
# Specifies Amazon S3 storage class to use for backups, this is optional
|
||||||
|
storage_class = "STANDARD";
|
||||||
|
};
|
||||||
|
'';
|
||||||
|
description = ''
|
||||||
|
GitLab automatic upload specification. Tells GitLab to
|
||||||
|
upload the backup to a remote location when done.
|
||||||
|
|
||||||
|
Attributes specified here are added under
|
||||||
|
<literal>production -> backup -> upload</literal> in
|
||||||
|
<filename>config/gitlab.yml</filename>.
|
||||||
|
'';
|
||||||
};
|
};
|
||||||
|
|
||||||
databaseHost = mkOption {
|
databaseHost = mkOption {
|
||||||
type = types.str;
|
type = types.str;
|
||||||
default = "";
|
default = "";
|
||||||
description = ''
|
description = ''
|
||||||
Gitlab database hostname. An empty string means <quote>use
|
GitLab database hostname. An empty string means <quote>use
|
||||||
local unix socket connection</quote>.
|
local unix socket connection</quote>.
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
|
@ -289,7 +385,7 @@ in {
|
||||||
type = with types; nullOr path;
|
type = with types; nullOr path;
|
||||||
default = null;
|
default = null;
|
||||||
description = ''
|
description = ''
|
||||||
File containing the Gitlab database user password.
|
File containing the GitLab database user password.
|
||||||
|
|
||||||
This should be a string, not a nix path, since nix paths are
|
This should be a string, not a nix path, since nix paths are
|
||||||
copied into the world-readable nix store.
|
copied into the world-readable nix store.
|
||||||
|
@ -310,13 +406,13 @@ in {
|
||||||
databaseName = mkOption {
|
databaseName = mkOption {
|
||||||
type = types.str;
|
type = types.str;
|
||||||
default = "gitlab";
|
default = "gitlab";
|
||||||
description = "Gitlab database name.";
|
description = "GitLab database name.";
|
||||||
};
|
};
|
||||||
|
|
||||||
databaseUsername = mkOption {
|
databaseUsername = mkOption {
|
||||||
type = types.str;
|
type = types.str;
|
||||||
default = "gitlab";
|
default = "gitlab";
|
||||||
description = "Gitlab database user.";
|
description = "GitLab database user.";
|
||||||
};
|
};
|
||||||
|
|
||||||
databasePool = mkOption {
|
databasePool = mkOption {
|
||||||
|
@@ -360,14 +456,14 @@ in {
      host = mkOption {
        type = types.str;
        default = config.networking.hostName;
-       description = "Gitlab host name. Used e.g. for copy-paste URLs.";
+       description = "GitLab host name. Used e.g. for copy-paste URLs.";
      };

      port = mkOption {
        type = types.int;
        default = 8080;
        description = ''
-         Gitlab server port for copy-paste URLs, e.g. 80 or 443 if you're
+         GitLab server port for copy-paste URLs, e.g. 80 or 443 if you're
          service over https.
        '';
      };
@@ -420,26 +516,26 @@ in {
      address = mkOption {
        type = types.str;
        default = "localhost";
-       description = "Address of the SMTP server for Gitlab.";
+       description = "Address of the SMTP server for GitLab.";
      };

      port = mkOption {
        type = types.int;
        default = 25;
-       description = "Port of the SMTP server for Gitlab.";
+       description = "Port of the SMTP server for GitLab.";
      };

      username = mkOption {
        type = with types; nullOr str;
        default = null;
-       description = "Username of the SMTP server for Gitlab.";
+       description = "Username of the SMTP server for GitLab.";
      };

      passwordFile = mkOption {
        type = types.nullOr types.path;
        default = null;
        description = ''
-         File containing the password of the SMTP server for Gitlab.
+         File containing the password of the SMTP server for GitLab.

          This should be a string, not a nix path, since nix paths
          are copied into the world-readable nix store.
@@ -720,7 +816,7 @@ in {
      "d /run/gitlab 0755 ${cfg.user} ${cfg.group} -"
      "d ${gitlabEnv.HOME} 0750 ${cfg.user} ${cfg.group} -"
      "z ${gitlabEnv.HOME}/.ssh/authorized_keys 0600 ${cfg.user} ${cfg.group} -"
-     "d ${cfg.backupPath} 0750 ${cfg.user} ${cfg.group} -"
+     "d ${cfg.backup.path} 0750 ${cfg.user} ${cfg.group} -"
      "d ${cfg.statePath} 0750 ${cfg.user} ${cfg.group} -"
      "d ${cfg.statePath}/builds 0750 ${cfg.user} ${cfg.group} -"
      "d ${cfg.statePath}/config 0750 ${cfg.user} ${cfg.group} -"
@@ -1053,6 +1149,23 @@ in {

      };

+     systemd.services.gitlab-backup = {
+       after = [ "gitlab.service" ];
+       bindsTo = [ "gitlab.service" ];
+       startAt = cfg.backup.startAt;
+       environment = {
+         RAILS_ENV = "production";
+         CRON = "1";
+       } // optionalAttrs (stringLength cfg.backup.skip > 0) {
+         SKIP = cfg.backup.skip;
+       };
+       serviceConfig = {
+         User = cfg.user;
+         Group = cfg.group;
+         ExecStart = "${gitlab-rake}/bin/gitlab-rake gitlab:backup:create";
+       };
+     };
+
    };

    meta.doc = ./gitlab.xml;
@@ -3,15 +3,15 @@
     xmlns:xi="http://www.w3.org/2001/XInclude"
     version="5.0"
     xml:id="module-services-gitlab">
- <title>Gitlab</title>
+ <title>GitLab</title>
  <para>
-  Gitlab is a feature-rich git hosting service.
+  GitLab is a feature-rich git hosting service.
  </para>
  <section xml:id="module-services-gitlab-prerequisites">
   <title>Prerequisites</title>

   <para>
-   The gitlab service exposes only an Unix socket at
+   The <literal>gitlab</literal> service exposes only an Unix socket at
    <literal>/run/gitlab/gitlab-workhorse.socket</literal>. You need to
    configure a webserver to proxy HTTP requests to the socket.
   </para>
@@ -39,7 +39,7 @@
  <title>Configuring</title>

  <para>
-  Gitlab depends on both PostgreSQL and Redis and will automatically enable
+  GitLab depends on both PostgreSQL and Redis and will automatically enable
   both services. In the case of PostgreSQL, a database and a role will be
   created.
  </para>
@@ -85,20 +85,20 @@ services.gitlab = {
  </para>

  <para>
-  If you're setting up a new Gitlab instance, generate new
+  If you're setting up a new GitLab instance, generate new
   secrets. You for instance use <literal>tr -dc A-Za-z0-9 <
   /dev/urandom | head -c 128 > /var/keys/gitlab/db</literal> to
   generate a new db secret. Make sure the files can be read by, and
   only by, the user specified by <link
-  linkend="opt-services.gitlab.user">services.gitlab.user</link>. Gitlab
+  linkend="opt-services.gitlab.user">services.gitlab.user</link>. GitLab
   encrypts sensitive data stored in the database. If you're restoring
-  an existing Gitlab instance, you must specify the secrets secret
-  from <literal>config/secrets.yml</literal> located in your Gitlab
+  an existing GitLab instance, you must specify the secrets secret
+  from <literal>config/secrets.yml</literal> located in your GitLab
   state folder.
  </para>

  <para>
-  When <literal>icoming_mail.enabled</literal> is set to <literal>true</literal>
+  When <literal>incoming_mail.enabled</literal> is set to <literal>true</literal>
   in <link linkend="opt-services.gitlab.extraConfig">extraConfig</link> an additional
   service called <literal>gitlab-mailroom</literal> is enabled for fetching incoming mail.
  </para>
@@ -112,21 +112,40 @@ services.gitlab = {
  <section xml:id="module-services-gitlab-maintenance">
  <title>Maintenance</title>

+ <section xml:id="module-services-gitlab-maintenance-backups">
+  <title>Backups</title>
   <para>
-   You can run Gitlab's rake tasks with <literal>gitlab-rake</literal> which
-   will be available on the system when gitlab is enabled. You will have to run
-   the command as the user that you configured to run gitlab with.
+   Backups can be configured with the options in <link
+   linkend="opt-services.gitlab.backup.keepTime">services.gitlab.backup</link>. Use
+   the <link
+   linkend="opt-services.gitlab.backup.startAt">services.gitlab.backup.startAt</link>
+   option to configure regular backups.
   </para>

   <para>
-   For example, to backup a Gitlab instance:
+   To run a manual backup, start the <literal>gitlab-backup</literal> service:
   <screen>
-  <prompt>$ </prompt>sudo -u git -H gitlab-rake gitlab:backup:create
+  <prompt>$ </prompt>systemctl start gitlab-backup.service
   </screen>
+  </para>
+ </section>
+
+ <section xml:id="module-services-gitlab-maintenance-rake">
+  <title>Rake tasks</title>
+
+  <para>
+   You can run GitLab's rake tasks with <literal>gitlab-rake</literal>
+   which will be available on the system when GitLab is enabled. You
+   will have to run the command as the user that you configured to run
+   GitLab with.
+  </para>
+
+  <para>
    A list of all availabe rake tasks can be obtained by running:
   <screen>
   <prompt>$ </prompt>sudo -u git -H gitlab-rake -T
   </screen>
   </para>
  </section>
+ </section>
 </chapter>
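Taken together, the backup options added above can be combined roughly as in the following sketch. Only the option names (services.gitlab.backup.startAt, keepTime, uploadOptions) come from the module in this commit; the schedule, retention value, and bucket name are illustrative placeholders.

{
  services.gitlab.backup = {
    # Schedules the gitlab-backup.service unit; the time is an illustrative placeholder.
    startAt = "03:00";
    # Retention window handed to GitLab's backup task; 48 is an arbitrary example value.
    keepTime = 48;
    # Added verbatim under production -> backup -> upload in config/gitlab.yml.
    uploadOptions = {
      connection = {
        provider = "AWS";
        region = "eu-north-1";
      };
      remote_directory = "my-gitlab-backups"; # example bucket name
    };
  };
}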
@@ -183,12 +183,12 @@ in {
    };

    package = mkOption {
-     default = pkgs.home-assistant.overrideAttrs (oldAttrs: {
-       doInstallCheck = false;
+     default = pkgs.home-assistant.overridePythonAttrs (oldAttrs: {
+       doCheck = false;
      });
      defaultText = literalExample ''
-       pkgs.home-assistant.overrideAttrs (oldAttrs: {
-         doInstallCheck = false;
+       pkgs.home-assistant.overridePythonAttrs (oldAttrs: {
+         doCheck = false;
        })
      '';
      type = types.package;
@@ -53,7 +53,7 @@ in
    description = "MAME TUN/TAP Ethernet interface";
    after = [ "network.target" ];
    wantedBy = [ "multi-user.target" ];
-   path = [ pkgs.iproute ];
+   path = [ pkgs.iproute2 ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
third_party/nixpkgs/nixos/modules/services/misc/matrix-appservice-irc.nix (new file, 228 lines)
@@ -0,0 +1,228 @@
|
||||||
|
{ config, pkgs, lib, ... }:
|
||||||
|
|
||||||
|
with lib;
|
||||||
|
|
||||||
|
let
|
||||||
|
cfg = config.services.matrix-appservice-irc;
|
||||||
|
|
||||||
|
pkg = pkgs.matrix-appservice-irc;
|
||||||
|
bin = "${pkg}/bin/matrix-appservice-irc";
|
||||||
|
|
||||||
|
jsonType = (pkgs.formats.json {}).type;
|
||||||
|
|
||||||
|
configFile = pkgs.runCommandNoCC "matrix-appservice-irc.yml" {
|
||||||
|
# Because this program will be run at build time, we need `nativeBuildInputs`
|
||||||
|
nativeBuildInputs = [ (pkgs.python3.withPackages (ps: [ ps.pyyaml ps.jsonschema ])) ];
|
||||||
|
preferLocalBuild = true;
|
||||||
|
|
||||||
|
config = builtins.toJSON cfg.settings;
|
||||||
|
passAsFile = [ "config" ];
|
||||||
|
} ''
|
||||||
|
# The schema is given as yaml, we need to convert it to json
|
||||||
|
python -c 'import json; import yaml; import sys; json.dump(yaml.safe_load(sys.stdin), sys.stdout)' \
|
||||||
|
< ${pkg}/lib/node_modules/matrix-appservice-irc/config.schema.yml \
|
||||||
|
> config.schema.json
|
||||||
|
python -m jsonschema config.schema.json -i $configPath
|
||||||
|
cp "$configPath" "$out"
|
||||||
|
'';
|
||||||
|
registrationFile = "/var/lib/matrix-appservice-irc/registration.yml";
|
||||||
|
in {
|
||||||
|
options.services.matrix-appservice-irc = with types; {
|
||||||
|
enable = mkEnableOption "the Matrix/IRC bridge";
|
||||||
|
|
||||||
|
port = mkOption {
|
||||||
|
type = port;
|
||||||
|
description = "The port to listen on";
|
||||||
|
default = 8009;
|
||||||
|
};
|
||||||
|
|
||||||
|
needBindingCap = mkOption {
|
||||||
|
type = bool;
|
||||||
|
description = "Whether the daemon needs to bind to ports below 1024 (e.g. for the ident service)";
|
||||||
|
default = false;
|
||||||
|
};
|
||||||
|
|
||||||
|
passwordEncryptionKeyLength = mkOption {
|
||||||
|
type = ints.unsigned;
|
||||||
|
description = "Length of the key to encrypt IRC passwords with";
|
||||||
|
default = 4096;
|
||||||
|
example = 8192;
|
||||||
|
};
|
||||||
|
|
||||||
|
registrationUrl = mkOption {
|
||||||
|
type = str;
|
||||||
|
description = ''
|
||||||
|
The URL where the application service is listening for homeserver requests,
|
||||||
|
from the Matrix homeserver perspective.
|
||||||
|
'';
|
||||||
|
example = "http://localhost:8009";
|
||||||
|
};
|
||||||
|
|
||||||
|
localpart = mkOption {
|
||||||
|
type = str;
|
||||||
|
description = "The user_id localpart to assign to the appservice";
|
||||||
|
default = "appservice-irc";
|
||||||
|
};
|
||||||
|
|
||||||
|
settings = mkOption {
|
||||||
|
description = ''
|
||||||
|
Configuration for the appservice, see
|
||||||
|
<link xlink:href="https://github.com/matrix-org/matrix-appservice-irc/blob/${pkgs.matrix-appservice-irc.version}/config.sample.yaml"/>
|
||||||
|
for supported values
|
||||||
|
'';
|
||||||
|
default = {};
|
||||||
|
type = submodule {
|
||||||
|
freeformType = jsonType;
|
||||||
|
|
||||||
|
options = {
|
||||||
|
homeserver = mkOption {
|
||||||
|
description = "Homeserver configuration";
|
||||||
|
default = {};
|
||||||
|
type = submodule {
|
||||||
|
freeformType = jsonType;
|
||||||
|
|
||||||
|
options = {
|
||||||
|
url = mkOption {
|
||||||
|
type = str;
|
||||||
|
description = "The URL to the home server for client-server API calls";
|
||||||
|
};
|
||||||
|
|
||||||
|
domain = mkOption {
|
||||||
|
type = str;
|
||||||
|
description = ''
|
||||||
|
The 'domain' part for user IDs on this home server. Usually
|
||||||
|
(but not always) is the "domain name" part of the homeserver URL.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
database = mkOption {
|
||||||
|
default = {};
|
||||||
|
description = "Configuration for the database";
|
||||||
|
type = submodule {
|
||||||
|
freeformType = jsonType;
|
||||||
|
|
||||||
|
options = {
|
||||||
|
engine = mkOption {
|
||||||
|
type = str;
|
||||||
|
description = "Which database engine to use";
|
||||||
|
default = "nedb";
|
||||||
|
example = "postgres";
|
||||||
|
};
|
||||||
|
|
||||||
|
connectionString = mkOption {
|
||||||
|
type = str;
|
||||||
|
description = "The database connection string";
|
||||||
|
default = "nedb://var/lib/matrix-appservice-irc/data";
|
||||||
|
example = "postgres://username:password@host:port/databasename";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
ircService = mkOption {
|
||||||
|
default = {};
|
||||||
|
description = "IRC bridge configuration";
|
||||||
|
type = submodule {
|
||||||
|
freeformType = jsonType;
|
||||||
|
|
||||||
|
options = {
|
||||||
|
passwordEncryptionKeyPath = mkOption {
|
||||||
|
type = str;
|
||||||
|
description = ''
|
||||||
|
Location of the key with which IRC passwords are encrypted
|
||||||
|
for storage. Will be generated on first run if not present.
|
||||||
|
'';
|
||||||
|
default = "/var/lib/matrix-appservice-irc/passkey.pem";
|
||||||
|
};
|
||||||
|
|
||||||
|
servers = mkOption {
|
||||||
|
type = submodule { freeformType = jsonType; };
|
||||||
|
description = "IRC servers to connect to";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
config = mkIf cfg.enable {
|
||||||
|
systemd.services.matrix-appservice-irc = {
|
||||||
|
description = "Matrix-IRC bridge";
|
||||||
|
before = [ "matrix-synapse.service" ]; # So the registration can be used by Synapse
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
|
||||||
|
preStart = ''
|
||||||
|
umask 077
|
||||||
|
# Generate key for crypting passwords
|
||||||
|
if ! [ -f "${cfg.settings.ircService.passwordEncryptionKeyPath}" ]; then
|
||||||
|
${pkgs.openssl}/bin/openssl genpkey \
|
||||||
|
-out "${cfg.settings.ircService.passwordEncryptionKeyPath}" \
|
||||||
|
-outform PEM \
|
||||||
|
-algorithm RSA \
|
||||||
|
-pkeyopt "rsa_keygen_bits:${toString cfg.passwordEncryptionKeyLength}"
|
||||||
|
fi
|
||||||
|
# Generate registration file
|
||||||
|
if ! [ -f "${registrationFile}" ]; then
|
||||||
|
# The easy case: the file has not been generated yet
|
||||||
|
${bin} --generate-registration --file ${registrationFile} --config ${configFile} --url ${cfg.registrationUrl} --localpart ${cfg.localpart}
|
||||||
|
else
|
||||||
|
# The tricky case: we already have a generation file. Because the NixOS configuration might have changed, we need to
|
||||||
|
# regenerate it. But this would give the service a new random ID and tokens, so we need to back up and restore them.
|
||||||
|
# 1. Backup
|
||||||
|
id=$(grep "^id:.*$" ${registrationFile})
|
||||||
|
hs_token=$(grep "^hs_token:.*$" ${registrationFile})
|
||||||
|
as_token=$(grep "^as_token:.*$" ${registrationFile})
|
||||||
|
# 2. Regenerate
|
||||||
|
${bin} --generate-registration --file ${registrationFile} --config ${configFile} --url ${cfg.registrationUrl} --localpart ${cfg.localpart}
|
||||||
|
# 3. Restore
|
||||||
|
sed -i "s/^id:.*$/$id/g" ${registrationFile}
|
||||||
|
sed -i "s/^hs_token:.*$/$hs_token/g" ${registrationFile}
|
||||||
|
sed -i "s/^as_token:.*$/$as_token/g" ${registrationFile}
|
||||||
|
fi
|
||||||
|
# Allow synapse access to the registration
|
||||||
|
if ${getBin pkgs.glibc}/bin/getent group matrix-synapse > /dev/null; then
|
||||||
|
chgrp matrix-synapse ${registrationFile}
|
||||||
|
chmod g+r ${registrationFile}
|
||||||
|
fi
|
||||||
|
'';
|
||||||
|
|
||||||
|
serviceConfig = rec {
|
||||||
|
Type = "simple";
|
||||||
|
ExecStart = "${bin} --config ${configFile} --file ${registrationFile} --port ${toString cfg.port}";
|
||||||
|
|
||||||
|
ProtectHome = true;
|
||||||
|
PrivateDevices = true;
|
||||||
|
ProtectKernelTunables = true;
|
||||||
|
ProtectKernelModules = true;
|
||||||
|
ProtectControlGroups = true;
|
||||||
|
StateDirectory = "matrix-appservice-irc";
|
||||||
|
StateDirectoryMode = "755";
|
||||||
|
|
||||||
|
User = "matrix-appservice-irc";
|
||||||
|
Group = "matrix-appservice-irc";
|
||||||
|
|
||||||
|
CapabilityBoundingSet = [ "CAP_CHOWN" ] ++ optional (cfg.needBindingCap) "CAP_NET_BIND_SERVICE";
|
||||||
|
AmbientCapabilities = CapabilityBoundingSet;
|
||||||
|
NoNewPrivileges = true;
|
||||||
|
|
||||||
|
LockPersonality = true;
|
||||||
|
RestrictRealtime = true;
|
||||||
|
PrivateMounts = true;
|
||||||
|
SystemCallFilter = "~@aio @clock @cpu-emulation @debug @keyring @memlock @module @mount @obsolete @raw-io @setuid @swap";
|
||||||
|
SystemCallArchitectures = "native";
|
||||||
|
RestrictAddressFamilies = "AF_INET AF_INET6";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
users.groups.matrix-appservice-irc = {};
|
||||||
|
users.users.matrix-appservice-irc = {
|
||||||
|
description = "Service user for the Matrix-IRC bridge";
|
||||||
|
group = "matrix-appservice-irc";
|
||||||
|
isSystemUser = true;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
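A minimal usage sketch for the module above. The homeserver URL, domain, and the IRC server attribute set are placeholder values, and the per-server fields merely follow the upstream config.sample.yaml referenced in the settings description rather than anything defined in this commit.

{
  services.matrix-appservice-irc = {
    enable = true;
    # URL at which the homeserver can reach this bridge; matches the default port above.
    registrationUrl = "http://localhost:8009";
    settings = {
      homeserver = {
        url = "http://localhost:8008"; # client-server API endpoint (placeholder)
        domain = "example.org";        # the "domain" part of user IDs (placeholder)
      };
      # Freeform per upstream's config.sample.yaml; server name and fields are illustrative.
      ircService.servers."irc.example.net" = {
        name = "ExampleNet";
        dynamicChannels.enabled = true;
      };
    };
  };
}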
third_party/nixpkgs/nixos/modules/services/misc/ombi.nix (new file, 80 lines)
@@ -0,0 +1,80 @@
|
||||||
|
{ config, pkgs, lib, ... }:
|
||||||
|
|
||||||
|
with lib;
|
||||||
|
|
||||||
|
let cfg = config.services.ombi;
|
||||||
|
|
||||||
|
in {
|
||||||
|
options = {
|
||||||
|
services.ombi = {
|
||||||
|
enable = mkEnableOption ''
|
||||||
|
Ombi.
|
||||||
|
Optionally see <link xlink:href="https://docs.ombi.app/info/reverse-proxy"/>
|
||||||
|
on how to set up a reverse proxy
|
||||||
|
'';
|
||||||
|
|
||||||
|
dataDir = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "/var/lib/ombi";
|
||||||
|
description = "The directory where Ombi stores its data files.";
|
||||||
|
};
|
||||||
|
|
||||||
|
port = mkOption {
|
||||||
|
type = types.port;
|
||||||
|
default = 5000;
|
||||||
|
description = "The port for the Ombi web interface.";
|
||||||
|
};
|
||||||
|
|
||||||
|
openFirewall = mkOption {
|
||||||
|
type = types.bool;
|
||||||
|
default = false;
|
||||||
|
description = "Open ports in the firewall for the Ombi web interface.";
|
||||||
|
};
|
||||||
|
|
||||||
|
user = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "ombi";
|
||||||
|
description = "User account under which Ombi runs.";
|
||||||
|
};
|
||||||
|
|
||||||
|
group = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "ombi";
|
||||||
|
description = "Group under which Ombi runs.";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config = mkIf cfg.enable {
|
||||||
|
systemd.tmpfiles.rules = [
|
||||||
|
"d '${cfg.dataDir}' 0700 ${cfg.user} ${cfg.group} - -"
|
||||||
|
];
|
||||||
|
|
||||||
|
systemd.services.ombi = {
|
||||||
|
description = "Ombi";
|
||||||
|
after = [ "network.target" ];
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "simple";
|
||||||
|
User = cfg.user;
|
||||||
|
Group = cfg.group;
|
||||||
|
ExecStart = "${pkgs.ombi}/bin/Ombi --storage '${cfg.dataDir}' --host 'http://*:${toString cfg.port}'";
|
||||||
|
Restart = "on-failure";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
networking.firewall = mkIf cfg.openFirewall {
|
||||||
|
allowedTCPPorts = [ cfg.port ];
|
||||||
|
};
|
||||||
|
|
||||||
|
users.users = mkIf (cfg.user == "ombi") {
|
||||||
|
ombi = {
|
||||||
|
group = cfg.group;
|
||||||
|
home = cfg.dataDir;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
users.groups = mkIf (cfg.group == "ombi") { ombi = { }; };
|
||||||
|
};
|
||||||
|
}
|
|
@ -225,7 +225,7 @@ in {
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
config = mkIf cfg.enable {
|
config = mkIf cfg.enable {
|
||||||
environment.systemPackages = [ datadogPkg pkgs.sysstat pkgs.procps pkgs.iproute ];
|
environment.systemPackages = [ datadogPkg pkgs.sysstat pkgs.procps pkgs.iproute2 ];
|
||||||
|
|
||||||
users.users.datadog = {
|
users.users.datadog = {
|
||||||
description = "Datadog Agent User";
|
description = "Datadog Agent User";
|
||||||
|
@ -239,7 +239,7 @@ in {
|
||||||
|
|
||||||
systemd.services = let
|
systemd.services = let
|
||||||
makeService = attrs: recursiveUpdate {
|
makeService = attrs: recursiveUpdate {
|
||||||
path = [ datadogPkg pkgs.python pkgs.sysstat pkgs.procps pkgs.iproute ];
|
path = [ datadogPkg pkgs.python pkgs.sysstat pkgs.procps pkgs.iproute2 ];
|
||||||
wantedBy = [ "multi-user.target" ];
|
wantedBy = [ "multi-user.target" ];
|
||||||
serviceConfig = {
|
serviceConfig = {
|
||||||
User = "datadog";
|
User = "datadog";
|
||||||
|
|
|
@ -15,6 +15,7 @@ let
|
||||||
SERVER_PROTOCOL = cfg.protocol;
|
SERVER_PROTOCOL = cfg.protocol;
|
||||||
SERVER_HTTP_ADDR = cfg.addr;
|
SERVER_HTTP_ADDR = cfg.addr;
|
||||||
SERVER_HTTP_PORT = cfg.port;
|
SERVER_HTTP_PORT = cfg.port;
|
||||||
|
SERVER_SOCKET = cfg.socket;
|
||||||
SERVER_DOMAIN = cfg.domain;
|
SERVER_DOMAIN = cfg.domain;
|
||||||
SERVER_ROOT_URL = cfg.rootUrl;
|
SERVER_ROOT_URL = cfg.rootUrl;
|
||||||
SERVER_STATIC_ROOT_PATH = cfg.staticRootPath;
|
SERVER_STATIC_ROOT_PATH = cfg.staticRootPath;
|
||||||
|
@ -291,6 +292,12 @@ in {
|
||||||
type = types.int;
|
type = types.int;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
socket = mkOption {
|
||||||
|
description = "Listening socket.";
|
||||||
|
default = "/run/grafana/grafana.sock";
|
||||||
|
type = types.str;
|
||||||
|
};
|
||||||
|
|
||||||
domain = mkOption {
|
domain = mkOption {
|
||||||
description = "The public facing domain name used to access grafana from a browser.";
|
description = "The public facing domain name used to access grafana from a browser.";
|
||||||
default = "localhost";
|
default = "localhost";
|
||||||
|
@ -622,6 +629,8 @@ in {
|
||||||
serviceConfig = {
|
serviceConfig = {
|
||||||
WorkingDirectory = cfg.dataDir;
|
WorkingDirectory = cfg.dataDir;
|
||||||
User = "grafana";
|
User = "grafana";
|
||||||
|
RuntimeDirectory = "grafana";
|
||||||
|
RuntimeDirectoryMode = "0755";
|
||||||
};
|
};
|
||||||
preStart = ''
|
preStart = ''
|
||||||
ln -fs ${cfg.package}/share/grafana/conf ${cfg.dataDir}
|
ln -fs ${cfg.package}/share/grafana/conf ${cfg.dataDir}
|
||||||
|
|
|
@@ -124,6 +124,17 @@ in {
        };
      '';
    };

+   enableAnalyticsReporting = mkOption {
+     type = types.bool;
+     default = false;
+     description = ''
+       Enable reporting of anonymous usage statistics to Netdata Inc. via either
+       Google Analytics (in versions prior to 1.29.4), or Netdata Inc.'s
+       self-hosted PostHog (in versions 1.29.4 and later).
+       See: <link xlink:href="https://learn.netdata.cloud/docs/agent/anonymous-statistics"/>
+     '';
+   };
  };
  };

@@ -140,8 +151,12 @@ in {
    wantedBy = [ "multi-user.target" ];
    path = (with pkgs; [ curl gawk which ]) ++ lib.optional cfg.python.enable
      (pkgs.python3.withPackages cfg.python.extraPackages);
+   environment = {
+     PYTHONPATH = "${cfg.package}/libexec/netdata/python.d/python_modules";
+   } // lib.optionalAttrs (!cfg.enableAnalyticsReporting) {
+     DO_NOT_TRACK = "1";
+   };
    serviceConfig = {
-     Environment="PYTHONPATH=${cfg.package}/libexec/netdata/python.d/python_modules";
      ExecStart = "${cfg.package}/bin/netdata -P /run/netdata/netdata.pid -D -c ${configFile}";
      ExecReload = "${pkgs.util-linux}/bin/kill -s HUP -s USR1 -s USR2 $MAINPID";
      TimeoutStopSec = 60;
@@ -28,6 +28,7 @@ let
    "blackbox"
    "collectd"
    "dnsmasq"
+   "domain"
    "dovecot"
    "fritzbox"
    "json"
@@ -43,6 +44,7 @@ let
    "nginx"
    "nginxlog"
    "node"
+   "openldap"
    "openvpn"
    "postfix"
    "postgres"
third_party/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/domain.nix (new file, 19 lines)
@@ -0,0 +1,19 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.domain;
+in
+{
+  port = 9222;
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-domain-exporter}/bin/domain_exporter \
+          --bind ${cfg.listenAddress}:${toString cfg.port} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
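As a rough usage sketch of the exporter above: enable, listenAddress, and extraFlags come from the common exporter options shared by all Prometheus exporter modules, while port 9222 is the default defined in this file; the listen address is a placeholder.

{
  services.prometheus.exporters.domain = {
    enable = true;
    listenAddress = "127.0.0.1"; # common exporter option; placeholder address
    port = 9222;                 # default defined in the module above
    extraFlags = [ ];            # forwarded to domain_exporter as shown in ExecStart
  };
}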
third_party/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/openldap.nix (new file, 67 lines)
@@ -0,0 +1,67 @@
|
||||||
|
{ config, lib, pkgs, ... }:
|
||||||
|
|
||||||
|
with lib;
|
||||||
|
|
||||||
|
let
|
||||||
|
cfg = config.services.prometheus.exporters.openldap;
|
||||||
|
in {
|
||||||
|
port = 9330;
|
||||||
|
extraOpts = {
|
||||||
|
ldapCredentialFile = mkOption {
|
||||||
|
type = types.path;
|
||||||
|
example = "/run/keys/ldap_pass";
|
||||||
|
description = ''
|
||||||
|
Environment file to contain the credentials to authenticate against
|
||||||
|
<package>openldap</package>.
|
||||||
|
|
||||||
|
The file should look like this:
|
||||||
|
<programlisting>
|
||||||
|
---
|
||||||
|
ldapUser: "cn=monitoring,cn=Monitor"
|
||||||
|
ldapPass: "secret"
|
||||||
|
</programlisting>
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
protocol = mkOption {
|
||||||
|
default = "tcp";
|
||||||
|
example = "udp";
|
||||||
|
type = types.str;
|
||||||
|
description = ''
|
||||||
|
Which protocol to use to connect against <package>openldap</package>.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
ldapAddr = mkOption {
|
||||||
|
default = "localhost:389";
|
||||||
|
type = types.str;
|
||||||
|
description = ''
|
||||||
|
Address of the <package>openldap</package>-instance.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
metricsPath = mkOption {
|
||||||
|
default = "/metrics";
|
||||||
|
type = types.str;
|
||||||
|
description = ''
|
||||||
|
URL path where metrics should be exposed.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
interval = mkOption {
|
||||||
|
default = "30s";
|
||||||
|
type = types.str;
|
||||||
|
example = "1m";
|
||||||
|
description = ''
|
||||||
|
Scrape interval of the exporter.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
serviceOpts.serviceConfig = {
|
||||||
|
ExecStart = ''
|
||||||
|
${pkgs.prometheus-openldap-exporter}/bin/openldap_exporter \
|
||||||
|
--promAddr ${cfg.listenAddress}:${toString cfg.port} \
|
||||||
|
--metrPath ${cfg.metricsPath} \
|
||||||
|
--ldapNet ${cfg.protocol} \
|
||||||
|
--interval ${cfg.interval} \
|
||||||
|
--config ${cfg.ldapCredentialFile} \
|
||||||
|
${concatStringsSep " \\\n " cfg.extraFlags}
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
}
|
|
@ -113,7 +113,7 @@ in {
|
||||||
description = "scollector metrics collector (part of Bosun)";
|
description = "scollector metrics collector (part of Bosun)";
|
||||||
wantedBy = [ "multi-user.target" ];
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
|
||||||
path = [ pkgs.coreutils pkgs.iproute ];
|
path = [ pkgs.coreutils pkgs.iproute2 ];
|
||||||
|
|
||||||
serviceConfig = {
|
serviceConfig = {
|
||||||
User = cfg.user;
|
User = cfg.user;
|
||||||
|
|
|
@ -191,7 +191,7 @@ in
|
||||||
ExecStop = "${cfg.package}/bin/consul leave";
|
ExecStop = "${cfg.package}/bin/consul leave";
|
||||||
});
|
});
|
||||||
|
|
||||||
path = with pkgs; [ iproute gnugrep gawk consul ];
|
path = with pkgs; [ iproute2 gnugrep gawk consul ];
|
||||||
preStart = ''
|
preStart = ''
|
||||||
mkdir -m 0700 -p ${dataDir}
|
mkdir -m 0700 -p ${dataDir}
|
||||||
chown -R consul ${dataDir}
|
chown -R consul ${dataDir}
|
||||||
|
|
|
@ -10,7 +10,7 @@ let
|
||||||
name = "ircd-hybrid-service";
|
name = "ircd-hybrid-service";
|
||||||
scripts = [ "=>/bin" ./control.in ];
|
scripts = [ "=>/bin" ./control.in ];
|
||||||
substFiles = [ "=>/conf" ./ircd.conf ];
|
substFiles = [ "=>/conf" ./ircd.conf ];
|
||||||
inherit (pkgs) ircdHybrid coreutils su iproute gnugrep procps;
|
inherit (pkgs) ircdHybrid coreutils su iproute2 gnugrep procps;
|
||||||
|
|
||||||
ipv6Enabled = boolToString config.networking.enableIPv6;
|
ipv6Enabled = boolToString config.networking.enableIPv6;
|
||||||
|
|
||||||
|
|
|
@ -85,7 +85,7 @@ in
|
||||||
|
|
||||||
config = mkIf cfg.enable {
|
config = mkIf cfg.enable {
|
||||||
|
|
||||||
environment.systemPackages = [ pkgs.libreswan pkgs.iproute ];
|
environment.systemPackages = [ pkgs.libreswan pkgs.iproute2 ];
|
||||||
|
|
||||||
systemd.services.ipsec = {
|
systemd.services.ipsec = {
|
||||||
description = "Internet Key Exchange (IKE) Protocol Daemon for IPsec";
|
description = "Internet Key Exchange (IKE) Protocol Daemon for IPsec";
|
||||||
|
|
|
@ -465,7 +465,7 @@ in {
|
||||||
restartTriggers = [ configFile overrideNameserversScript ];
|
restartTriggers = [ configFile overrideNameserversScript ];
|
||||||
|
|
||||||
# useful binaries for user-specified hooks
|
# useful binaries for user-specified hooks
|
||||||
path = [ pkgs.iproute pkgs.util-linux pkgs.coreutils ];
|
path = [ pkgs.iproute2 pkgs.util-linux pkgs.coreutils ];
|
||||||
aliases = [ "dbus-org.freedesktop.nm-dispatcher.service" ];
|
aliases = [ "dbus-org.freedesktop.nm-dispatcher.service" ];
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -63,7 +63,7 @@ let
|
||||||
wantedBy = optional cfg.autoStart "multi-user.target";
|
wantedBy = optional cfg.autoStart "multi-user.target";
|
||||||
after = [ "network.target" ];
|
after = [ "network.target" ];
|
||||||
|
|
||||||
path = [ pkgs.iptables pkgs.iproute pkgs.nettools ];
|
path = [ pkgs.iptables pkgs.iproute2 pkgs.nettools ];
|
||||||
|
|
||||||
serviceConfig.ExecStart = "@${openvpn}/sbin/openvpn openvpn --suppress-timestamps --config ${configFile}";
|
serviceConfig.ExecStart = "@${openvpn}/sbin/openvpn openvpn --suppress-timestamps --config ${configFile}";
|
||||||
serviceConfig.Restart = "always";
|
serviceConfig.Restart = "always";
|
||||||
|
|
|
@ -132,7 +132,7 @@ in
|
||||||
{ table = "mangle"; command = "OUTPUT ! -o lo -p tcp -m connmark --mark 0x02/0x0f -j CONNMARK --restore-mark --mask 0x0f"; }
|
{ table = "mangle"; command = "OUTPUT ! -o lo -p tcp -m connmark --mark 0x02/0x0f -j CONNMARK --restore-mark --mask 0x0f"; }
|
||||||
];
|
];
|
||||||
in {
|
in {
|
||||||
path = [ pkgs.iptables pkgs.iproute pkgs.procps ];
|
path = [ pkgs.iptables pkgs.iproute2 pkgs.procps ];
|
||||||
|
|
||||||
preStart = ''
|
preStart = ''
|
||||||
# Cleanup old iptables entries which might be still there
|
# Cleanup old iptables entries which might be still there
|
||||||
|
|
|
@ -63,7 +63,7 @@ in {
|
||||||
description = "strongSwan IPsec IKEv1/IKEv2 daemon using swanctl";
|
description = "strongSwan IPsec IKEv1/IKEv2 daemon using swanctl";
|
||||||
wantedBy = [ "multi-user.target" ];
|
wantedBy = [ "multi-user.target" ];
|
||||||
after = [ "network-online.target" ];
|
after = [ "network-online.target" ];
|
||||||
path = with pkgs; [ kmod iproute iptables util-linux ];
|
path = with pkgs; [ kmod iproute2 iptables util-linux ];
|
||||||
environment = {
|
environment = {
|
||||||
STRONGSWAN_CONF = pkgs.writeTextFile {
|
STRONGSWAN_CONF = pkgs.writeTextFile {
|
||||||
name = "strongswan.conf";
|
name = "strongswan.conf";
|
||||||
|
|
|
@ -152,7 +152,7 @@ in
|
||||||
systemd.services.strongswan = {
|
systemd.services.strongswan = {
|
||||||
description = "strongSwan IPSec Service";
|
description = "strongSwan IPSec Service";
|
||||||
wantedBy = [ "multi-user.target" ];
|
wantedBy = [ "multi-user.target" ];
|
||||||
path = with pkgs; [ kmod iproute iptables util-linux ]; # XXX Linux
|
path = with pkgs; [ kmod iproute2 iptables util-linux ]; # XXX Linux
|
||||||
after = [ "network-online.target" ];
|
after = [ "network-online.target" ];
|
||||||
environment = {
|
environment = {
|
||||||
STRONGSWAN_CONF = strongswanConf { inherit setup connections ca secretsFile managePlugins enabledPlugins; };
|
STRONGSWAN_CONF = strongswanConf { inherit setup connections ca secretsFile managePlugins enabledPlugins; };
|
||||||
|
|
|
@ -63,7 +63,7 @@ let
|
||||||
|
|
||||||
preSetup = mkOption {
|
preSetup = mkOption {
|
||||||
example = literalExample ''
|
example = literalExample ''
|
||||||
${pkgs.iproute}/bin/ip netns add foo
|
${pkgs.iproute2}/bin/ip netns add foo
|
||||||
'';
|
'';
|
||||||
default = "";
|
default = "";
|
||||||
type = with types; coercedTo (listOf str) (concatStringsSep "\n") lines;
|
type = with types; coercedTo (listOf str) (concatStringsSep "\n") lines;
|
||||||
|
@ -278,7 +278,7 @@ let
|
||||||
wantedBy = [ "multi-user.target" "wireguard-${interfaceName}.service" ];
|
wantedBy = [ "multi-user.target" "wireguard-${interfaceName}.service" ];
|
||||||
environment.DEVICE = interfaceName;
|
environment.DEVICE = interfaceName;
|
||||||
environment.WG_ENDPOINT_RESOLUTION_RETRIES = "infinity";
|
environment.WG_ENDPOINT_RESOLUTION_RETRIES = "infinity";
|
||||||
path = with pkgs; [ iproute wireguard-tools ];
|
path = with pkgs; [ iproute2 wireguard-tools ];
|
||||||
|
|
||||||
serviceConfig = {
|
serviceConfig = {
|
||||||
Type = "oneshot";
|
Type = "oneshot";
|
||||||
|
@ -333,7 +333,7 @@ let
|
||||||
after = [ "network.target" "network-online.target" ];
|
after = [ "network.target" "network-online.target" ];
|
||||||
wantedBy = [ "multi-user.target" ];
|
wantedBy = [ "multi-user.target" ];
|
||||||
environment.DEVICE = name;
|
environment.DEVICE = name;
|
||||||
path = with pkgs; [ kmod iproute wireguard-tools ];
|
path = with pkgs; [ kmod iproute2 wireguard-tools ];
|
||||||
|
|
||||||
serviceConfig = {
|
serviceConfig = {
|
||||||
Type = "oneshot";
|
Type = "oneshot";
|
||||||
|
|
|
@ -243,7 +243,7 @@ in
|
||||||
restartTriggers = [ fail2banConf jailConf pathsConf ];
|
restartTriggers = [ fail2banConf jailConf pathsConf ];
|
||||||
reloadIfChanged = true;
|
reloadIfChanged = true;
|
||||||
|
|
||||||
path = [ cfg.package cfg.packageFirewall pkgs.iproute ];
|
path = [ cfg.package cfg.packageFirewall pkgs.iproute2 ];
|
||||||
|
|
||||||
unitConfig.Documentation = "man:fail2ban(1)";
|
unitConfig.Documentation = "man:fail2ban(1)";
|
||||||
|
|
||||||
|
|
|
@ -57,6 +57,26 @@ in
|
||||||
services.privacyidea = {
|
services.privacyidea = {
|
||||||
enable = mkEnableOption "PrivacyIDEA";
|
enable = mkEnableOption "PrivacyIDEA";
|
||||||
|
|
||||||
|
environmentFile = mkOption {
|
||||||
|
type = types.nullOr types.path;
|
||||||
|
default = null;
|
||||||
|
example = "/root/privacyidea.env";
|
||||||
|
description = ''
|
||||||
|
File to load as environment file. Environment variables
|
||||||
|
from this file will be interpolated into the config file
|
||||||
|
using <package>envsubst</package> which is helpful for specifying
|
||||||
|
secrets:
|
||||||
|
<programlisting>
|
||||||
|
{ <xref linkend="opt-services.privacyidea.secretKey" /> = "$SECRET"; }
|
||||||
|
</programlisting>
|
||||||
|
|
||||||
|
The environment-file can now specify the actual secret key:
|
||||||
|
<programlisting>
|
||||||
|
SECRET=veryverytopsecret
|
||||||
|
</programlisting>
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
stateDir = mkOption {
|
stateDir = mkOption {
|
||||||
type = types.str;
|
type = types.str;
|
||||||
default = "/var/lib/privacyidea";
|
default = "/var/lib/privacyidea";
|
||||||
|
@ -206,7 +226,7 @@ in
|
||||||
wantedBy = [ "multi-user.target" ];
|
wantedBy = [ "multi-user.target" ];
|
||||||
after = [ "postgresql.service" ];
|
after = [ "postgresql.service" ];
|
||||||
path = with pkgs; [ openssl ];
|
path = with pkgs; [ openssl ];
|
||||||
environment.PRIVACYIDEA_CONFIGFILE = piCfgFile;
|
environment.PRIVACYIDEA_CONFIGFILE = "${cfg.stateDir}/privacyidea.cfg";
|
||||||
preStart = let
|
preStart = let
|
||||||
pi-manage = "${pkgs.sudo}/bin/sudo -u privacyidea -HE ${penv}/bin/pi-manage";
|
pi-manage = "${pkgs.sudo}/bin/sudo -u privacyidea -HE ${penv}/bin/pi-manage";
|
||||||
pgsu = config.services.postgresql.superUser;
|
pgsu = config.services.postgresql.superUser;
|
||||||
|
@ -214,6 +234,10 @@ in
|
||||||
in ''
|
in ''
|
||||||
mkdir -p ${cfg.stateDir} /run/privacyidea
|
mkdir -p ${cfg.stateDir} /run/privacyidea
|
||||||
chown ${cfg.user}:${cfg.group} -R ${cfg.stateDir} /run/privacyidea
|
chown ${cfg.user}:${cfg.group} -R ${cfg.stateDir} /run/privacyidea
|
||||||
|
umask 077
|
||||||
|
${lib.getBin pkgs.envsubst}/bin/envsubst -o ${cfg.stateDir}/privacyidea.cfg \
|
||||||
|
-i "${piCfgFile}"
|
||||||
|
chown ${cfg.user}:${cfg.group} ${cfg.stateDir}/privacyidea.cfg
|
||||||
if ! test -e "${cfg.stateDir}/db-created"; then
|
if ! test -e "${cfg.stateDir}/db-created"; then
|
||||||
${pkgs.sudo}/bin/sudo -u ${pgsu} ${psql}/bin/createuser --no-superuser --no-createdb --no-createrole ${cfg.user}
|
${pkgs.sudo}/bin/sudo -u ${pgsu} ${psql}/bin/createuser --no-superuser --no-createdb --no-createrole ${cfg.user}
|
||||||
${pkgs.sudo}/bin/sudo -u ${pgsu} ${psql}/bin/createdb --owner ${cfg.user} privacyidea
|
${pkgs.sudo}/bin/sudo -u ${pgsu} ${psql}/bin/createdb --owner ${cfg.user} privacyidea
|
||||||
|
@ -231,6 +255,7 @@ in
|
||||||
Type = "notify";
|
Type = "notify";
|
||||||
ExecStart = "${uwsgi}/bin/uwsgi --json ${piuwsgi}";
|
ExecStart = "${uwsgi}/bin/uwsgi --json ${piuwsgi}";
|
||||||
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
|
||||||
|
EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
|
||||||
ExecStop = "${pkgs.coreutils}/bin/kill -INT $MAINPID";
|
ExecStop = "${pkgs.coreutils}/bin/kill -INT $MAINPID";
|
||||||
NotifyAccess = "main";
|
NotifyAccess = "main";
|
||||||
KillSignal = "SIGQUIT";
|
KillSignal = "SIGQUIT";
|
||||||
|
|
|
@ -108,8 +108,8 @@ in {
|
||||||
partOf = optional config.networking.firewall.enable "firewall.service";
|
partOf = optional config.networking.firewall.enable "firewall.service";
|
||||||
|
|
||||||
path = with pkgs; if config.networking.nftables.enable
|
path = with pkgs; if config.networking.nftables.enable
|
||||||
then [ nftables iproute systemd ]
|
then [ nftables iproute2 systemd ]
|
||||||
else [ iptables ipset iproute systemd ];
|
else [ iptables ipset iproute2 systemd ];
|
||||||
|
|
||||||
# The sshguard ipsets must exist before we invoke
|
# The sshguard ipsets must exist before we invoke
|
||||||
# iptables. sshguard creates the ipsets after startup if
|
# iptables. sshguard creates the ipsets after startup if
|
||||||
|
|
third_party/nixpkgs/nixos/modules/services/web-apps/calibre-web.nix (new file, 165 lines)
@@ -0,0 +1,165 @@
|
||||||
|
{ config, lib, pkgs, ... }:
|
||||||
|
|
||||||
|
let
|
||||||
|
cfg = config.services.calibre-web;
|
||||||
|
|
||||||
|
inherit (lib) concatStringsSep mkEnableOption mkIf mkOption optional optionalString types;
|
||||||
|
in
|
||||||
|
{
|
||||||
|
options = {
|
||||||
|
services.calibre-web = {
|
||||||
|
enable = mkEnableOption "Calibre-Web";
|
||||||
|
|
||||||
|
listen = {
|
||||||
|
ip = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "::1";
|
||||||
|
description = ''
|
||||||
|
IP address that Calibre-Web should listen on.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
port = mkOption {
|
||||||
|
type = types.port;
|
||||||
|
default = 8083;
|
||||||
|
description = ''
|
||||||
|
Listen port for Calibre-Web.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
dataDir = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "calibre-web";
|
||||||
|
description = ''
|
||||||
|
The directory below <filename>/var/lib</filename> where Calibre-Web stores its data.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
user = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "calibre-web";
|
||||||
|
description = "User account under which Calibre-Web runs.";
|
||||||
|
};
|
||||||
|
|
||||||
|
group = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "calibre-web";
|
||||||
|
description = "Group account under which Calibre-Web runs.";
|
||||||
|
};
|
||||||
|
|
||||||
|
openFirewall = mkOption {
|
||||||
|
type = types.bool;
|
||||||
|
default = false;
|
||||||
|
description = ''
|
||||||
|
Open ports in the firewall for the server.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
options = {
|
||||||
|
calibreLibrary = mkOption {
|
||||||
|
type = types.nullOr types.path;
|
||||||
|
default = null;
|
||||||
|
description = ''
|
||||||
|
Path to Calibre library.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
enableBookConversion = mkOption {
|
||||||
|
type = types.bool;
|
||||||
|
default = false;
|
||||||
|
description = ''
|
||||||
|
Configure path to the Calibre's ebook-convert in the DB.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
enableBookUploading = mkOption {
|
||||||
|
type = types.bool;
|
||||||
|
default = false;
|
||||||
|
description = ''
|
||||||
|
Allow books to be uploaded via Calibre-Web UI.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
reverseProxyAuth = {
|
||||||
|
enable = mkOption {
|
||||||
|
type = types.bool;
|
||||||
|
default = false;
|
||||||
|
description = ''
|
||||||
|
Enable authorization using auth proxy.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
header = mkOption {
|
||||||
|
type = types.str;
|
||||||
|
default = "";
|
||||||
|
description = ''
|
||||||
|
Auth proxy header name.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config = mkIf cfg.enable {
|
||||||
|
systemd.services.calibre-web = let
|
||||||
|
appDb = "/var/lib/${cfg.dataDir}/app.db";
|
||||||
|
gdriveDb = "/var/lib/${cfg.dataDir}/gdrive.db";
|
||||||
|
calibreWebCmd = "${pkgs.calibre-web}/bin/calibre-web -p ${appDb} -g ${gdriveDb}";
|
||||||
|
|
||||||
|
settings = concatStringsSep ", " (
|
||||||
|
[
|
||||||
|
"config_port = ${toString cfg.listen.port}"
|
||||||
|
"config_uploading = ${if cfg.options.enableBookUploading then "1" else "0"}"
|
||||||
|
"config_allow_reverse_proxy_header_login = ${if cfg.options.reverseProxyAuth.enable then "1" else "0"}"
|
||||||
|
"config_reverse_proxy_login_header_name = '${cfg.options.reverseProxyAuth.header}'"
|
||||||
|
]
|
||||||
|
++ optional (cfg.options.calibreLibrary != null) "config_calibre_dir = '${cfg.options.calibreLibrary}'"
|
||||||
|
++ optional cfg.options.enableBookConversion "config_converterpath = '${pkgs.calibre}/bin/ebook-convert'"
|
||||||
|
);
|
||||||
|
in
|
||||||
|
{
|
||||||
|
description = "Web app for browsing, reading and downloading eBooks stored in a Calibre database";
|
||||||
|
after = [ "network.target" ];
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "simple";
|
||||||
|
User = cfg.user;
|
||||||
|
Group = cfg.group;
|
||||||
|
|
||||||
|
StateDirectory = cfg.dataDir;
|
||||||
|
ExecStartPre = pkgs.writeShellScript "calibre-web-pre-start" (
|
||||||
|
''
|
||||||
|
__RUN_MIGRATIONS_AND_EXIT=1 ${calibreWebCmd}
|
||||||
|
|
||||||
|
${pkgs.sqlite}/bin/sqlite3 ${appDb} "update settings set ${settings}"
|
||||||
|
'' + optionalString (cfg.options.calibreLibrary != null) ''
|
||||||
|
test -f ${cfg.options.calibreLibrary}/metadata.db || { echo "Invalid Calibre library"; exit 1; }
|
||||||
|
''
|
||||||
|
);
|
||||||
|
|
||||||
|
ExecStart = "${calibreWebCmd} -i ${cfg.listen.ip}";
|
||||||
|
Restart = "on-failure";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
networking.firewall = mkIf cfg.openFirewall {
|
||||||
|
allowedTCPPorts = [ cfg.listen.port ];
|
||||||
|
};
|
||||||
|
|
||||||
|
users.users = mkIf (cfg.user == "calibre-web") {
|
||||||
|
calibre-web = {
|
||||||
|
isSystemUser = true;
|
||||||
|
group = cfg.group;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
users.groups = mkIf (cfg.group == "calibre-web") {
|
||||||
|
calibre-web = {};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
meta.maintainers = with lib.maintainers; [ pborzenkov ];
|
||||||
|
}
|
|
@ -87,8 +87,8 @@ in {
|
||||||
SystemCallFilter = [
|
SystemCallFilter = [
|
||||||
"@system-service"
|
"@system-service"
|
||||||
|
|
||||||
"~@chown" "~@cpu-emulation" "~@debug" "~@ipc" "~@keyring" "~@memlock"
|
"~@chown" "~@cpu-emulation" "~@debug" "~@keyring" "~@memlock"
|
||||||
"~@module" "~@obsolete" "~@privileged" "~@process" "~@raw-io"
|
"~@module" "~@obsolete" "~@privileged" "~@raw-io"
|
||||||
"~@resources" "~@setuid"
|
"~@resources" "~@setuid"
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
|
|
|
@ -154,9 +154,9 @@ let
|
||||||
|
|
||||||
${optionalString (cfg.recommendedProxySettings) ''
|
${optionalString (cfg.recommendedProxySettings) ''
|
||||||
proxy_redirect off;
|
proxy_redirect off;
|
||||||
proxy_connect_timeout 90;
|
proxy_connect_timeout 60;
|
||||||
proxy_send_timeout 90;
|
proxy_send_timeout 60;
|
||||||
proxy_read_timeout 90;
|
proxy_read_timeout 60;
|
||||||
proxy_http_version 1.0;
|
proxy_http_version 1.0;
|
||||||
include ${recommendedProxyConfig};
|
include ${recommendedProxyConfig};
|
||||||
''}
|
''}
|
||||||
|
|
third_party/nixpkgs/nixos/modules/services/web-servers/pomerium.nix (new file, 131 lines)
@@ -0,0 +1,131 @@
|
||||||
|
{ config, lib, pkgs, ... }:
|
||||||
|
|
||||||
|
with lib;
|
||||||
|
|
||||||
|
let
|
||||||
|
format = pkgs.formats.yaml {};
|
||||||
|
in
|
||||||
|
{
|
||||||
|
options.services.pomerium = {
|
||||||
|
enable = mkEnableOption "the Pomerium authenticating reverse proxy";
|
||||||
|
|
||||||
|
configFile = mkOption {
|
||||||
|
type = with types; nullOr path;
|
||||||
|
default = null;
|
||||||
|
description = "Path to Pomerium config YAML. If set, overrides services.pomerium.settings.";
|
||||||
|
};
|
||||||
|
|
||||||
|
useACMEHost = mkOption {
|
||||||
|
type = with types; nullOr str;
|
||||||
|
default = null;
|
||||||
|
description = ''
|
||||||
|
If set, use a NixOS-generated ACME certificate with the specified name.
|
||||||
|
|
||||||
|
Note that this will require you to use a non-HTTP-based challenge, or
|
||||||
|
disable Pomerium's in-built HTTP redirect server by setting
|
||||||
|
http_redirect_addr to null and use a different HTTP server for serving
|
||||||
|
the challenge response.
|
||||||
|
|
||||||
|
If you're using an HTTP-based challenge, you should use the
|
||||||
|
Pomerium-native autocert option instead.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
|
||||||
|
settings = mkOption {
|
||||||
|
description = ''
|
||||||
|
The contents of Pomerium's config.yaml, in Nix expressions.
|
||||||
|
|
||||||
|
Specifying configFile will override this in its entirety.
|
||||||
|
|
||||||
|
See <link xlink:href="https://pomerium.io/reference/">the Pomerium
|
||||||
|
configuration reference</link> for more information about what to put
|
||||||
|
here.
|
||||||
|
'';
|
||||||
|
default = {};
|
||||||
|
type = format.type;
|
||||||
|
};
|
||||||
|
|
||||||
|
secretsFile = mkOption {
|
||||||
|
type = with types; nullOr path;
|
||||||
|
default = null;
|
||||||
|
description = ''
|
||||||
|
Path to file containing secrets for Pomerium, in systemd
|
||||||
|
EnvironmentFile format. See the systemd.exec(5) man page.
|
||||||
|
'';
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
config = let
|
||||||
|
cfg = config.services.pomerium;
|
||||||
|
cfgFile = if cfg.configFile != null then cfg.configFile else (format.generate "pomerium.yaml" cfg.settings);
|
||||||
|
in mkIf cfg.enable ({
|
||||||
|
systemd.services.pomerium = {
|
||||||
|
description = "Pomerium authenticating reverse proxy";
|
||||||
|
wants = [ "network.target" ] ++ (optional (cfg.useACMEHost != null) "acme-finished-${cfg.useACMEHost}.target");
|
||||||
|
after = [ "network.target" ] ++ (optional (cfg.useACMEHost != null) "acme-finished-${cfg.useACMEHost}.target");
|
||||||
|
wantedBy = [ "multi-user.target" ];
|
||||||
|
environment = optionalAttrs (cfg.useACMEHost != null) {
|
||||||
|
CERTIFICATE_FILE = "fullchain.pem";
|
||||||
|
CERTIFICATE_KEY_FILE = "key.pem";
|
||||||
|
};
|
||||||
|
startLimitIntervalSec = 60;
|
||||||
|
|
||||||
|
serviceConfig = {
|
||||||
|
DynamicUser = true;
|
||||||
|
StateDirectory = [ "pomerium" ];
|
||||||
|
ExecStart = "${pkgs.pomerium}/bin/pomerium -config ${cfgFile}";
|
||||||
|
|
||||||
|
PrivateUsers = false; # breaks CAP_NET_BIND_SERVICE
|
||||||
|
MemoryDenyWriteExecute = false; # breaks LuaJIT
|
||||||
|
|
||||||
|
NoNewPrivileges = true;
|
||||||
|
PrivateTmp = true;
|
||||||
|
PrivateDevices = true;
|
||||||
|
DevicePolicy = "closed";
|
||||||
|
ProtectSystem = "strict";
|
||||||
|
ProtectHome = true;
|
||||||
|
ProtectControlGroups = true;
|
||||||
|
ProtectKernelModules = true;
|
||||||
|
ProtectKernelTunables = true;
|
||||||
|
ProtectKernelLogs = true;
|
||||||
|
RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
|
||||||
|
RestrictNamespaces = true;
|
||||||
|
RestrictRealtime = true;
|
||||||
|
RestrictSUIDSGID = true;
|
||||||
|
LockPersonality = true;
|
||||||
|
SystemCallArchitectures = "native";
|
||||||
|
|
||||||
|
EnvironmentFile = cfg.secretsFile;
|
||||||
|
AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
|
||||||
|
CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
|
||||||
|
|
||||||
|
WorkingDirectory = mkIf (cfg.useACMEHost != null) "$CREDENTIALS_DIRECTORY";
|
||||||
|
LoadCredential = optionals (cfg.useACMEHost != null) [
|
||||||
|
"fullchain.pem:/var/lib/acme/${cfg.useACMEHost}/fullchain.pem"
|
||||||
|
"key.pem:/var/lib/acme/${cfg.useACMEHost}/key.pem"
|
||||||
|
];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
# postRun hooks on cert renew can't be used to restart Nginx since renewal
|
||||||
|
# runs as the unprivileged acme user. sslTargets are added to wantedBy + before
|
||||||
|
# which allows the acme-finished-$cert.target to signify the successful updating
|
||||||
|
# of certs end-to-end.
|
||||||
|
systemd.services.pomerium-config-reload = mkIf (cfg.useACMEHost != null) {
|
||||||
|
# TODO(lukegb): figure out how to make config reloading work with credentials.
|
||||||
|
|
||||||
|
wantedBy = [ "acme-finished-${cfg.useACMEHost}.target" "multi-user.target" ];
|
||||||
|
# Before the finished targets, after the renew services.
|
||||||
|
before = [ "acme-finished-${cfg.useACMEHost}.target" ];
|
||||||
|
after = [ "acme-${cfg.useACMEHost}.service" ];
|
||||||
|
# Block reloading if not all certs exist yet.
|
||||||
|
unitConfig.ConditionPathExists = [ "${certs.${cfg.useACMEHost}.directory}/fullchain.pem" ];
|
||||||
|
serviceConfig = {
|
||||||
|
Type = "oneshot";
|
||||||
|
TimeoutSec = 60;
|
||||||
|
ExecCondition = "/run/current-system/systemd/bin/systemctl -q is-active pomerium.service";
|
||||||
|
ExecStart = "/run/current-system/systemd/bin/systemctl restart pomerium.service";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
});
|
||||||
|
}
|
|
@ -14,6 +14,16 @@ in
|
||||||
default = false;
|
default = false;
|
||||||
description = "Enable the kodi multimedia center.";
|
description = "Enable the kodi multimedia center.";
|
||||||
};
|
};
|
||||||
|
|
||||||
|
package = mkOption {
|
||||||
|
type = types.package;
|
||||||
|
default = pkgs.kodi;
|
||||||
|
defaultText = "pkgs.kodi";
|
||||||
|
example = "pkgs.kodi.withPackages (p: with p; [ jellyfin pvr-iptvsimple vfs-sftp ])";
|
||||||
|
description = ''
|
||||||
|
Package that should be used for Kodi.
|
||||||
|
'';
|
||||||
|
};
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -21,11 +31,11 @@ in
|
||||||
services.xserver.desktopManager.session = [{
|
services.xserver.desktopManager.session = [{
|
||||||
name = "kodi";
|
name = "kodi";
|
||||||
start = ''
|
start = ''
|
||||||
LIRC_SOCKET_PATH=/run/lirc/lircd ${pkgs.kodi}/bin/kodi --standalone &
|
LIRC_SOCKET_PATH=/run/lirc/lircd ${cfg.package}/bin/kodi --standalone &
|
||||||
waitPID=$!
|
waitPID=$!
|
||||||
'';
|
'';
|
||||||
}];
|
}];
|
||||||
|
|
||||||
environment.systemPackages = [ pkgs.kodi ];
|
environment.systemPackages = [ cfg.package ];
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
|
@@ -84,6 +84,7 @@ let
       # Kernel module loading.
       "systemd-modules-load.service"
       "kmod-static-nodes.service"
+      "modprobe@.service"

       # Filesystems.
       "systemd-fsck@.service"

@@ -1187,9 +1188,12 @@ in
     systemd.services.systemd-remount-fs.unitConfig.ConditionVirtualization = "!container";
     systemd.services.systemd-random-seed.unitConfig.ConditionVirtualization = "!container";

-    boot.kernel.sysctl = mkIf (!cfg.coredump.enable) {
-      "kernel.core_pattern" = "core";
-    };
+    boot.kernel.sysctl."kernel.core_pattern" = mkIf (!cfg.coredump.enable) "core";
+    # Increase numeric PID range (set directly instead of copying a one-line file from systemd)
+    # https://github.com/systemd/systemd/pull/12226
+    boot.kernel.sysctl."kernel.pid_max" = mkIf pkgs.stdenv.is64bit (lib.mkDefault 4194304);

     boot.kernelParams = optional (!cfg.enableUnifiedCgroupHierarchy) "systemd.unified_cgroup_hierarchy=0";
   };
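Because the new kernel.pid_max setting is applied with lib.mkDefault, a machine configuration can still override it without mkForce. A minimal sketch (the value is arbitrary, chosen only for illustration):

  { ... }:
  {
    # Replaces the module's mkDefault of 4194304 on 64-bit systems.
    boot.kernel.sysctl."kernel.pid_max" = 131072;
  }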
@@ -302,7 +302,7 @@ in
     };

     services.zfs.autoScrub = {
-      enable = mkEnableOption "Enables periodic scrubbing of ZFS pools.";
+      enable = mkEnableOption "periodic scrubbing of ZFS pools";

       interval = mkOption {
         default = "Sun, 02:00";
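For reference, enabling the scrub timer from a host configuration uses only the two options visible in this hunk; a minimal sketch with the default schedule spelled out explicitly:

  { ... }:
  {
    services.zfs.autoScrub = {
      enable = true;
      interval = "Sun, 02:00";
    };
  }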
@@ -101,7 +101,7 @@ let

         unitConfig.ConditionCapability = "CAP_NET_ADMIN";

-        path = [ pkgs.iproute ];
+        path = [ pkgs.iproute2 ];

         serviceConfig = {
           Type = "oneshot";

@@ -185,7 +185,7 @@ let
         # Restart rather than stop+start this unit to prevent the
         # network from dying during switch-to-configuration.
         stopIfChanged = false;
-        path = [ pkgs.iproute ];
+        path = [ pkgs.iproute2 ];
         script =
           ''
             state="/run/nixos/network/addresses/${i.name}"

@@ -258,7 +258,7 @@ let
         wantedBy = [ "network-setup.service" (subsystemDevice i.name) ];
         partOf = [ "network-setup.service" ];
         before = [ "network-setup.service" ];
-        path = [ pkgs.iproute ];
+        path = [ pkgs.iproute2 ];
         serviceConfig = {
           Type = "oneshot";
           RemainAfterExit = true;

@@ -284,7 +284,7 @@ let
         before = [ "network-setup.service" ];
         serviceConfig.Type = "oneshot";
         serviceConfig.RemainAfterExit = true;
-        path = [ pkgs.iproute ];
+        path = [ pkgs.iproute2 ];
         script = ''
           # Remove Dead Interfaces
           echo "Removing old bridge ${n}..."

@@ -372,7 +372,7 @@ let
         wants = deps; # if one or more interface fails, the switch should continue to run
         serviceConfig.Type = "oneshot";
         serviceConfig.RemainAfterExit = true;
-        path = [ pkgs.iproute config.virtualisation.vswitch.package ];
+        path = [ pkgs.iproute2 config.virtualisation.vswitch.package ];
         preStart = ''
           echo "Resetting Open vSwitch ${n}..."
           ovs-vsctl --if-exists del-br ${n} -- add-br ${n} \

@@ -413,7 +413,7 @@ let
         before = [ "network-setup.service" ];
         serviceConfig.Type = "oneshot";
         serviceConfig.RemainAfterExit = true;
-        path = [ pkgs.iproute pkgs.gawk ];
+        path = [ pkgs.iproute2 pkgs.gawk ];
         script = ''
           echo "Destroying old bond ${n}..."
           ${destroyBond n}

@@ -451,7 +451,7 @@ let
         before = [ "network-setup.service" ];
         serviceConfig.Type = "oneshot";
         serviceConfig.RemainAfterExit = true;
-        path = [ pkgs.iproute ];
+        path = [ pkgs.iproute2 ];
         script = ''
           # Remove Dead Interfaces
           ip link show "${n}" >/dev/null 2>&1 && ip link delete "${n}"

@@ -476,7 +476,7 @@ let
         before = [ "network-setup.service" ];
         serviceConfig.Type = "oneshot";
         serviceConfig.RemainAfterExit = true;
-        path = [ pkgs.iproute ];
+        path = [ pkgs.iproute2 ];
         script = ''
           # Remove Dead Interfaces
           ip link show "${n}" >/dev/null 2>&1 && ip link delete "${n}"

@@ -504,7 +504,7 @@ let
         before = [ "network-setup.service" ];
         serviceConfig.Type = "oneshot";
         serviceConfig.RemainAfterExit = true;
-        path = [ pkgs.iproute ];
+        path = [ pkgs.iproute2 ];
         script = ''
           # Remove Dead Interfaces
           ip link show "${n}" >/dev/null 2>&1 && ip link delete "${n}"
@@ -259,7 +259,7 @@ in
         wants = deps; # if one or more interface fails, the switch should continue to run
         serviceConfig.Type = "oneshot";
         serviceConfig.RemainAfterExit = true;
-        path = [ pkgs.iproute config.virtualisation.vswitch.package ];
+        path = [ pkgs.iproute2 config.virtualisation.vswitch.package ];
         preStart = ''
           echo "Resetting Open vSwitch ${n}..."
           ovs-vsctl --if-exists del-br ${n} -- add-br ${n} \

@@ -1171,7 +1171,7 @@ in
       wantedBy = [ "network.target" ];
       after = [ "network-pre.target" ];
       unitConfig.ConditionCapability = "CAP_NET_ADMIN";
-      path = [ pkgs.iproute ];
+      path = [ pkgs.iproute2 ];
       serviceConfig.Type = "oneshot";
       serviceConfig.RemainAfterExit = true;
       script = ''

@@ -1249,7 +1249,7 @@ in
       ${optionalString (current.type == "mesh" && current.meshID!=null) "${pkgs.iw}/bin/iw dev ${device} set meshid ${current.meshID}"}
       ${optionalString (current.type == "monitor" && current.flags!=null) "${pkgs.iw}/bin/iw dev ${device} set monitor ${current.flags}"}
       ${optionalString (current.type == "managed" && current.fourAddr!=null) "${pkgs.iw}/bin/iw dev ${device} set 4addr ${if current.fourAddr then "on" else "off"}"}
-      ${optionalString (current.mac != null) "${pkgs.iproute}/bin/ip link set dev ${device} address ${current.mac}"}
+      ${optionalString (current.mac != null) "${pkgs.iproute2}/bin/ip link set dev ${device} address ${current.mac}"}
     '';

     # Udev script to execute for a new WLAN interface. The script configures the new WLAN interface.

@@ -1260,7 +1260,7 @@ in
       ${optionalString (new.type == "mesh" && new.meshID!=null) "${pkgs.iw}/bin/iw dev ${device} set meshid ${new.meshID}"}
       ${optionalString (new.type == "monitor" && new.flags!=null) "${pkgs.iw}/bin/iw dev ${device} set monitor ${new.flags}"}
       ${optionalString (new.type == "managed" && new.fourAddr!=null) "${pkgs.iw}/bin/iw dev ${device} set 4addr ${if new.fourAddr then "on" else "off"}"}
-      ${optionalString (new.mac != null) "${pkgs.iproute}/bin/ip link set dev ${device} address ${new.mac}"}
+      ${optionalString (new.mac != null) "${pkgs.iproute2}/bin/ip link set dev ${device} address ${new.mac}"}
     '';

     # Udev attributes for systemd to name the device and to create a .device target.
@@ -119,7 +119,7 @@ in
       wants = [ "network-online.target" ];
       after = [ "network-online.target" ];

-      path = [ pkgs.wget pkgs.iproute ];
+      path = [ pkgs.wget pkgs.iproute2 ];

       script =
         ''

@@ -19,7 +19,7 @@ with lib;
       wantedBy = [ "multi-user.target" "sshd.service" ];
       before = [ "sshd.service" ];

-      path = [ pkgs.iproute ];
+      path = [ pkgs.iproute2 ];

       script =
         ''

@@ -110,7 +110,7 @@ in
     systemd.services.google-network-daemon = {
       description = "Google Compute Engine Network Daemon";
       after = [ "network-online.target" "network.target" "google-instance-setup.service" ];
-      path = with pkgs; [ iproute ];
+      path = with pkgs; [ iproute2 ];
       serviceConfig = {
         ExecStart = "${gce}/bin/google_network_daemon";
         StandardOutput="journal+console";

@@ -739,7 +739,7 @@ in

       unitConfig.RequiresMountsFor = "/var/lib/containers/%i";

-      path = [ pkgs.iproute ];
+      path = [ pkgs.iproute2 ];

       environment = {
         root = "/var/lib/containers/%i";

@@ -17,7 +17,7 @@ in {
       wantedBy = [ "multi-user.target" ];
       after = [ "xe-linux-distribution.service" ];
       requires = [ "proc-xen.mount" ];
-      path = [ pkgs.coreutils pkgs.iproute ];
+      path = [ pkgs.coreutils pkgs.iproute2 ];
       serviceConfig = {
         PIDFile = "/run/xe-daemon.pid";
         ExecStart = "${pkgs.xe-guest-utilities}/bin/xe-daemon -p /run/xe-daemon.pid";

@@ -248,7 +248,7 @@ in
     # Xen provides udev rules.
     services.udev.packages = [ cfg.package ];

-    services.udev.path = [ pkgs.bridge-utils pkgs.iproute ];
+    services.udev.path = [ pkgs.bridge-utils pkgs.iproute2 ];

     systemd.services.xen-store = {
       description = "Xen Store Daemon";
third_party/nixpkgs/nixos/tests/agda.nix
@@ -3,8 +3,9 @@ import ./make-test-python.nix ({ pkgs, ... }:
 let
   hello-world = pkgs.writeText "hello-world" ''
     open import IO
+    open import Level

-    main = run(putStrLn "Hello World!")
+    main = run {0ℓ} (putStrLn "Hello World!")
   '';
 in
 {
@@ -49,6 +49,7 @@ in
   cadvisor = handleTestOn ["x86_64-linux"] ./cadvisor.nix {};
   cage = handleTest ./cage.nix {};
   cagebreak = handleTest ./cagebreak.nix {};
+  calibre-web = handleTest ./calibre-web.nix {};
   cassandra_2_1 = handleTest ./cassandra.nix { testPackage = pkgs.cassandra_2_1; };
   cassandra_2_2 = handleTest ./cassandra.nix { testPackage = pkgs.cassandra_2_2; };
   cassandra_3_0 = handleTest ./cassandra.nix { testPackage = pkgs.cassandra_3_0; };

@@ -110,6 +111,7 @@ in
   ergo = handleTest ./ergo.nix {};
   etcd = handleTestOn ["x86_64-linux"] ./etcd.nix {};
   etcd-cluster = handleTestOn ["x86_64-linux"] ./etcd-cluster.nix {};
+  etebase-server = handleTest ./etebase-server.nix {};
   etesync-dav = handleTest ./etesync-dav.nix {};
   fancontrol = handleTest ./fancontrol.nix {};
   fcitx = handleTest ./fcitx {};

@@ -225,6 +227,7 @@ in
   mariadb-galera-mariabackup = handleTest ./mysql/mariadb-galera-mariabackup.nix {};
   mariadb-galera-rsync = handleTest ./mysql/mariadb-galera-rsync.nix {};
   matomo = handleTest ./matomo.nix {};
+  matrix-appservice-irc = handleTest ./matrix-appservice-irc.nix {};
   matrix-synapse = handleTest ./matrix-synapse.nix {};
   mediawiki = handleTest ./mediawiki.nix {};
   memcached = handleTest ./memcached.nix {};

@@ -287,6 +290,7 @@ in
   nzbget = handleTest ./nzbget.nix {};
   nzbhydra2 = handleTest ./nzbhydra2.nix {};
   oh-my-zsh = handleTest ./oh-my-zsh.nix {};
+  ombi = handleTest ./ombi.nix {};
   openarena = handleTest ./openarena.nix {};
   openldap = handleTest ./openldap.nix {};
   opensmtpd = handleTest ./opensmtpd.nix {};

@@ -318,6 +322,7 @@ in
   plikd = handleTest ./plikd.nix {};
   plotinus = handleTest ./plotinus.nix {};
   podman = handleTestOn ["x86_64-linux"] ./podman.nix {};
+  pomerium = handleTestOn ["x86_64-linux"] ./pomerium.nix {};
   postfix = handleTest ./postfix.nix {};
   postfix-raise-smtpd-tls-security-level = handleTest ./postfix-raise-smtpd-tls-security-level.nix {};
   postgis = handleTest ./postgis.nix {};
third_party/nixpkgs/nixos/tests/calibre-web.nix (new file)
@@ -0,0 +1,53 @@
import ./make-test-python.nix (
  { pkgs, lib, ... }:

  let
    port = 3142;
    defaultPort = 8083;
  in
  with lib;
  {
    name = "calibre-web";
    meta.maintainers = with pkgs.lib.maintainers; [ pborzenkov ];

    nodes = {
      default = { ... }: {
        services.calibre-web.enable = true;
      };

      customized = { pkgs, ... }: {
        services.calibre-web = {
          enable = true;
          listen.port = port;
          options = {
            calibreLibrary = "/tmp/books";
            reverseProxyAuth = {
              enable = true;
              header = "X-User";
            };
          };
        };
        environment.systemPackages = [ pkgs.calibre ];
      };
    };
    testScript = ''
      start_all()

      default.wait_for_unit("calibre-web.service")
      default.wait_for_open_port(${toString defaultPort})
      default.succeed(
          "curl --fail 'http://localhost:${toString defaultPort}/basicconfig' | grep -q 'Basic Configuration'"
      )

      customized.succeed(
          "mkdir /tmp/books && calibredb --library-path /tmp/books add -e --title test-book"
      )
      customized.succeed("systemctl restart calibre-web")
      customized.wait_for_unit("calibre-web.service")
      customized.wait_for_open_port(${toString port})
      customized.succeed(
          "curl --fail -H X-User:admin 'http://localhost:${toString port}' | grep -q test-book"
      )
    '';
  }
)
third_party/nixpkgs/nixos/tests/croc.nix
@@ -6,7 +6,7 @@ let
   pass = pkgs.writeText "pass" "PassRelay";
 in {
   name = "croc";
-  meta = with pkgs.stdenv.lib.maintainers; {
+  meta = with pkgs.lib.maintainers; {
     maintainers = [ hax404 julm ];
   };

third_party/nixpkgs/nixos/tests/elk.nix
@@ -120,6 +120,7 @@ let
     };
   };

+  passthru.elkPackages = elk;
   testScript = ''
     import json

third_party/nixpkgs/nixos/tests/etebase-server.nix
vendored
Normal file
50
third_party/nixpkgs/nixos/tests/etebase-server.nix
vendored
Normal file
|
@ -0,0 +1,50 @@
|
||||||
|
import ./make-test-python.nix ({ pkgs, ... }:
|
||||||
|
|
||||||
|
let
|
||||||
|
dataDir = "/var/lib/foobar";
|
||||||
|
|
||||||
|
in {
|
||||||
|
name = "etebase-server";
|
||||||
|
meta = with pkgs.lib.maintainers; {
|
||||||
|
maintainers = [ felschr ];
|
||||||
|
};
|
||||||
|
|
||||||
|
machine = { pkgs, ... }:
|
||||||
|
{
|
||||||
|
services.etebase-server = {
|
||||||
|
inherit dataDir;
|
||||||
|
enable = true;
|
||||||
|
settings.global.secret_file =
|
||||||
|
toString (pkgs.writeText "secret" "123456");
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
testScript = ''
|
||||||
|
machine.wait_for_unit("etebase-server.service")
|
||||||
|
machine.wait_for_open_port(8001)
|
||||||
|
|
||||||
|
with subtest("Database & src-version were created"):
|
||||||
|
machine.wait_for_file("${dataDir}/src-version")
|
||||||
|
assert (
|
||||||
|
"${pkgs.etebase-server}"
|
||||||
|
in machine.succeed("cat ${dataDir}/src-version")
|
||||||
|
)
|
||||||
|
machine.wait_for_file("${dataDir}/db.sqlite3")
|
||||||
|
machine.wait_for_file("${dataDir}/static")
|
||||||
|
|
||||||
|
with subtest("Only allow access from allowed_hosts"):
|
||||||
|
machine.succeed("curl -sSfL http://0.0.0.0:8001/")
|
||||||
|
machine.fail("curl -sSfL http://127.0.0.1:8001/")
|
||||||
|
machine.fail("curl -sSfL http://localhost:8001/")
|
||||||
|
|
||||||
|
with subtest("Run tests"):
|
||||||
|
machine.succeed("etebase-server check")
|
||||||
|
machine.succeed("etebase-server test")
|
||||||
|
|
||||||
|
with subtest("Create superuser"):
|
||||||
|
machine.succeed(
|
||||||
|
"etebase-server createsuperuser --no-input --username admin --email root@localhost"
|
||||||
|
)
|
||||||
|
'';
|
||||||
|
}
|
||||||
|
)
|
third_party/nixpkgs/nixos/tests/gitlab.nix
@@ -34,6 +34,8 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : with lib; {
         enableImap = true;
       };

+      systemd.services.gitlab-backup.environment.BACKUP = "dump";
+
       services.gitlab = {
         enable = true;
         databasePasswordFile = pkgs.writeText "dbPassword" "xo0daiF4";

@@ -64,7 +66,7 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : with lib; {
       };
     };

-  testScript =
+  testScript = { nodes, ... }:
     let
       auth = pkgs.writeText "auth.json" (builtins.toJSON {
         grant_type = "password";

@@ -83,19 +85,22 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : with lib; {
         content = "some content";
         commit_message = "create a new file";
       });
-  in
-  ''
-      gitlab.start()

+      # Wait for all GitLab services to be fully started.
+      waitForServices = ''
       gitlab.wait_for_unit("gitaly.service")
       gitlab.wait_for_unit("gitlab-workhorse.service")
       gitlab.wait_for_unit("gitlab-pages.service")
       gitlab.wait_for_unit("gitlab-mailroom.service")
       gitlab.wait_for_unit("gitlab.service")
       gitlab.wait_for_unit("gitlab-sidekiq.service")
-      gitlab.wait_for_file("/var/gitlab/state/tmp/sockets/gitlab.socket")
+      gitlab.wait_for_file("${nodes.gitlab.config.services.gitlab.statePath}/tmp/sockets/gitlab.socket")
       gitlab.wait_until_succeeds("curl -sSf http://gitlab/users/sign_in")
+      '';
+
+      # The actual test of GitLab. Only push data to GitLab if
+      # `doSetup` is is true.
+      test = doSetup: ''
       gitlab.succeed(
           "curl -isSf http://gitlab | grep -i location | grep -q http://gitlab/users/sign_in"
       )

@@ -105,12 +110,14 @@ import ./make-test-python.nix ({ pkgs, lib, ...} : with lib; {
       gitlab.succeed(
           "echo \"Authorization: Bearer \$(curl -X POST -H 'Content-Type: application/json' -d @${auth} http://gitlab/oauth/token | ${pkgs.jq}/bin/jq -r '.access_token')\" >/tmp/headers"
       )
+      '' + optionalString doSetup ''
       gitlab.succeed(
           "curl -X POST -H 'Content-Type: application/json' -H @/tmp/headers -d @${createProject} http://gitlab/api/v4/projects"
       )
       gitlab.succeed(
           "curl -X POST -H 'Content-Type: application/json' -H @/tmp/headers -d @${putFile} http://gitlab/api/v4/projects/1/repository/files/some-file.txt"
       )
+      '' + ''
       gitlab.succeed(
           "curl -H @/tmp/headers http://gitlab/api/v4/projects/1/repository/archive.tar.gz > /tmp/archive.tar.gz"
       )

@@ -120,4 +127,28 @@
       gitlab.succeed("test -s /tmp/archive.tar.gz")
       gitlab.succeed("test -s /tmp/archive.tar.bz2")
       '';
+
+  in ''
+    gitlab.start()
+  ''
+  + waitForServices
+  + test true
+  + ''
+    gitlab.systemctl("start gitlab-backup.service")
+    gitlab.wait_for_unit("gitlab-backup.service")
+    gitlab.wait_for_file("${nodes.gitlab.config.services.gitlab.statePath}/backup/dump_gitlab_backup.tar")
+    gitlab.systemctl("stop postgresql.service gitlab.target")
+    gitlab.succeed(
+        "find ${nodes.gitlab.config.services.gitlab.statePath} -mindepth 1 -maxdepth 1 -not -name backup -execdir rm -r {} +"
+    )
+    gitlab.succeed("systemd-tmpfiles --create")
+    gitlab.succeed("rm -rf ${nodes.gitlab.config.services.postgresql.dataDir}")
+    gitlab.systemctl("start gitlab-config.service gitlab-postgresql.service")
+    gitlab.succeed(
+        "sudo -u gitlab -H gitlab-rake gitlab:backup:restore RAILS_ENV=production BACKUP=dump force=yes"
+    )
+    gitlab.systemctl("start gitlab.target")
+  ''
+  + waitForServices
+  + test false;
 })
third_party/nixpkgs/nixos/tests/matrix-appservice-irc.nix (new file)
@@ -0,0 +1,162 @@
import ./make-test-python.nix ({ pkgs, ... }:
  let
    homeserverUrl = "http://homeserver:8448";
  in
  {
    name = "matrix-appservice-irc";
    meta = {
      maintainers = pkgs.matrix-appservice-irc.meta.maintainers;
    };

    nodes = {
      homeserver = { pkgs, ... }: {
        # We'll switch to this once the config is copied into place
        specialisation.running.configuration = {
          services.matrix-synapse = {
            enable = true;
            database_type = "sqlite3";
            app_service_config_files = [ "/registration.yml" ];

            enable_registration = true;

            listeners = [
              # The default but tls=false
              {
                "bind_address" = "";
                "port" = 8448;
                "resources" = [
                  { "compress" = true; "names" = [ "client" "webclient" ]; }
                  { "compress" = false; "names" = [ "federation" ]; }
                ];
                "tls" = false;
                "type" = "http";
                "x_forwarded" = false;
              }
            ];
          };

          networking.firewall.allowedTCPPorts = [ 8448 ];
        };
      };

      ircd = { pkgs, ... }: {
        services.ngircd = {
          enable = true;
          config = ''
            [Global]
              Name = ircd.ircd
              Info = Server Info Text
              AdminInfo1 = _

            [Channel]
              Name = #test
              Topic = a cool place

            [Options]
              PAM = no
          '';
        };
        networking.firewall.allowedTCPPorts = [ 6667 ];
      };

      appservice = { pkgs, ... }: {
        services.matrix-appservice-irc = {
          enable = true;
          registrationUrl = "http://appservice:8009";

          settings = {
            homeserver.url = homeserverUrl;
            homeserver.domain = "homeserver";

            ircService.servers."ircd" = {
              name = "IRCd";
              port = 6667;
              dynamicChannels = {
                enabled = true;
                aliasTemplate = "#irc_$CHANNEL";
              };
            };
          };
        };

        networking.firewall.allowedTCPPorts = [ 8009 ];
      };

      client = { pkgs, ... }: {
        environment.systemPackages = [
          (pkgs.writers.writePython3Bin "do_test"
          { libraries = [ pkgs.python3Packages.matrix-client ]; } ''
            import socket
            from matrix_client.client import MatrixClient
            from time import sleep

            matrix = MatrixClient("${homeserverUrl}")
            matrix.register_with_password(username="alice", password="foobar")

            irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            irc.connect(("ircd", 6667))
            irc.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            irc.send(b"USER bob bob bob :bob\n")
            irc.send(b"NICK bob\n")

            m_room = matrix.join_room("#irc_#test:homeserver")
            irc.send(b"JOIN #test\n")

            # plenty of time for the joins to happen
            sleep(10)

            m_room.send_text("hi from matrix")
            irc.send(b"PRIVMSG #test :hi from irc \r\n")

            print("Waiting for irc message...")
            while True:
                buf = irc.recv(10000)
                if b"hi from matrix" in buf:
                    break

            print("Waiting for matrix message...")


            def callback(room, e):
                if "hi from irc" in e['content']['body']:
                    exit(0)


            m_room.add_listener(callback, "m.room.message")
            matrix.listen_forever()
          ''
          )
        ];
      };
    };

    testScript = ''
      start_all()

      ircd.wait_for_unit("ngircd.service")
      ircd.wait_for_open_port(6667)

      with subtest("start the appservice"):
          appservice.wait_for_unit("matrix-appservice-irc.service")
          appservice.wait_for_open_port(8009)

      with subtest("copy the registration file"):
          appservice.copy_from_vm("/var/lib/matrix-appservice-irc/registration.yml")
          homeserver.copy_from_host(
              pathlib.Path(os.environ.get("out", os.getcwd())) / "registration.yml", "/"
          )
          homeserver.succeed("chmod 444 /registration.yml")

      with subtest("start the homeserver"):
          homeserver.succeed(
              "/run/current-system/specialisation/running/bin/switch-to-configuration test >&2"
          )

          homeserver.wait_for_unit("matrix-synapse.service")
          homeserver.wait_for_open_port(8448)

      with subtest("ensure messages can be exchanged"):
          client.succeed("do_test")
    '';

  })
@@ -2,7 +2,7 @@ import ./../make-test-python.nix ({ pkgs, ...} :

 let
   mysqlenv-common = pkgs.buildEnv { name = "mysql-path-env-common"; pathsToLink = [ "/bin" ]; paths = with pkgs; [ bash gawk gnutar inetutils which ]; };
-  mysqlenv-mariabackup = pkgs.buildEnv { name = "mysql-path-env-mariabackup"; pathsToLink = [ "/bin" ]; paths = with pkgs; [ gzip iproute netcat procps pv socat ]; };
+  mysqlenv-mariabackup = pkgs.buildEnv { name = "mysql-path-env-mariabackup"; pathsToLink = [ "/bin" ]; paths = with pkgs; [ gzip iproute2 netcat procps pv socat ]; };

 in {
   name = "mariadb-galera-mariabackup";
third_party/nixpkgs/nixos/tests/ombi.nix (new file)
@@ -0,0 +1,18 @@
import ./make-test-python.nix ({ lib, ... }:

with lib;

{
  name = "ombi";
  meta.maintainers = with maintainers; [ woky ];

  nodes.machine =
    { pkgs, ... }:
    { services.ombi.enable = true; };

  testScript = ''
    machine.wait_for_unit("ombi.service")
    machine.wait_for_open_port("5000")
    machine.succeed("curl --fail http://localhost:5000/")
  '';
})
third_party/nixpkgs/nixos/tests/pomerium.nix (new file)
@@ -0,0 +1,102 @@
import ./make-test-python.nix ({ pkgs, lib, ... }: {
  name = "pomerium";
  meta = with lib.maintainers; {
    maintainers = [ lukegb ];
  };

  nodes = let base = myIP: { pkgs, lib, ... }: {
    virtualisation.vlans = [ 1 ];
    networking = {
      dhcpcd.enable = false;
      firewall.allowedTCPPorts = [ 80 443 ];
      hosts = {
        "192.168.1.1" = [ "pomerium" "pom-auth" ];
        "192.168.1.2" = [ "backend" "dummy-oidc" ];
      };
      interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
        { address = myIP; prefixLength = 24; }
      ];
    };
  }; in {
    pomerium = { pkgs, lib, ... }: {
      imports = [ (base "192.168.1.1") ];
      services.pomerium = {
        enable = true;
        settings = {
          address = ":80";
          insecure_server = true;
          authenticate_service_url = "http://pom-auth";

          idp_provider = "oidc";
          idp_scopes = [ "oidc" ];
          idp_client_id = "dummy";
          idp_provider_url = "http://dummy-oidc";

          policy = [{
            from = "https://my.website";
            to = "http://192.168.1.2";
            allow_public_unauthenticated_access = true;
            preserve_host_header = true;
          } {
            from = "https://login.required";
            to = "http://192.168.1.2";
            allowed_domains = [ "my.domain" ];
            preserve_host_header = true;
          }];
        };
        secretsFile = pkgs.writeText "pomerium-secrets" ''
          # 12345678901234567890123456789012 in base64
          COOKIE_SECRET=MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI=
          IDP_CLIENT_SECRET=dummy
        '';
      };
    };
    backend = { pkgs, lib, ... }: {
      imports = [ (base "192.168.1.2") ];
      services.nginx.enable = true;
      services.nginx.virtualHosts."my.website" = {
        root = pkgs.runCommand "testdir" {} ''
          mkdir "$out"
          echo hello world > "$out/index.html"
        '';
      };
      services.nginx.virtualHosts."dummy-oidc" = {
        root = pkgs.runCommand "testdir" {} ''
          mkdir -p "$out/.well-known"
          cat <<EOF >"$out/.well-known/openid-configuration"
          {
            "issuer": "http://dummy-oidc",
            "authorization_endpoint": "http://dummy-oidc/auth.txt",
            "token_endpoint": "http://dummy-oidc/token",
            "jwks_uri": "http://dummy-oidc/jwks.json",
            "userinfo_endpoint": "http://dummy-oidc/userinfo",
            "id_token_signing_alg_values_supported": ["RS256"]
          }
          EOF
          echo hello I am login page >"$out/auth.txt"
        '';
      };
    };
  };

  testScript = { ... }: ''
    backend.wait_for_unit("nginx")
    backend.wait_for_open_port(80)

    pomerium.wait_for_unit("pomerium")
    pomerium.wait_for_open_port(80)

    with subtest("no authentication required"):
        pomerium.succeed(
            "curl --resolve my.website:80:127.0.0.1 http://my.website | grep -q 'hello world'"
        )

    with subtest("login required"):
        pomerium.succeed(
            "curl -I --resolve login.required:80:127.0.0.1 http://login.required | grep -q pom-auth"
        )
        pomerium.succeed(
            "curl -L --resolve login.required:80:127.0.0.1 http://login.required | grep -q 'hello I am login page'"
        )
  '';
})
third_party/nixpkgs/nixos/tests/privacyidea.nix
@@ -12,10 +12,16 @@ import ./make-test-python.nix ({ pkgs, ...} : rec {

     services.privacyidea = {
       enable = true;
-      secretKey = "testing";
-      pepper = "testing";
+      secretKey = "$SECRET_KEY";
+      pepper = "$PEPPER";
       adminPasswordFile = pkgs.writeText "admin-password" "testing";
       adminEmail = "root@localhost";
+
+      # Don't try this at home!
+      environmentFile = pkgs.writeText "pi-secrets.env" ''
+        SECRET_KEY=testing
+        PEPPER=testing
+      '';
     };
     services.nginx = {
       enable = true;

@@ -29,6 +35,8 @@ import ./make-test-python.nix ({ pkgs, ...} : rec {
     machine.start()
     machine.wait_for_unit("multi-user.target")
     machine.succeed("curl --fail http://localhost | grep privacyIDEA")
+    machine.succeed("grep \"SECRET_KEY = 'testing'\" /var/lib/privacyidea/privacyidea.cfg")
+    machine.succeed("grep \"PI_PEPPER = 'testing'\" /var/lib/privacyidea/privacyidea.cfg")
     machine.succeed(
         "curl --fail http://localhost/auth -F username=admin -F password=testing | grep token"
     )
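The test keeps its environment file in the world-readable Nix store (hence the "Don't try this at home!" comment) so that $SECRET_KEY and $PEPPER can be substituted deterministically. Outside of a test the same options would normally reference a file kept out of the store; a minimal sketch, with a hypothetical path:

  { ... }:
  {
    services.privacyidea = {
      enable = true;
      # Substituted from the environment file when the service starts.
      secretKey = "$SECRET_KEY";
      pepper = "$PEPPER";
      # Hypothetical path provisioned outside the Nix store, e.g. by a secrets manager.
      environmentFile = "/run/keys/pi-secrets.env";
    };
  }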
@@ -201,6 +201,22 @@ let
     '';
   };

+  # Access to WHOIS server is required to properly test this exporter, so
+  # just perform basic sanity check that the exporter is running and returns
+  # a failure.
+  domain = {
+    exporterConfig = {
+      enable = true;
+    };
+    exporterTest = ''
+      wait_for_unit("prometheus-domain-exporter.service")
+      wait_for_open_port(9222)
+      succeed(
+          "curl -sSf 'http://localhost:9222/probe?target=nixos.org' | grep -q 'domain_probe_success 0'"
+      )
+    '';
+  };
+
   dovecot = {
     exporterConfig = {
       enable = true;

@@ -603,6 +619,66 @@ let
     '';
   };

+  openldap = {
+    exporterConfig = {
+      enable = true;
+      ldapCredentialFile = "${pkgs.writeText "exporter.yml" ''
+        ldapUser: "cn=root,dc=example"
+        ldapPass: "notapassword"
+      ''}";
+    };
+    metricProvider = {
+      services.openldap = {
+        enable = true;
+        settings.children = {
+          "cn=schema".includes = [
+            "${pkgs.openldap}/etc/schema/core.ldif"
+            "${pkgs.openldap}/etc/schema/cosine.ldif"
+            "${pkgs.openldap}/etc/schema/inetorgperson.ldif"
+            "${pkgs.openldap}/etc/schema/nis.ldif"
+          ];
+          "olcDatabase={1}mdb" = {
+            attrs = {
+              objectClass = [ "olcDatabaseConfig" "olcMdbConfig" ];
+              olcDatabase = "{1}mdb";
+              olcDbDirectory = "/var/db/openldap";
+              olcSuffix = "dc=example";
+              olcRootDN = {
+                # cn=root,dc=example
+                base64 = "Y249cm9vdCxkYz1leGFtcGxl";
+              };
+              olcRootPW = {
+                path = "${pkgs.writeText "rootpw" "notapassword"}";
+              };
+            };
+          };
+          "olcDatabase={2}monitor".attrs = {
+            objectClass = [ "olcDatabaseConfig" ];
+            olcDatabase = "{2}monitor";
+            olcAccess = [ "to dn.subtree=cn=monitor by users read" ];
+          };
+        };
+        declarativeContents."dc=example" = ''
+          dn: dc=example
+          objectClass: domain
+          dc: example
+
+          dn: ou=users,dc=example
+          objectClass: organizationalUnit
+          ou: users
+        '';
+      };
+    };
+    exporterTest = ''
+      wait_for_unit("prometheus-openldap-exporter.service")
+      wait_for_open_port(389)
+      wait_for_open_port(9330)
+      wait_until_succeeds(
+          "curl -sSf http://localhost:9330/metrics | grep -q 'openldap_scrape{result=\"ok\"} 1'"
+      )
+    '';
+  };
+
   openvpn = {
     exporterConfig = {
       enable = true;
@@ -52,9 +52,9 @@ import ../make-test-python.nix ({ pkgs, lib, ...} :
       inherit (wg-snakeoil-keys.peer0) publicKey;
     };

-    postSetup = let inherit (pkgs) iproute; in ''
-      ${iproute}/bin/ip route replace 10.23.42.1/32 dev wg0
-      ${iproute}/bin/ip route replace fc00::1/128 dev wg0
+    postSetup = let inherit (pkgs) iproute2; in ''
+      ${iproute2}/bin/ip route replace 10.23.42.1/32 dev wg0
+      ${iproute2}/bin/ip route replace fc00::1/128 dev wg0
     '';
   };
 };
third_party/nixpkgs/pkgs/applications/accessibility/svkbd/default.nix (new file)
@@ -0,0 +1,59 @@
{ lib
, stdenv
, fetchurl
, writeText
, pkg-config
, libX11
, libXft
, libXi
, libXinerama
, libXtst
, layout ? null
, conf ? null
, patches ? [ ]
}:

stdenv.mkDerivation rec {
  pname = "svkbd";
  version = "0.3";

  src = fetchurl {
    url = "https://dl.suckless.org/tools/svkbd-${version}.tar.gz";
    sha256 = "108khx665d7dlzs04iy4g1nw3fyqpy6kd0afrwiapaibgv4xhfsk";
  };

  inherit patches;

  postPatch = let
    configFile = if lib.isDerivation conf || lib.isPath conf then
      conf
    else
      writeText "config.def.h" conf;
  in lib.optionalString (conf != null) ''
    cp ${configFile} config.def.h
  '';

  nativeBuildInputs = [
    pkg-config
  ];

  buildInputs = [
    libX11
    libXft
    libXi
    libXinerama
    libXtst
  ];

  makeFlags = [
    "PREFIX=${placeholder "out"}"
  ] ++ lib.optional (layout != null) "LAYOUT=${layout}";

  meta = with lib; {
    description = "Simple virtual keyboard";
    homepage = "https://tools.suckless.org/x/svkbd/";
    license = licenses.mit;
    platforms = platforms.linux;
    maintainers = with maintainers; [ dotlambda ];
  };
}
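Because the expression takes layout, conf, and patches as arguments, a customised keyboard can be built with an ordinary override. A minimal sketch, assuming the derivation is exposed as pkgs.svkbd and that the chosen layout name exists in the upstream Makefile:

  { pkgs, ... }:
  {
    environment.systemPackages = [
      # Passes LAYOUT=de to the svkbd Makefile via the makeFlags shown above.
      (pkgs.svkbd.override { layout = "de"; })
    ];
  }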
@@ -1,4 +1,4 @@
-{ lib, stdenv, fetchurl, alsaLib, jack2Full, minixml, pkg-config }:
+{ lib, stdenv, fetchurl, alsaLib, jack2, minixml, pkg-config }:

 stdenv.mkDerivation rec {
   name = packageName + "-" + version ;

@@ -13,7 +13,7 @@ stdenv.mkDerivation rec {
   doCheck = false;

   nativeBuildInputs = [ pkg-config ];
-  buildInputs = [ alsaLib minixml jack2Full ];
+  buildInputs = [ alsaLib minixml jack2 ];

   meta = with lib; {
     description = "Tool for storing/restoring JACK and/or ALSA connections to/from cml files";
@@ -15,6 +15,8 @@ mkDerivation rec {
     sha256 = "0kz8wixjmy4yxq2gk11ybswryxb6alfymd3bzcar9xinscllhh3a";
   };

+  outputs = [ "out" "doc" ];
+
   nativeBuildInputs = [ extra-cmake-modules kdoctools ];

   propagatedBuildInputs = [
@@ -18,7 +18,7 @@ in appimageTools.wrapType2 {

     install -m 444 -D ${appimageContents}/${pname}.desktop -t $out/share/applications
     substituteInPlace $out/share/applications/${pname}.desktop \
-      --replace 'Exec=AppRun' 'Exec=$out/bin/apple-music-electron'
+      --replace "Exec=AppRun" "Exec=$out/bin/apple-music-electron"
     cp -r ${appimageContents}/usr/share/icons $out/share
   '';

@@ -10,7 +10,7 @@ stdenv.mkDerivation rec {

   src = fetchurl {
     url = "https://downloads.bitwig.com/stable/${version}/${pname}-${version}.deb";
-    sha256 = "10nf29zr0xg9mxmknkc39jh3y9kpxzy5wg1v0s3kkd180lja9zpn";
+    sha256 = "sha256-k7L6CU2lY9192tfaWtVOxq9BCY7FZZdxmHT8EA+ZFsk=";
   };

   nativeBuildInputs = [ dpkg makeWrapper wrapGAppsHook ];
@@ -1,6 +1,6 @@
 { faust
 , gtk2
-, jack2Full
+, jack2
 , alsaLib
 , opencv
 , libsndfile

@@ -18,7 +18,7 @@ faust.wrapWithBuildEnv {

   propagatedBuildInputs = [
     gtk2
-    jack2Full
+    jack2
     alsaLib
     opencv
     libsndfile
Some files were not shown because too many files have changed in this diff.