Project import generated by Copybara.
GitOrigin-RevId: 301aada7a64812853f2e2634a530ef5d34505048
parent 20617e22f1
commit c594a97518
2716 changed files with 133265 additions and 40023 deletions
@@ -19,8 +19,8 @@ jobs:
 # we don't limit this action to only NixOS repo since the checks are cheap and useful developer feedback
 steps:
 - uses: actions/checkout@v3
-- uses: cachix/install-nix-action@v17
-- uses: cachix/cachix-action@v10
+- uses: cachix/install-nix-action@v18
+- uses: cachix/cachix-action@v11
 with:
 # This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.
 name: nixpkgs-ci

@@ -21,7 +21,7 @@ jobs:
 id: ismerge
 run: |
 ISMERGE=$(curl -H 'Accept: application/vnd.github.groot-preview+json' -H "authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" https://api.github.com/repos/${{ env.GITHUB_REPOSITORY }}/commits/${{ env.GITHUB_SHA }}/pulls | jq -r '.[] | select(.merge_commit_sha == "${{ env.GITHUB_SHA }}") | any')
-echo "::set-output name=ismerge::$ISMERGE"
+echo "ismerge=$ISMERGE" >> $GITHUB_OUTPUT
 # github events are eventually consistent, so wait until changes propagate to thier DB
 - run: sleep 60
 if: steps.ismerge.outputs.ismerge != 'true'

@@ -28,7 +28,7 @@ jobs:
 with:
 # pull_request_target checks out the base branch by default
 ref: refs/pull/${{ github.event.pull_request.number }}/merge
-- uses: cachix/install-nix-action@v17
+- uses: cachix/install-nix-action@v18
 with:
 # nixpkgs commit is pinned so that it doesn't break
 # editorconfig-checker 2.4.0

@@ -18,11 +18,11 @@ jobs:
 with:
 # pull_request_target checks out the base branch by default
 ref: refs/pull/${{ github.event.pull_request.number }}/merge
-- uses: cachix/install-nix-action@v17
+- uses: cachix/install-nix-action@v18
 with:
 # explicitly enable sandbox
 extra_nix_config: sandbox = true
-- uses: cachix/cachix-action@v10
+- uses: cachix/cachix-action@v11
 with:
 # This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.
 name: nixpkgs-ci

@@ -18,11 +18,11 @@ jobs:
 with:
 # pull_request_target checks out the base branch by default
 ref: refs/pull/${{ github.event.pull_request.number }}/merge
-- uses: cachix/install-nix-action@v17
+- uses: cachix/install-nix-action@v18
 with:
 # explicitly enable sandbox
 extra_nix_config: sandbox = true
-- uses: cachix/cachix-action@v10
+- uses: cachix/cachix-action@v11
 with:
 # This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.
 name: nixpkgs-ci

@@ -18,11 +18,11 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v3
-- uses: cachix/install-nix-action@v17
+- uses: cachix/install-nix-action@v18
 with:
 # explicitly enable sandbox
 extra_nix_config: sandbox = true
-- uses: cachix/cachix-action@v10
+- uses: cachix/cachix-action@v11
 with:
 # This cache is for the nixpkgs repo checks and should not be trusted or used elsewhere.
 name: nixpkgs-ci

@@ -19,7 +19,7 @@ jobs:
 with:
 # pull_request_target checks out the base branch by default
 ref: refs/pull/${{ github.event.pull_request.number }}/merge
-- uses: cachix/install-nix-action@v17
+- uses: cachix/install-nix-action@v18
 - name: Check DocBook files generated from Markdown are consistent
 run: |
 nixos/doc/manual/md-to-db.sh

third_party/nixpkgs/.github/workflows/ofborg-pending.yml (vendored, new file, 33 lines)

@@ -0,0 +1,33 @@
+name: "Set pending OfBorg status"
+on:
+pull_request_target:
+
+# Sets the ofborg-eval status to "pending" to signal that we are waiting for
+# OfBorg even if it is running late. The status will be overwritten by OfBorg
+# once it starts evaluation.
+
+# WARNING:
+# When extending this action, be aware that $GITHUB_TOKEN allows (restricted) write access to
+# the GitHub repository. This means that it should not evaluate user input in a
+# way that allows code injection.
+
+permissions:
+contents: read
+
+jobs:
+action:
+if: github.repository_owner == 'NixOS'
+permissions:
+statuses: write
+runs-on: ubuntu-latest
+steps:
+- name: "Set pending OfBorg status"
+env:
+GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+run: |
+curl \
+-X POST \
+-H "Accept: application/vnd.github.v3+json" \
+-H "Authorization: Bearer $GITHUB_TOKEN" \
+-d '{"context": "ofborg-eval", "state": "pending", "description": "Waiting for OfBorg..."}' \
+"https://api.github.com/repos/NixOS/nixpkgs/commits/${{ github.event.pull_request.head.sha }}/statuses"

@@ -1,26 +0,0 @@
-name: "clear pending status"
-
-on:
-check_suite:
-types: [ completed ]
-
-permissions:
-contents: read
-
-jobs:
-action:
-permissions:
-statuses: write
-runs-on: ubuntu-latest
-steps:
-- name: clear pending status
-if: github.repository_owner == 'NixOS' && github.event.check_suite.app.name == 'OfBorg'
-env:
-GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-run: |
-curl \
--X POST \
--H "Accept: application/vnd.github.v3+json" \
--H "Authorization: token $GITHUB_TOKEN" \
--d '{"state": "success", "target_url": " ", "description": " ", "context": "Wait for ofborg"}' \
-"https://api.github.com/repos/NixOS/nixpkgs/statuses/${{ github.event.check_suite.head_sha }}"

@@ -1,30 +0,0 @@
-name: "set pending status"
-
-on:
-pull_request_target:
-
-# WARNING:
-# When extending this action, be aware that $GITHUB_TOKEN allows write access to
-# the GitHub repository. This means that it should not evaluate user input in a
-# way that allows code injection.
-
-permissions:
-contents: read
-
-jobs:
-action:
-permissions:
-statuses: write
-runs-on: ubuntu-latest
-steps:
-- name: set pending status
-if: github.repository_owner == 'NixOS'
-env:
-GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-run: |
-curl \
--X POST \
--H "Accept: application/vnd.github.v3+json" \
--H "Authorization: token $GITHUB_TOKEN" \
--d '{"state": "pending", "target_url": " ", "description": "This pending status will be cleared when ofborg starts eval.", "context": "Wait for ofborg"}' \
-"https://api.github.com/repos/NixOS/nixpkgs/statuses/${{ github.event.pull_request.head.sha }}"

@@ -1,8 +1,8 @@
 name: "Update terraform-providers"

 on:
-#schedule:
-# - cron: "14 3 * * 0"
+schedule:
+- cron: "0 3 * * *"
 workflow_dispatch:

 permissions:

@@ -17,40 +17,39 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@v3
-- uses: cachix/install-nix-action@v17
+- uses: cachix/install-nix-action@v18
 with:
 nix_path: nixpkgs=channel:nixpkgs-unstable
 - name: setup
 id: setup
 run: |
-echo ::set-output name=title::"terraform-providers: update $(date -u +"%Y-%m-%d")"
+echo "title=terraform-providers: update $(date -u +"%Y-%m-%d")" >> $GITHUB_OUTPUT
 - name: update terraform-providers
 run: |
 git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
 git config user.name "github-actions[bot]"
 pushd pkgs/applications/networking/cluster/terraform-providers
 ./update-all-providers --no-build
 git commit -m "${{ steps.setup.outputs.title }}" providers.json
 popd
 echo | nix-shell \
 maintainers/scripts/update.nix \
 --argstr commit true \
 --argstr keep-going true \
 --argstr max-workers 2 \
 --argstr path terraform-providers
 - name: clean repo
 run: |
 git clean -f
 - name: create PR
 uses: peter-evans/create-pull-request@v4
 with:
 body: |
 Automatic update by [update-terraform-providers](https://github.com/NixOS/nixpkgs/blob/master/.github/workflows/update-terraform-providers.yml) action.

 https://github.com/NixOS/nixpkgs/actions/runs/${{ github.run_id }}

 Check that all providers build with:
 ```
 @ofborg build terraform.full
 ```
 branch: terraform-providers-update
 delete-branch: false
 labels: "2.status: work-in-progress"
 title: ${{ steps.setup.outputs.title }}
 token: ${{ secrets.GITHUB_TOKEN }}
 - name: comment on failure
 uses: peter-evans/create-or-update-comment@v2
 if: ${{ failure() }}
 with:
 issue-number: 153416
 body: |
 Automatic update of terraform providers [failed](https://github.com/NixOS/nixpkgs/actions/runs/${{ github.run_id }}).

third_party/nixpkgs/.mailmap (vendored, 1 line changed)

@@ -1 +1,2 @@
+Daniel Løvbrøtte Olsen <me@dandellion.xyz> <daniel.olsen99@gmail.com>
 Sandro <sandro.jaeckel@gmail.com>

third_party/nixpkgs/doc/builders/images.xml (vendored, 1 line changed)

@@ -9,4 +9,5 @@
 <xi:include href="images/dockertools.section.xml" />
 <xi:include href="images/ocitools.section.xml" />
 <xi:include href="images/snaptools.section.xml" />
+<xi:include href="images/portableservice.section.xml" />
 </chapter>

third_party/nixpkgs/doc/builders/images/portableservice.section.md (vendored, new file, 81 lines)

@@ -0,0 +1,81 @@
+# pkgs.portableService {#sec-pkgs-portableService}
+
+`pkgs.portableService` is a function to create _portable service images_,
+as read-only, immutable, `squashfs` archives.
+
+systemd supports a concept of [Portable Services](https://systemd.io/PORTABLE_SERVICES/).
+Portable Services are a delivery method for system services that uses two specific features of container management:
+
+* Applications are bundled. I.e. multiple services, their binaries and
+all their dependencies are packaged in an image, and are run directly from it.
+* Stricter default security policies, i.e. sandboxing of applications.
+
+This allows using Nix to build images which can be run on many recent Linux distributions.
+
+The primary tool for interacting with Portable Services is `portablectl`,
+and they are managed by the `systemd-portabled` system service.
+
+:::{.note}
+Portable services are supported starting with systemd 239 (released on 2018-06-22).
+:::
+
+A very simple example of using `portableService` is described below:
+
+[]{#ex-pkgs-portableService}
+
+```nix
+pkgs.portableService {
+pname = "demo";
+version = "1.0";
+units = [ demo-service demo-socket ];
+}
+```
+
+The above example will build an squashfs archive image in `result/$pname_$version.raw`. The image will contain the
+file system structure as required by the portable service specification, and a subset of the Nix store with all the
+dependencies of the two derivations in the `units` list.
+`units` must be a list of derivations, and their names must be prefixed with the service name (`"demo"` in this case).
+Otherwise `systemd-portabled` will ignore them.
+
+:::{.Note}
+The `.raw` file extension of the image is required by the portable services specification.
+:::
+
+Some other options available are:
+- `description`, `homepage`
+
+Are added to the `/etc/os-release` in the image and are shown by the portable services tooling.
+Default to empty values, not added to os-release.
+- `symlinks`
+
+A list of attribute sets {object, symlink}. Symlinks will be created in the root filesystem of the image to
+objects in the Nix store. Defaults to an empty list.
+- `contents`
+
+A list of additional derivations to be included in the image Nix store, as-is. Defaults to an empty list.
+- `squashfsTools`
+
+Defaults to `pkgs.squashfsTools`, allows you to override the package that provides `mksquashfs`.
+- `squash-compression`, `squash-block-size`
+
+Options to `mksquashfs`. Default to `"xz -Xdict-size 100%"` and `"1M"` respectively.
+
+A typical usage of `symlinks` would be:
+```nix
+symlinks = [
+{ object = "${pkgs.cacert}/etc/ssl"; symlink = "/etc/ssl"; }
+{ object = "${pkgs.bash}/bin/bash"; symlink = "/bin/sh"; }
+{ object = "${pkgs.php}/bin/php"; symlink = "/usr/bin/php"; }
+];
+```
+to create these symlinks for legacy applications that assume them existing globally.
+
+Once the image is created, and deployed on a host in `/var/lib/portables/`, you can attach the image and run the service. As root run:
+```console
+portablectl attach demo_1.0.raw
+systemctl enable --now demo.socket
+systemctl enable --now demo.service
+```
+:::{.Note}
+See the [man page](https://www.freedesktop.org/software/systemd/man/portablectl.html) of `portablectl` for more info on its usage.
+:::

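For illustration only (not part of the commit): one way the `demo-service` and `demo-socket` derivations referenced in the example above might be defined. The unit contents and the use of `pkgs.writeText` and `pkgs.hello` are assumptions, not taken from the diff.

```nix
# Hypothetical sketch: unit files are plain derivations whose names start
# with the service name ("demo"), as required for portable service images.
{ pkgs }:
rec {
  demo-service = pkgs.writeText "demo.service" ''
    [Unit]
    Description=Demo portable service
    [Service]
    ExecStart=${pkgs.hello}/bin/hello -g "Hello from a portable service"
  '';

  demo-socket = pkgs.writeText "demo.socket" ''
    [Socket]
    ListenStream=8080
    [Install]
    WantedBy=sockets.target
  '';

  demo-image = pkgs.portableService {
    pname = "demo";
    version = "1.0";
    units = [ demo-service demo-socket ];
  };
}
```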
@@ -480,15 +480,23 @@ Preferred source hash type is sha256. There are several ways to get it.

 4. Extracting hash from local source tarball can be done with `sha256sum`. Use `nix-prefetch-url file:///path/to/tarball` if you want base32 hash.

-5. Fake hash: set fake hash in package expression, perform build and extract correct hash from error Nix prints.
-For package updates it is enough to change one symbol to make hash fake. For new packages, you can use `lib.fakeSha256`, `lib.fakeSha512` or any other fake hash.
+5. Fake hash: set the hash to one of
+
+- `""`
+- `lib.fakeHash`
+- `lib.fakeSha256`
+- `lib.fakeSha512`
+
+in the package expression, attempt build and extract correct hash from error messages.
+
+:::{.warning}
+You must use one of these four fake hashes and not some arbitrarily-chosen hash.
+
+See [](#sec-source-hashes-security).
+:::

 This is last resort method when reconstructing source URL is non-trivial and `nix-prefetch-url -A` isn’t applicable (for example, [one of `kodi` dependencies](https://github.com/NixOS/nixpkgs/blob/d2ab091dd308b99e4912b805a5eb088dd536adb9/pkgs/applications/video/kodi/default.nix#L73)). The easiest way then would be replace hash with a fake one and rebuild. Nix build will fail and error message will contain desired hash.

 ::: {.warning}
 This method has security problems. Check below for details.
 :::

 ### Obtaining hashes securely {#sec-source-hashes-security}

@@ -500,7 +508,7 @@ Let's say Man-in-the-Middle (MITM) sits close to your network. Then instead of f

 - `https://` URLs are secure in methods 1, 2, 3;

-- `https://` URLs are not secure in method 5. When obtaining hashes with fake hash method, TLS checks are disabled. So refetch source hash from several different networks to exclude MITM scenario. Alternatively, use fake hash method to make Nix error, but instead of extracting hash from error, extract `https://` URL and prefetch it with method 1.
+- `https://` URLs are secure in method 5 *only if* you use one of the listed fake hashes. If you use any other hash, `fetchurl` will pass `--insecure` to `curl` and may then degrade to HTTP in case of TLS certificate expiration.

 ## Patches {#sec-patches}

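For illustration only (not part of the diff): a minimal sketch of the fake-hash workflow described above, using a hypothetical package.

```nix
# Build once with a fake hash, then copy the correct hash out of the
# "hash mismatch" error that Nix prints.
{ lib, stdenv, fetchurl }:

stdenv.mkDerivation rec {
  pname = "example";    # hypothetical package
  version = "1.2.3";
  src = fetchurl {
    url = "https://example.org/example-${version}.tar.gz";
    hash = lib.fakeHash; # replace with the hash reported by the failed build
  };
}
```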
third_party/nixpkgs/doc/hooks/autoconf.section.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
+
+### Autoconf {#setup-hook-autoconf}
+
+The `autoreconfHook` derivation adds `autoreconfPhase`, which runs autoreconf, libtoolize and automake, essentially preparing the configure script in autotools-based builds. Most autotools-based packages come with the configure script pre-generated, but this hook is necessary for a few packages and when you need to patch the package’s configure scripts.

third_party/nixpkgs/doc/hooks/automake.section.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
+
+### Automake {#setup-hook-automake}
+
+Adds the `share/aclocal` subdirectory of each build input to the `ACLOCAL_PATH` environment variable.

third_party/nixpkgs/doc/hooks/autopatchelf.section.md (vendored, new file, 12 lines)

@@ -0,0 +1,12 @@
+
+### autoPatchelfHook {#setup-hook-autopatchelfhook}
+
+This is a special setup hook which helps in packaging proprietary software in that it automatically tries to find missing shared library dependencies of ELF files based on the given `buildInputs` and `nativeBuildInputs`.
+
+You can also specify a `runtimeDependencies` variable which lists dependencies to be unconditionally added to rpath of all executables. This is useful for programs that use dlopen 3 to load libraries at runtime.
+
+In certain situations you may want to run the main command (`autoPatchelf`) of the setup hook on a file or a set of directories instead of unconditionally patching all outputs. This can be done by setting the `dontAutoPatchelf` environment variable to a non-empty value.
+
+By default `autoPatchelf` will fail as soon as any ELF file requires a dependency which cannot be resolved via the given build inputs. In some situations you might prefer to just leave missing dependencies unpatched and continue to patch the rest. This can be achieved by setting the `autoPatchelfIgnoreMissingDeps` environment variable to a non-empty value. `autoPatchelfIgnoreMissingDeps` can be set to a list like `autoPatchelfIgnoreMissingDeps = [ "libcuda.so.1" "libcudart.so.1" ];` or to simply `[ "*" ]` to ignore all missing dependencies.
+
+The `autoPatchelf` command also recognizes a `--no-recurse` command line flag, which prevents it from recursing into subdirectories.

third_party/nixpkgs/doc/hooks/breakpoint.section.md (vendored, new file, 18 lines)

@@ -0,0 +1,18 @@
+
+### breakpointHook {#breakpointhook}
+
+This hook will make a build pause instead of stopping when a failure happens. It prevents nix from cleaning up the build environment immediately and allows the user to attach to a build environment using the `cntr` command. Upon build error it will print instructions on how to use `cntr`, which can be used to enter the environment for debugging. Installing cntr and running the command will provide shell access to the build sandbox of failed build. At `/var/lib/cntr` the sandboxed filesystem is mounted. All commands and files of the system are still accessible within the shell. To execute commands from the sandbox use the cntr exec subcommand. `cntr` is only supported on Linux-based platforms. To use it first add `cntr` to your `environment.systemPackages` on NixOS or alternatively to the root user on non-NixOS systems. Then in the package that is supposed to be inspected, add `breakpointHook` to `nativeBuildInputs`.
+
+```nix
+nativeBuildInputs = [ breakpointHook ];
+```
+
+When a build failure happens there will be an instruction printed that shows how to attach with `cntr` to the build sandbox.
+
+::: {.note}
+::: {.title}
+Caution with remote builds
+:::
+
+This won’t work with remote builds as the build environment is on a different machine and can’t be accessed by `cntr`. Remote builds can be turned off by setting `--option builders ''` for `nix-build` or `--builders ''` for `nix build`.
+:::

third_party/nixpkgs/doc/hooks/cmake.section.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
+
+### cmake {#cmake}
+
+Overrides the default configure phase to run the CMake command. By default, we use the Make generator of CMake. In addition, dependencies are added automatically to `CMAKE_PREFIX_PATH` so that packages are correctly detected by CMake. Some additional flags are passed in to give similar behavior to configure-based packages. You can disable this hook’s behavior by setting `configurePhase` to a custom value, or by setting `dontUseCmakeConfigure`. `cmakeFlags` controls flags passed only to CMake. By default, parallel building is enabled as CMake supports parallel building almost everywhere. When Ninja is also in use, CMake will detect that and use the ninja generator.

third_party/nixpkgs/doc/hooks/gdk-pixbuf.section.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
+
+### gdk-pixbuf {#setup-hook-gdk-pixbuf}
+
+Exports `GDK_PIXBUF_MODULE_FILE` environment variable to the builder. Add librsvg package to `buildInputs` to get svg support. See also the [setup hook description in GNOME platform docs](#ssec-gnome-hooks-gdk-pixbuf).

third_party/nixpkgs/doc/hooks/ghc.section.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
+
+### GHC {#ghc}
+
+Creates a temporary package database and registers every Haskell build input in it (TODO: how?).

third_party/nixpkgs/doc/hooks/gnome.section.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
+
+### GNOME platform {#gnome-platform}
+
+Hooks related to GNOME platform and related libraries like GLib, GTK and GStreamer are described in [](#sec-language-gnome).

third_party/nixpkgs/doc/hooks/index.xml (vendored, 26 lines changed)

@@ -6,5 +6,31 @@
 <para>
 Nixpkgs has several hook packages that augment the stdenv phases.
 </para>
+<para>
+The stdenv built-in hooks are documented in <xref linkend="ssec-setup-hooks"/>.
+</para>
+<xi:include href="./autoconf.section.xml" />
+<xi:include href="./automake.section.xml" />
+<xi:include href="./autopatchelf.section.xml" />
+<xi:include href="./breakpoint.section.xml" />
+<xi:include href="./cmake.section.xml" />
+<xi:include href="./gdk-pixbuf.section.xml" />
+<xi:include href="./ghc.section.xml" />
+<xi:include href="./gnome.section.xml" />
+<xi:include href="./installShellFiles.section.xml" />
+<xi:include href="./libiconv.section.xml" />
+<xi:include href="./libxml2.section.xml" />
+<xi:include href="./meson.section.xml" />
+<xi:include href="./ninja.section.xml" />
+<xi:include href="./perl.section.xml" />
+<xi:include href="./pkg-config.section.xml" />
 <xi:include href="./postgresql-test-hook.section.xml" />
+<xi:include href="./python.section.xml" />
+<xi:include href="./qt-4.section.xml" />
+<xi:include href="./scons.section.xml" />
+<xi:include href="./tetex-tex-live.section.xml" />
+<xi:include href="./unzip.section.xml" />
+<xi:include href="./validatePkgConfig.section.xml" />
+<xi:include href="./waf.section.xml" />
+<xi:include href="./xcbuild.section.xml" />
 </chapter>

third_party/nixpkgs/doc/hooks/installShellFiles.section.md (vendored, new file, 26 lines)

@@ -0,0 +1,26 @@
+
+### `installShellFiles` {#installshellfiles}
+
+This hook helps with installing manpages and shell completion files. It exposes 2 shell functions `installManPage` and `installShellCompletion` that can be used from your `postInstall` hook.
+
+The `installManPage` function takes one or more paths to manpages to install. The manpages must have a section suffix, and may optionally be compressed (with `.gz` suffix). This function will place them into the correct directory.
+
+The `installShellCompletion` function takes one or more paths to shell completion files. By default it will autodetect the shell type from the completion file extension, but you may also specify it by passing one of `--bash`, `--fish`, or `--zsh`. These flags apply to all paths listed after them (up until another shell flag is given). Each path may also have a custom installation name provided by providing a flag `--name NAME` before the path. If this flag is not provided, zsh completions will be renamed automatically such that `foobar.zsh` becomes `_foobar`. A root name may be provided for all paths using the flag `--cmd NAME`; this synthesizes the appropriate name depending on the shell (e.g. `--cmd foo` will synthesize the name `foo.bash` for bash and `_foo` for zsh). The path may also be a fifo or named fd (such as produced by `<(cmd)`), in which case the shell and name must be provided.
+
+```nix
+nativeBuildInputs = [ installShellFiles ];
+postInstall = ''
+installManPage doc/foobar.1 doc/barfoo.3
+# explicit behavior
+installShellCompletion --bash --name foobar.bash share/completions.bash
+installShellCompletion --fish --name foobar.fish share/completions.fish
+installShellCompletion --zsh --name _foobar share/completions.zsh
+# implicit behavior
+installShellCompletion share/completions/foobar.{bash,fish,zsh}
+# using named fd
+installShellCompletion --cmd foobar \
+--bash <($out/bin/foobar --bash-completion) \
+--fish <($out/bin/foobar --fish-completion) \
+--zsh <($out/bin/foobar --zsh-completion)
+'';
+```

third_party/nixpkgs/doc/hooks/libiconv.section.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
+
+### libiconv, libintl {#libiconv-libintl}
+
+A few libraries automatically add to `NIX_LDFLAGS` their library, making their symbols automatically available to the linker. This includes libiconv and libintl (gettext). This is done to provide compatibility between GNU Linux, where libiconv and libintl are bundled in, and other systems where that might not be the case. Sometimes, this behavior is not desired. To disable this behavior, set `dontAddExtraLibs`.

third_party/nixpkgs/doc/hooks/libxml2.section.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
+
+### libxml2 {#setup-hook-libxml2}
+
+Adds every file named `catalog.xml` found under the `xml/dtd` and `xml/xsl` subdirectories of each build input to the `XML_CATALOG_FILES` environment variable.

third_party/nixpkgs/doc/hooks/meson.section.md (vendored, new file, 26 lines)

@@ -0,0 +1,26 @@
+
+### Meson {#meson}
+
+Overrides the configure phase to run meson to generate Ninja files. To run these files, you should accompany Meson with ninja. By default, `enableParallelBuilding` is enabled as Meson supports parallel building almost everywhere.
+
+#### Variables controlling Meson {#variables-controlling-meson}
+
+##### `mesonFlags` {#mesonflags}
+
+Controls the flags passed to meson.
+
+##### `mesonBuildType` {#mesonbuildtype}
+
+Which [`--buildtype`](https://mesonbuild.com/Builtin-options.html#core-options) to pass to Meson. We default to `plain`.
+
+##### `mesonAutoFeatures` {#mesonautofeatures}
+
+What value to set [`-Dauto_features=`](https://mesonbuild.com/Builtin-options.html#core-options) to. We default to `enabled`.
+
+##### `mesonWrapMode` {#mesonwrapmode}
+
+What value to set [`-Dwrap_mode=`](https://mesonbuild.com/Builtin-options.html#core-options) to. We default to `nodownload` as we disallow network access.
+
+##### `dontUseMesonConfigure` {#dontusemesonconfigure}
+
+Disables using Meson’s `configurePhase`.

third_party/nixpkgs/doc/hooks/ninja.section.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
+
+### ninja {#ninja}
+
+Overrides the build, install, and check phase to run ninja instead of make. You can disable this behavior with the `dontUseNinjaBuild`, `dontUseNinjaInstall`, and `dontUseNinjaCheck`, respectively. Parallel building is enabled by default in Ninja.

third_party/nixpkgs/doc/hooks/perl.section.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
+
+### Perl {#setup-hook-perl}
+
+Adds the `lib/site_perl` subdirectory of each build input to the `PERL5LIB` environment variable. For instance, if `buildInputs` contains Perl, then the `lib/site_perl` subdirectory of each input is added to the `PERL5LIB` environment variable.

third_party/nixpkgs/doc/hooks/pkg-config.section.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
+
+### pkg-config {#setup-hook-pkg-config}
+
+Adds the `lib/pkgconfig` and `share/pkgconfig` subdirectories of each build input to the `PKG_CONFIG_PATH` environment variable.

third_party/nixpkgs/doc/hooks/python.section.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
+
+### Python {#setup-hook-python}
+
+Adds the `lib/${python.libPrefix}/site-packages` subdirectory of each build input to the `PYTHONPATH` environment variable.

third_party/nixpkgs/doc/hooks/qt-4.section.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
+
+### Qt 4 {#qt-4}
+
+Sets the `QTDIR` environment variable to Qt’s path.

third_party/nixpkgs/doc/hooks/scons.section.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
+
+### scons {#scons}
+
+Overrides the build, install, and check phases. This uses the scons build system as a replacement for make. scons does not provide a configure phase, so everything is managed at build and install time.

third_party/nixpkgs/doc/hooks/tetex-tex-live.section.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
+
+### teTeX / TeX Live {#tetex-tex-live}
+
+Adds the `share/texmf-nix` subdirectory of each build input to the `TEXINPUTS` environment variable.

third_party/nixpkgs/doc/hooks/unzip.section.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
+
+### unzip {#unzip}
+
+This setup hook will allow you to unzip .zip files specified in `$src`. There are many similar packages like `unrar`, `undmg`, etc.

third_party/nixpkgs/doc/hooks/validatePkgConfig.section.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
+
+### validatePkgConfig {#validatepkgconfig}
+
+The `validatePkgConfig` hook validates all pkg-config (`.pc`) files in a package. This helps catching some common errors in pkg-config files, such as undefined variables.

third_party/nixpkgs/doc/hooks/waf.section.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
+
+### wafHook {#wafhook}
+
+Overrides the configure, build, and install phases. This will run the “waf” script used by many projects. If `wafPath` (default `./waf`) doesn’t exist, it will copy the version of waf available in Nixpkgs. `wafFlags` can be used to pass flags to the waf script.

third_party/nixpkgs/doc/hooks/xcbuild.section.md (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
+
+### xcbuildHook {#xcbuildhook}
+
+Overrides the build and install phases to run the "xcbuild" command. This hook is needed when a project only comes with build files for the XCode build system. You can disable this behavior by setting buildPhase and configurePhase to a custom value. xcbuildFlags controls flags passed only to xcbuild.

third_party/nixpkgs/doc/stdenv/stdenv.chapter.md (vendored, 169 lines changed)

@@ -1109,13 +1109,15 @@ This setup hook moves any libraries installed in the `lib64/` subdirectory into

 This setup hook moves any systemd user units installed in the `lib/` subdirectory into `share/`. In addition, a link is provided from `share/` to `lib/` for compatibility. This is needed for systemd to find user services when installed into the user profile.
+
+This hook only runs when compiling for Linux.

 ### `set-source-date-epoch-to-latest.sh` {#set-source-date-epoch-to-latest.sh}

 This sets `SOURCE_DATE_EPOCH` to the modification time of the most recent file.

-### Bintools Wrapper {#bintools-wrapper}
+### Bintools Wrapper and hook {#bintools-wrapper}

-The Bintools Wrapper wraps the binary utilities for a bunch of miscellaneous purposes. These are GNU Binutils when targetting Linux, and a mix of cctools and GNU binutils for Darwin. \[The “Bintools” name is supposed to be a compromise between “Binutils” and “cctools” not denoting any specific implementation.\] Specifically, the underlying bintools package, and a C standard library (glibc or Darwin’s libSystem, just for the dynamic loader) are all fed in, and dependency finding, hardening (see below), and purity checks for each are handled by the Bintools Wrapper. Packages typically depend on CC Wrapper, which in turn (at run time) depends on the Bintools Wrapper.
+The Bintools Wrapper wraps the binary utilities for a bunch of miscellaneous purposes. These are GNU Binutils when targeting Linux, and a mix of cctools and GNU binutils for Darwin. \[The “Bintools” name is supposed to be a compromise between “Binutils” and “cctools” not denoting any specific implementation.\] Specifically, the underlying bintools package, and a C standard library (glibc or Darwin’s libSystem, just for the dynamic loader) are all fed in, and dependency finding, hardening (see below), and purity checks for each are handled by the Bintools Wrapper. Packages typically depend on CC Wrapper, which in turn (at run time) depends on the Bintools Wrapper.

 The Bintools Wrapper was only just recently split off from CC Wrapper, so the division of labor is still being worked out. For example, it shouldn’t care about the C standard library, but just take a derivation with the dynamic loader (which happens to be the glibc on linux). Dependency finding however is a task both wrappers will continue to need to share, and probably the most important to understand. It is currently accomplished by collecting directories of host-platform dependencies (i.e. `buildInputs` and `nativeBuildInputs`) in environment variables. The Bintools Wrapper’s setup hook causes any `lib` and `lib64` subdirectories to be added to `NIX_LDFLAGS`. Since the CC Wrapper and the Bintools Wrapper use the same strategy, most of the Bintools Wrapper code is sparsely commented and refers to the CC Wrapper. But the CC Wrapper’s code, by contrast, has quite lengthy comments. The Bintools Wrapper merely cites those, rather than repeating them, to avoid falling out of sync.

@@ -1123,173 +1125,20 @@ A final task of the setup hook is defining a number of standard environment variables

 A problem with this final task is that the Bintools Wrapper is honest and defines `LD` as `ld`. Most packages, however, firstly use the C compiler for linking, secondly use `LD` anyways, defining it as the C compiler, and thirdly, only so define `LD` when it is undefined as a fallback. This triple-threat means Bintools Wrapper will break those packages, as LD is already defined as the actual linker which the package won’t override yet doesn’t want to use. The workaround is to define, just for the problematic package, `LD` as the C compiler. A good way to do this would be `preConfigure = "LD=$CC"`.

-### CC Wrapper {#cc-wrapper}
+### CC Wrapper and hook {#cc-wrapper}

 The CC Wrapper wraps a C toolchain for a bunch of miscellaneous purposes. Specifically, a C compiler (GCC or Clang), wrapped binary tools, and a C standard library (glibc or Darwin’s libSystem, just for the dynamic loader) are all fed in, and dependency finding, hardening (see below), and purity checks for each are handled by the CC Wrapper. Packages typically depend on the CC Wrapper, which in turn (at run-time) depends on the Bintools Wrapper.

-Dependency finding is undoubtedly the main task of the CC Wrapper. This works just like the Bintools Wrapper, except that any `include` subdirectory of any relevant dependency is added to `NIX_CFLAGS_COMPILE`. The setup hook itself contains some lengthy comments describing the exact convoluted mechanism by which this is accomplished.
+Dependency finding is undoubtedly the main task of the CC Wrapper. This works just like the Bintools Wrapper, except that any `include` subdirectory of any relevant dependency is added to `NIX_CFLAGS_COMPILE`. The setup hook itself contains elaborate comments describing the exact mechanism by which this is accomplished.

 Similarly, the CC Wrapper follows the Bintools Wrapper in defining standard environment variables with the names of the tools it wraps, for the same reasons described above. Importantly, while it includes a `cc` symlink to the c compiler for portability, the `CC` will be defined using the compiler’s “real name” (i.e. `gcc` or `clang`). This helps lousy build systems that inspect on the name of the compiler rather than run it.

-Here are some more packages that provide a setup hook. Since the list of hooks is extensible, this is not an exhaustive list. The mechanism is only to be used as a last resort, so it might cover most uses.
-
-### Perl {#setup-hook-perl}
-
-Adds the `lib/site_perl` subdirectory of each build input to the `PERL5LIB` environment variable. For instance, if `buildInputs` contains Perl, then the `lib/site_perl` subdirectory of each input is added to the `PERL5LIB` environment variable.
-
-### Python {#setup-hook-python}
-
-Adds the `lib/${python.libPrefix}/site-packages` subdirectory of each build input to the `PYTHONPATH` environment variable.
-
-### pkg-config {#setup-hook-pkg-config}
-
-Adds the `lib/pkgconfig` and `share/pkgconfig` subdirectories of each build input to the `PKG_CONFIG_PATH` environment variable.
-
-### Automake {#setup-hook-automake}
-
-Adds the `share/aclocal` subdirectory of each build input to the `ACLOCAL_PATH` environment variable.
-
-### Autoconf {#setup-hook-autoconf}
-
-The `autoreconfHook` derivation adds `autoreconfPhase`, which runs autoreconf, libtoolize and automake, essentially preparing the configure script in autotools-based builds. Most autotools-based packages come with the configure script pre-generated, but this hook is necessary for a few packages and when you need to patch the package’s configure scripts.
-
-### libxml2 {#setup-hook-libxml2}
-
-Adds every file named `catalog.xml` found under the `xml/dtd` and `xml/xsl` subdirectories of each build input to the `XML_CATALOG_FILES` environment variable.
-
-### teTeX / TeX Live {#tetex-tex-live}
-
-Adds the `share/texmf-nix` subdirectory of each build input to the `TEXINPUTS` environment variable.
-
-### Qt 4 {#qt-4}
-
-Sets the `QTDIR` environment variable to Qt’s path.
-
-### gdk-pixbuf {#setup-hook-gdk-pixbuf}
-
-Exports `GDK_PIXBUF_MODULE_FILE` environment variable to the builder. Add librsvg package to `buildInputs` to get svg support. See also the [setup hook description in GNOME platform docs](#ssec-gnome-hooks-gdk-pixbuf).
-
-### GHC {#ghc}
-
-Creates a temporary package database and registers every Haskell build input in it (TODO: how?).
-
-### GNOME platform {#gnome-platform}
-
-Hooks related to GNOME platform and related libraries like GLib, GTK and GStreamer are described in [](#sec-language-gnome).
-
-### autoPatchelfHook {#setup-hook-autopatchelfhook}
-
-This is a special setup hook which helps in packaging proprietary software in that it automatically tries to find missing shared library dependencies of ELF files based on the given `buildInputs` and `nativeBuildInputs`.
-
-You can also specify a `runtimeDependencies` variable which lists dependencies to be unconditionally added to rpath of all executables. This is useful for programs that use dlopen 3 to load libraries at runtime.
-
-In certain situations you may want to run the main command (`autoPatchelf`) of the setup hook on a file or a set of directories instead of unconditionally patching all outputs. This can be done by setting the `dontAutoPatchelf` environment variable to a non-empty value.
-
-By default `autoPatchelf` will fail as soon as any ELF file requires a dependency which cannot be resolved via the given build inputs. In some situations you might prefer to just leave missing dependencies unpatched and continue to patch the rest. This can be achieved by setting the `autoPatchelfIgnoreMissingDeps` environment variable to a non-empty value. `autoPatchelfIgnoreMissingDeps` can be set to a list like `autoPatchelfIgnoreMissingDeps = [ "libcuda.so.1" "libcudart.so.1" ];` or to simply `[ "*" ]` to ignore all missing dependencies.
-
-The `autoPatchelf` command also recognizes a `--no-recurse` command line flag, which prevents it from recursing into subdirectories.
-
-### breakpointHook {#breakpointhook}
-
-This hook will make a build pause instead of stopping when a failure happens. It prevents nix from cleaning up the build environment immediately and allows the user to attach to a build environment using the `cntr` command. Upon build error it will print instructions on how to use `cntr`, which can be used to enter the environment for debugging. Installing cntr and running the command will provide shell access to the build sandbox of failed build. At `/var/lib/cntr` the sandboxed filesystem is mounted. All commands and files of the system are still accessible within the shell. To execute commands from the sandbox use the cntr exec subcommand. `cntr` is only supported on Linux-based platforms. To use it first add `cntr` to your `environment.systemPackages` on NixOS or alternatively to the root user on non-NixOS systems. Then in the package that is supposed to be inspected, add `breakpointHook` to `nativeBuildInputs`.
-
-```nix
-nativeBuildInputs = [ breakpointHook ];
-```
-
-When a build failure happens there will be an instruction printed that shows how to attach with `cntr` to the build sandbox.
-
-::: {.note}
-::: {.title}
-Caution with remote builds
-:::
-
-This won’t work with remote builds as the build environment is on a different machine and can’t be accessed by `cntr`. Remote builds can be turned off by setting `--option builders ''` for `nix-build` or `--builders ''` for `nix build`.
-:::
-
-### installShellFiles {#installshellfiles}
-
-This hook helps with installing manpages and shell completion files. It exposes 2 shell functions `installManPage` and `installShellCompletion` that can be used from your `postInstall` hook.
-
-The `installManPage` function takes one or more paths to manpages to install. The manpages must have a section suffix, and may optionally be compressed (with `.gz` suffix). This function will place them into the correct directory.
-
-The `installShellCompletion` function takes one or more paths to shell completion files. By default it will autodetect the shell type from the completion file extension, but you may also specify it by passing one of `--bash`, `--fish`, or `--zsh`. These flags apply to all paths listed after them (up until another shell flag is given). Each path may also have a custom installation name provided by providing a flag `--name NAME` before the path. If this flag is not provided, zsh completions will be renamed automatically such that `foobar.zsh` becomes `_foobar`. A root name may be provided for all paths using the flag `--cmd NAME`; this synthesizes the appropriate name depending on the shell (e.g. `--cmd foo` will synthesize the name `foo.bash` for bash and `_foo` for zsh). The path may also be a fifo or named fd (such as produced by `<(cmd)`), in which case the shell and name must be provided.
-
-```nix
-nativeBuildInputs = [ installShellFiles ];
-postInstall = ''
-installManPage doc/foobar.1 doc/barfoo.3
-# explicit behavior
-installShellCompletion --bash --name foobar.bash share/completions.bash
-installShellCompletion --fish --name foobar.fish share/completions.fish
-installShellCompletion --zsh --name _foobar share/completions.zsh
-# implicit behavior
-installShellCompletion share/completions/foobar.{bash,fish,zsh}
-# using named fd
-installShellCompletion --cmd foobar \
---bash <($out/bin/foobar --bash-completion) \
---fish <($out/bin/foobar --fish-completion) \
---zsh <($out/bin/foobar --zsh-completion)
-'';
-```
-
-### libiconv, libintl {#libiconv-libintl}
-
-A few libraries automatically add to `NIX_LDFLAGS` their library, making their symbols automatically available to the linker. This includes libiconv and libintl (gettext). This is done to provide compatibility between GNU Linux, where libiconv and libintl are bundled in, and other systems where that might not be the case. Sometimes, this behavior is not desired. To disable this behavior, set `dontAddExtraLibs`.
-
-### validatePkgConfig {#validatepkgconfig}
-
-The `validatePkgConfig` hook validates all pkg-config (`.pc`) files in a package. This helps catching some common errors in pkg-config files, such as undefined variables.
-
-### cmake {#cmake}
-
-Overrides the default configure phase to run the CMake command. By default, we use the Make generator of CMake. In addition, dependencies are added automatically to `CMAKE_PREFIX_PATH` so that packages are correctly detected by CMake. Some additional flags are passed in to give similar behavior to configure-based packages. You can disable this hook’s behavior by setting `configurePhase` to a custom value, or by setting `dontUseCmakeConfigure`. `cmakeFlags` controls flags passed only to CMake. By default, parallel building is enabled as CMake supports parallel building almost everywhere. When Ninja is also in use, CMake will detect that and use the ninja generator.
-
-### xcbuildHook {#xcbuildhook}
-
-Overrides the build and install phases to run the "xcbuild" command. This hook is needed when a project only comes with build files for the XCode build system. You can disable this behavior by setting buildPhase and configurePhase to a custom value. xcbuildFlags controls flags passed only to xcbuild.
-
-### Meson {#meson}
-
-Overrides the configure phase to run meson to generate Ninja files. To run these files, you should accompany Meson with ninja. By default, `enableParallelBuilding` is enabled as Meson supports parallel building almost everywhere.
-
-#### Variables controlling Meson {#variables-controlling-meson}
-
-##### `mesonFlags` {#mesonflags}
-
-Controls the flags passed to meson.
-
-##### `mesonBuildType` {#mesonbuildtype}
-
-Which [`--buildtype`](https://mesonbuild.com/Builtin-options.html#core-options) to pass to Meson. We default to `plain`.
-
-##### `mesonAutoFeatures` {#mesonautofeatures}
-
-What value to set [`-Dauto_features=`](https://mesonbuild.com/Builtin-options.html#core-options) to. We default to `enabled`.
-
-##### `mesonWrapMode` {#mesonwrapmode}
-
-What value to set [`-Dwrap_mode=`](https://mesonbuild.com/Builtin-options.html#core-options) to. We default to `nodownload` as we disallow network access.
-
-##### `dontUseMesonConfigure` {#dontusemesonconfigure}
-
-Disables using Meson’s `configurePhase`.
-
-### ninja {#ninja}
-
-Overrides the build, install, and check phase to run ninja instead of make. You can disable this behavior with the `dontUseNinjaBuild`, `dontUseNinjaInstall`, and `dontUseNinjaCheck`, respectively. Parallel building is enabled by default in Ninja.
-
-### unzip {#unzip}
-
-This setup hook will allow you to unzip .zip files specified in `$src`. There are many similar packages like `unrar`, `undmg`, etc.
-
-### wafHook {#wafhook}
-
-Overrides the configure, build, and install phases. This will run the “waf” script used by many projects. If `wafPath` (default `./waf`) doesn’t exist, it will copy the version of waf available in Nixpkgs. `wafFlags` can be used to pass flags to the waf script.
-
-### scons {#scons}
-
-Overrides the build, install, and check phases. This uses the scons build system as a replacement for make. scons does not provide a configure phase, so everything is managed at build and install time.
+### Other hooks
+
+Many other packages provide hooks, that are not part of `stdenv`. You can find
+these in the [Hooks Reference](#chap-hooks).

 ## Purity in Nixpkgs {#sec-purity-in-nixpkgs}

third_party/nixpkgs/lib/ascii-table.nix (vendored, new file, 96 lines)

@@ -0,0 +1,96 @@
+{ " " = 32;
+  "!" = 33;
+  "\"" = 34;
+  "#" = 35;
+  "$" = 36;
+  "%" = 37;
+  "&" = 38;
+  "'" = 39;
+  "(" = 40;
+  ")" = 41;
+  "*" = 42;
+  "+" = 43;
+  "," = 44;
+  "-" = 45;
+  "." = 46;
+  "/" = 47;
+  "0" = 48;
+  "1" = 49;
+  "2" = 50;
+  "3" = 51;
+  "4" = 52;
+  "5" = 53;
+  "6" = 54;
+  "7" = 55;
+  "8" = 56;
+  "9" = 57;
+  ":" = 58;
+  ";" = 59;
+  "<" = 60;
+  "=" = 61;
+  ">" = 62;
+  "?" = 63;
+  "@" = 64;
+  "A" = 65;
+  "B" = 66;
+  "C" = 67;
+  "D" = 68;
+  "E" = 69;
+  "F" = 70;
+  "G" = 71;
+  "H" = 72;
+  "I" = 73;
+  "J" = 74;
+  "K" = 75;
+  "L" = 76;
+  "M" = 77;
+  "N" = 78;
+  "O" = 79;
+  "P" = 80;
+  "Q" = 81;
+  "R" = 82;
+  "S" = 83;
+  "T" = 84;
+  "U" = 85;
+  "V" = 86;
+  "W" = 87;
+  "X" = 88;
+  "Y" = 89;
+  "Z" = 90;
+  "[" = 91;
+  "\\" = 92;
+  "]" = 93;
+  "^" = 94;
+  "_" = 95;
+  "`" = 96;
+  "a" = 97;
+  "b" = 98;
+  "c" = 99;
+  "d" = 100;
+  "e" = 101;
+  "f" = 102;
+  "g" = 103;
+  "h" = 104;
+  "i" = 105;
+  "j" = 106;
+  "k" = 107;
+  "l" = 108;
+  "m" = 109;
+  "n" = 110;
+  "o" = 111;
+  "p" = 112;
+  "q" = 113;
+  "r" = 114;
+  "s" = 115;
+  "t" = 116;
+  "u" = 117;
+  "v" = 118;
+  "w" = 119;
+  "x" = 120;
+  "y" = 121;
+  "z" = 122;
+  "{" = 123;
+  "|" = 124;
+  "}" = 125;
+  "~" = 126;
+}

third_party/nixpkgs/lib/attrsets.nix (vendored, 14 lines changed)

@@ -622,6 +622,20 @@ rec {
 dontRecurseIntoAttrs =
 attrs: attrs // { recurseForDerivations = false; };

+/* `unionOfDisjoint x y` is equal to `x // y // z` where the
+attrnames in `z` are the intersection of the attrnames in `x` and
+`y`, and all values `assert` with an error message. This
+operator is commutative, unlike (//). */
+unionOfDisjoint = x: y:
+let
+intersection = builtins.intersectAttrs x y;
+collisions = lib.concatStringsSep " " (builtins.attrNames intersection);
+mask = builtins.mapAttrs (name: value: builtins.throw
+"unionOfDisjoint: collision on ${name}; complete list: ${collisions}")
+intersection;
+in
+(x // y) // mask;
+
 /*** deprecated stuff ***/

 zipWithNames = zipAttrsWithNames;

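For illustration only (not part of the diff): how `unionOfDisjoint` added above behaves, assuming it is exposed as `lib.attrsets.unionOfDisjoint`.

```nix
let
  lib = import ./lib;            # path is illustrative
  a = { x = 1; y = 2; };
  b = { z = 3; };
  c = { y = 20; };
in {
  ok    = lib.attrsets.unionOfDisjoint a b;     # { x = 1; y = 2; z = 3; }
  clash = (lib.attrsets.unionOfDisjoint a c).y; # throws "unionOfDisjoint: collision on y; complete list: y"
}
```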
third_party/nixpkgs/lib/licenses.nix (vendored, 26 lines changed)

@@ -78,6 +78,12 @@ in mkLicense lset) ({
 url = "https://aomedia.org/license/patent-license/";
 };

+apsl10 = {
+spdxId = "APSL-1.0";
+fullName = "Apple Public Source License 1.0";
+url = "https://web.archive.org/web/20040701000000*/http://www.opensource.apple.com/apsl/1.0.txt";
+};
+
 apsl20 = {
 spdxId = "APSL-2.0";
 fullName = "Apple Public Source License 2.0";

@@ -548,6 +554,16 @@ in mkLicense lset) ({
 free = false;
 };

+lal12 = {
+spdxId = "LAL-1.2";
+fullName = "Licence Art Libre 1.2";
+};
+
+lal13 = {
+spdxId = "LAL-1.3";
+fullName = "Licence Art Libre 1.3";
+};
+
 lgpl2Only = {
 spdxId = "LGPL-2.0-only";
 fullName = "GNU Library General Public License v2 only";

@@ -593,6 +609,11 @@ in mkLicense lset) ({
 fullName = "PNG Reference Library version 2";
 };

+libssh2 = {
+fullName = "libssh2 License";
+url = "https://www.libssh2.org/license.html";
+};
+
 libtiff = {
 spdxId = "libtiff";
 fullName = "libtiff License";

@@ -979,11 +1000,6 @@ in mkLicense lset) ({
 fullName = "GNU Free Documentation License v1.3";
 deprecated = true;
 };
-gpl1 = {
-spdxId = "GPL-1.0";
-fullName = "GNU General Public License v1.0";
-deprecated = true;
-};
 gpl2 = {
 spdxId = "GPL-2.0";
 fullName = "GNU General Public License v2.0";

third_party/nixpkgs/lib/options.nix (vendored, 12 lines changed)

@@ -322,10 +322,16 @@ rec {
 showOption = parts: let
 escapeOptionPart = part:
 let
-escaped = lib.strings.escapeNixString part;
-in if escaped == "\"${part}\""
+# We assume that these are "special values" and not real configuration data.
+# If it is real configuration data, it is rendered incorrectly.
+specialIdentifiers = [
+"<name>" # attrsOf (submodule {})
+"*" # listOf (submodule {})
+"<function body>" # functionTo
+];
+in if builtins.elem part specialIdentifiers
 then part
-else escaped;
+else lib.strings.escapeNixIdentifier part;
 in (concatStringsSep ".") (map escapeOptionPart parts);
 showFiles = files: concatStringsSep " and " (map (f: "`${f}'") files);

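For illustration only (not part of the diff): the rendering difference the `showOption` change makes, with hypothetical option paths.

```nix
let lib = import ./lib; in [
  (lib.options.showOption [ "fileSystems" "<name>" "fsType" ]) # "fileSystems.<name>.fsType" (placeholder passes through unescaped)
  (lib.options.showOption [ "services" "foo bar" "enable" ])   # "services.\"foo bar\".enable" (escapeNixIdentifier quotes it)
]
```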
38
third_party/nixpkgs/lib/strings.nix
vendored
38
third_party/nixpkgs/lib/strings.nix
vendored
|
@ -185,6 +185,16 @@ rec {
|
|||
*/
|
||||
makeBinPath = makeSearchPathOutput "bin" "bin";
|
||||
|
||||
/* Normalize path, removing extranous /s
|
||||
|
||||
Type: normalizePath :: string -> string
|
||||
|
||||
Example:
|
||||
normalizePath "/a//b///c/"
|
||||
=> "/a/b/c/"
|
||||
*/
|
||||
normalizePath = s: (builtins.foldl' (x: y: if y == "/" && hasSuffix "/" x then x else x+y) "" (splitString "" s));
|
||||
|
||||
/* Depending on the boolean `cond', return either the given string
|
||||
or the empty string. Useful to concatenate against a bigger string.
|
||||
|
||||
|
@ -294,6 +304,21 @@ rec {
|
|||
map f (stringToCharacters s)
|
||||
);
|
||||
|
||||
/* Convert char to ascii value, must be in printable range
|
||||
|
||||
Type: charToInt :: string -> int
|
||||
|
||||
Example:
|
||||
charToInt "A"
|
||||
=> 65
|
||||
charToInt "("
|
||||
=> 40
|
||||
|
||||
*/
|
||||
charToInt = let
|
||||
table = import ./ascii-table.nix;
|
||||
in c: builtins.getAttr c table;
|
||||
|
||||
/* Escape occurrence of the elements of `list` in `string` by
|
||||
prefixing it with a backslash.
|
||||
|
||||
|
@ -305,6 +330,19 @@ rec {
|
|||
*/
|
||||
escape = list: replaceChars list (map (c: "\\${c}") list);
|
||||
|
||||
/* Escape occurrences of the elements of `list` in `string` by
converting them to their ASCII values and prefixing them with \\x.
Only works for printable ASCII characters.
|
||||
|
||||
Type: escapeC = [string] -> string -> string
|
||||
|
||||
Example:
|
||||
escapeC [" "] "foo bar"
|
||||
=> "foo\\x20bar"
|
||||
|
||||
*/
|
||||
escapeC = list: replaceChars list (map (c: "\\x${ toLower (lib.toHexString (charToInt c))}") list);
|
||||
|
||||
/* Quote string to be used safely within the Bourne shell.
|
||||
|
||||
Type: escapeShellArg :: string -> string
|
||||
|
|
15 third_party/nixpkgs/lib/tests/misc.nix (vendored)
|
@ -312,6 +312,21 @@ runTests {
|
|||
expected = true;
|
||||
};
|
||||
|
||||
testNormalizePath = {
|
||||
expr = strings.normalizePath "//a/b//c////d/";
|
||||
expected = "/a/b/c/d/";
|
||||
};
|
||||
|
||||
testCharToInt = {
|
||||
expr = strings.charToInt "A";
|
||||
expected = 65;
|
||||
};
|
||||
|
||||
testEscapeC = {
|
||||
expr = strings.escapeC [ " " ] "Hello World";
|
||||
expected = "Hello\\x20World";
|
||||
};
|
||||
|
||||
# LISTS
|
||||
|
||||
testFilter = {
|
||||
|
|
4 third_party/nixpkgs/lib/tests/modules.sh (vendored)
|
@ -302,11 +302,11 @@ checkConfigOutput '^"baz"$' config.value.nested.bar.baz ./types-anything/mk-mods
|
|||
## types.functionTo
|
||||
checkConfigOutput '^"input is input"$' config.result ./functionTo/trivial.nix
|
||||
checkConfigOutput '^"a b"$' config.result ./functionTo/merging-list.nix
|
||||
checkConfigError 'A definition for option .fun.\[function body\]. is not of type .string.. Definition values:\n\s*- In .*wrong-type.nix' config.result ./functionTo/wrong-type.nix
|
||||
checkConfigError 'A definition for option .fun.<function body>. is not of type .string.. Definition values:\n\s*- In .*wrong-type.nix' config.result ./functionTo/wrong-type.nix
|
||||
checkConfigOutput '^"b a"$' config.result ./functionTo/list-order.nix
|
||||
checkConfigOutput '^"a c"$' config.result ./functionTo/merging-attrs.nix
|
||||
checkConfigOutput '^"a bee"$' config.result ./functionTo/submodule-options.nix
|
||||
checkConfigOutput '^"fun.\[function body\].a fun.\[function body\].b"$' config.optionsResult ./functionTo/submodule-options.nix
|
||||
checkConfigOutput '^"fun.<function body>.a fun.<function body>.b"$' config.optionsResult ./functionTo/submodule-options.nix
|
||||
|
||||
# moduleType
|
||||
checkConfigOutput '^"a b"$' config.resultFoo ./declare-variants.nix ./define-variant.nix
|
||||
|
|
7 third_party/nixpkgs/lib/types.nix (vendored)
|
@ -262,7 +262,8 @@ rec {
|
|||
};
|
||||
|
||||
unspecified = mkOptionType {
|
||||
name = "unspecified value";
|
||||
name = "unspecified";
|
||||
description = "unspecified value";
|
||||
descriptionClass = "noun";
|
||||
};
|
||||
|
||||
|
@ -616,8 +617,8 @@ rec {
|
|||
descriptionClass = "composite";
|
||||
check = isFunction;
|
||||
merge = loc: defs:
|
||||
fnArgs: (mergeDefinitions (loc ++ [ "[function body]" ]) elemType (map (fn: { inherit (fn) file; value = fn.value fnArgs; }) defs)).mergedValue;
|
||||
getSubOptions = prefix: elemType.getSubOptions (prefix ++ [ "[function body]" ]);
|
||||
fnArgs: (mergeDefinitions (loc ++ [ "<function body>" ]) elemType (map (fn: { inherit (fn) file; value = fn.value fnArgs; }) defs)).mergedValue;
|
||||
getSubOptions = prefix: elemType.getSubOptions (prefix ++ [ "<function body>" ]);
|
||||
getSubModules = elemType.getSubModules;
|
||||
substSubModules = m: functionTo (elemType.substSubModules m);
|
||||
functor = (defaultFunctor "functionTo") // { wrapped = elemType; };
|
||||
|
|
163 third_party/nixpkgs/maintainers/maintainer-list.nix (vendored)
|
@ -1241,6 +1241,15 @@
|
|||
githubId = 12923;
|
||||
name = "Astro";
|
||||
};
|
||||
astrobeastie = {
|
||||
email = "fischervincent98@gmail.com";
|
||||
github = "astrobeastie";
|
||||
githubId = 26362368;
|
||||
name = "Vincent Fischer";
|
||||
keys = [{
|
||||
fingerprint = "BF47 81E1 F304 1ADF 18CE C401 DE16 C7D1 536D A72F";
|
||||
}];
|
||||
};
|
||||
astsmtl = {
|
||||
email = "astsmtl@yandex.ru";
|
||||
github = "astsmtl";
|
||||
|
@ -1541,6 +1550,12 @@
|
|||
githubId = 576355;
|
||||
name = "Bas van Dijk";
|
||||
};
|
||||
BattleCh1cken = {
|
||||
email = "BattleCh1cken@larkov.de";
|
||||
github = "BattleCh1cken";
|
||||
githubId = 75806385;
|
||||
name = "Felix Hass";
|
||||
};
|
||||
Baughn = {
|
||||
email = "sveina@gmail.com";
|
||||
github = "Baughn";
|
||||
|
@ -1745,6 +1760,12 @@
|
|||
githubId = 28444296;
|
||||
name = "Benjamin Hougland";
|
||||
};
|
||||
bigzilla = {
|
||||
email = "m.billyzaelani@gmail.com";
|
||||
github = "bigzilla";
|
||||
githubId = 20436235;
|
||||
name = "Billy Zaelani Malik";
|
||||
};
|
||||
billewanick = {
|
||||
email = "bill@ewanick.com";
|
||||
github = "billewanick";
|
||||
|
@ -1922,6 +1943,12 @@
|
|||
githubId = 2506621;
|
||||
name = "Brayden Willenborg";
|
||||
};
|
||||
brendanreis = {
|
||||
email = "brendanreis@gmail.com";
|
||||
name = "Brendan Reis";
|
||||
github = "brendanreis";
|
||||
githubId = 10686906;
|
||||
};
|
||||
brian-dawn = {
|
||||
email = "brian.t.dawn@gmail.com";
|
||||
github = "brian-dawn";
|
||||
|
@ -3592,6 +3619,12 @@
|
|||
githubId = 10198051;
|
||||
name = "Drew Risinger";
|
||||
};
|
||||
dritter = {
|
||||
email = "dritter03@googlemail.com";
|
||||
github = "dritter";
|
||||
githubId = 1544760;
|
||||
name = "Dominik Ritter";
|
||||
};
|
||||
drperceptron = {
|
||||
email = "92106371+drperceptron@users.noreply.github.com";
|
||||
github = "drperceptron";
|
||||
|
@ -4383,6 +4416,12 @@
|
|||
name = "Fedx sudo";
|
||||
matrix = "fedx:matrix.org";
|
||||
};
|
||||
fee1-dead = {
|
||||
email = "ent3rm4n@gmail.com";
|
||||
github = "fee1-dead";
|
||||
githubId = 43851243;
|
||||
name = "Deadbeef";
|
||||
};
|
||||
fehnomenal = {
|
||||
email = "fehnomenal@fehn.systems";
|
||||
github = "fehnomenal";
|
||||
|
@ -6376,6 +6415,15 @@
|
|||
githubId = 1204734;
|
||||
name = "Emil Karlson";
|
||||
};
|
||||
jlamur = {
|
||||
email = "contact@juleslamur.fr";
|
||||
github = "jlamur";
|
||||
githubId = 7054317;
|
||||
name = "Jules Lamur";
|
||||
keys = [{
|
||||
fingerprint = "B768 6CD7 451A 650D 9C54 4204 6710 CF0C 1CBD 7762";
|
||||
}];
|
||||
};
|
||||
jlesquembre = {
|
||||
email = "jl@lafuente.me";
|
||||
github = "jlesquembre";
|
||||
|
@ -6501,6 +6549,12 @@
|
|||
githubId = 297653;
|
||||
name = "Joe Salisbury";
|
||||
};
|
||||
john-shaffer = {
|
||||
email = "jdsha@proton.me";
|
||||
github = "john-shaffer";
|
||||
githubId = 53870456;
|
||||
name = "John Shaffer";
|
||||
};
|
||||
johanot = {
|
||||
email = "write@ownrisk.dk";
|
||||
github = "johanot";
|
||||
|
@ -7309,6 +7363,12 @@
|
|||
githubId = 2037002;
|
||||
name = "Konstantinos";
|
||||
};
|
||||
kouyk = {
|
||||
email = "skykinetic@stevenkou.xyz";
|
||||
github = "kouyk";
|
||||
githubId = 1729497;
|
||||
name = "Steven Kou";
|
||||
};
|
||||
kovirobi = {
|
||||
email = "kovirobi@gmail.com";
|
||||
github = "KoviRobi";
|
||||
|
@ -7454,6 +7514,10 @@
|
|||
githubId = 72546287;
|
||||
name = "L3af";
|
||||
};
|
||||
laalsaas = {
|
||||
email = "laalsaas@systemli.org";
|
||||
name = "laalsaas";
|
||||
};
|
||||
lach = {
|
||||
email = "iam@lach.pw";
|
||||
github = "CertainLach";
|
||||
|
@ -8466,6 +8530,12 @@
|
|||
githubId = 95194;
|
||||
name = "Mauricio Scheffer";
|
||||
};
|
||||
maxhero = {
|
||||
email = "contact@maxhero.dev";
|
||||
github = "themaxhero";
|
||||
githubId = 4708337;
|
||||
name = "Marcelo A. de L. Santos";
|
||||
};
|
||||
max-niederman = {
|
||||
email = "max@maxniederman.com";
|
||||
github = "max-niederman";
|
||||
|
@ -9008,6 +9078,15 @@
|
|||
fingerprint = "E90C BA34 55B3 6236 740C 038F 0D94 8CE1 9CF4 9C5F";
|
||||
}];
|
||||
};
|
||||
mktip = {
|
||||
email = "mo.issa.ok+nix@gmail.com";
|
||||
github = "mktip";
|
||||
githubId = 45905717;
|
||||
name = "Mohammad Issa";
|
||||
keys = [{
|
||||
fingerprint = "64BE BF11 96C3 DD7A 443E 8314 1DC0 82FA DE5B A863";
|
||||
}];
|
||||
};
|
||||
mlieberman85 = {
|
||||
email = "mlieberman85@gmail.com";
|
||||
github = "mlieberman85";
|
||||
|
@ -9549,6 +9628,13 @@
|
|||
githubId = 23743547;
|
||||
name = "Akshay Oppiliappan";
|
||||
};
|
||||
ners = {
|
||||
name = "ners";
|
||||
email = "ners@gmx.ch";
|
||||
matrix = "@ners:ners.ch";
|
||||
github = "ners";
|
||||
githubId = 50560955;
|
||||
};
|
||||
nessdoor = {
|
||||
name = "Tomas Antonio Lopez";
|
||||
email = "entropy.overseer@protonmail.com";
|
||||
|
@ -9902,6 +9988,12 @@
|
|||
githubId = 1809198;
|
||||
name = "Victor Roest";
|
||||
};
|
||||
nullishamy = {
|
||||
email = "amy.codes@null.net";
|
||||
name = "nullishamy";
|
||||
github = "nullishamy";
|
||||
githubId = 99221043;
|
||||
};
|
||||
numinit = {
|
||||
email = "me@numin.it";
|
||||
github = "numinit";
|
||||
|
@ -10175,6 +10267,15 @@
|
|||
fingerprint = "F90F FD6D 585C 2BA1 F13D E8A9 7571 654C F88E 31C2";
|
||||
}];
|
||||
};
|
||||
oxapentane = {
|
||||
email = "blame@oxapentane.com";
|
||||
github = "oxapentane";
|
||||
githubId = 1297357;
|
||||
name = "Grigory Shipunov";
|
||||
keys = [{
|
||||
fingerprint = "DD09 98E6 CDF2 9453 7FC6 04F9 91FA 5E5B F9AA 901C";
|
||||
}];
|
||||
};
|
||||
oxij = {
|
||||
email = "oxij@oxij.org";
|
||||
github = "oxij";
|
||||
|
@ -10693,6 +10794,16 @@
|
|||
githubId = 178496;
|
||||
name = "Philipp Middendorf";
|
||||
};
|
||||
pmw = {
|
||||
email = "philip@mailworks.org";
|
||||
matrix = "@philip4g:matrix.org";
|
||||
name = "Philip White";
|
||||
github = "philipmw";
|
||||
githubId = 1379645;
|
||||
keys = [{
|
||||
fingerprint = "9AB0 6C94 C3D1 F9D0 B9D9 A832 BC54 6FB3 B16C 8B0B";
|
||||
}];
|
||||
};
|
||||
pmy = {
|
||||
email = "pmy@xqzp.net";
|
||||
github = "pmeiyu";
|
||||
|
@ -10863,6 +10974,12 @@
|
|||
}
|
||||
];
|
||||
};
|
||||
prtzl = {
|
||||
email = "matej.blagsic@protonmail.com";
|
||||
github = "prtzl";
|
||||
githubId = 32430344;
|
||||
name = "Matej Blagsic";
|
||||
};
|
||||
ProducerMatt = {
|
||||
name = "Matthew Pherigo";
|
||||
email = "ProducerMatt42@gmail.com";
|
||||
|
@ -11644,6 +11761,15 @@
|
|||
githubId = 373566;
|
||||
name = "Ronuk Raval";
|
||||
};
|
||||
rrbutani = {
|
||||
email = "rrbutani+nix@gmail.com";
|
||||
github = "rrbutani";
|
||||
githubId = 7833358;
|
||||
keys = [{
|
||||
fingerprint = "7DCA 5615 8AB2 621F 2F32 9FF4 1C7C E491 479F A273";
|
||||
}];
|
||||
name = "Rahul Butani";
|
||||
};
|
||||
rski = {
|
||||
name = "rski";
|
||||
email = "rom.skiad+nix@gmail.com";
|
||||
|
@ -11963,6 +12089,12 @@
|
|||
github = "sioodmy";
|
||||
githubId = 81568712;
|
||||
};
|
||||
siph = {
|
||||
name = "Chris Dawkins";
|
||||
email = "dawkins.chris.dev@gmail.com";
|
||||
github = "siph";
|
||||
githubId = 6619112;
|
||||
};
|
||||
schmitthenner = {
|
||||
email = "development@schmitthenner.eu";
|
||||
github = "fkz";
|
||||
|
@ -12027,6 +12159,16 @@
|
|||
githubId = 3598650;
|
||||
name = "Fritz Otlinghaus";
|
||||
};
|
||||
Scrumplex = {
|
||||
name = "Sefa Eyeoglu";
|
||||
email = "contact@scrumplex.net";
|
||||
matrix = "@Scrumplex:duckhub.io";
|
||||
github = "Scrumplex";
|
||||
githubId = 11587657;
|
||||
keys = [{
|
||||
fingerprint = "AF1F B107 E188 CB97 9A94 FD7F C104 1129 4912 A422";
|
||||
}];
|
||||
};
|
||||
scubed2 = {
|
||||
email = "scubed2@gmail.com";
|
||||
github = "scubed2";
|
||||
|
@ -12070,6 +12212,15 @@
|
|||
githubId = 1940568;
|
||||
name = "Sebastian Ball";
|
||||
};
|
||||
seberm = {
|
||||
email = "seberm@seberm.com";
|
||||
github = "seberm";
|
||||
githubId = 212597;
|
||||
name = "Otto Sabart";
|
||||
keys = [{
|
||||
fingerprint = "0AF6 4C3B 1F12 14B3 8C8C 5786 1FA2 DBE6 7438 7CC3";
|
||||
}];
|
||||
};
|
||||
sebtm = {
|
||||
email = "mail@sebastian-sellmeier.de";
|
||||
github = "SebTM";
|
||||
|
@ -14332,6 +14483,12 @@
|
|||
fingerprint = "2145 955E 3F5E 0C95 3458 41B5 11F7 BAEA 8567 43FF";
|
||||
}];
|
||||
};
|
||||
wentam = {
|
||||
name = "Matt Egeler";
|
||||
email = "wentam42@gmail.com";
|
||||
github = "wentam";
|
||||
githubId = 901583;
|
||||
};
|
||||
wentasah = {
|
||||
name = "Michal Sojka";
|
||||
email = "wsh@2x.cz";
|
||||
|
@ -15510,4 +15667,10 @@
|
|||
github = "quasigod-io";
|
||||
githubId = 62124625;
|
||||
};
|
||||
waelwindows = {
|
||||
email = "waelwindows9922@gmail.com";
|
||||
github = "Waelwindows";
|
||||
githubId = 5228243;
|
||||
name = "waelwindows";
|
||||
};
|
||||
}
|
||||
|
|
|
@ -31,7 +31,7 @@ sub github_team_members {
|
|||
push @ret, $_->{'login'};
|
||||
}
|
||||
} else {
|
||||
print {*STDERR} "!! Requesting members of GitHub Team '$team_name' failed: $response->status_line";
|
||||
print {*STDERR} "!! Requesting members of GitHub Team '$team_name' failed: " . $response->status_line;
|
||||
}
|
||||
|
||||
return \@ret;
|
||||
|
|
|
@ -14,4 +14,4 @@ commit="$(jq -r .commit.sha <<< "$head_info")"
|
|||
date="$(date "--date=$(jq -r .commit.commit.committer.date <<< "$head_info")" +%F)"
|
||||
# generate nix expression from cabal file, replacing the version with the commit date
|
||||
echo '# This file defines cabal2nix-unstable, used by maintainers/scripts/haskell/regenerate-hackage-packages.sh.' > pkgs/development/haskell-modules/cabal2nix-unstable.nix
|
||||
cabal2nix "https://github.com/NixOS/cabal2nix/archive/$commit.tar.gz" | sed -e 's/version = ".*"/version = "'"unstable-$date"'"/' >> pkgs/development/haskell-modules/cabal2nix-unstable.nix
|
||||
cabal2nix --subpath cabal2nix "https://github.com/NixOS/cabal2nix/archive/$commit.tar.gz" | sed -e 's/version = ".*"/version = "'"unstable-$date"'"/' >> pkgs/development/haskell-modules/cabal2nix-unstable.nix
|
||||
|
|
|
@ -87,7 +87,7 @@ mediator_lua,,,,,,
|
|||
mpack,,,,,,
|
||||
moonscript,https://github.com/leafo/moonscript.git,dev-1,,,,arobyn
|
||||
nvim-client,https://github.com/neovim/lua-client.git,,,,,
|
||||
nvim-cmp,,,,,
|
||||
nvim-cmp,https://github.com/hrsh7th/nvim-cmp,,,,,
|
||||
penlight,https://github.com/lunarmodules/Penlight.git,,,,,alerque
|
||||
plenary.nvim,https://github.com/nvim-lua/plenary.nvim.git,,,,5.1,
|
||||
rapidjson,https://github.com/xpol/lua-rapidjson.git,,,,,
|
||||
|
@ -101,3 +101,4 @@ std.normalize,https://github.com/lua-stdlib/normalize.git,,,,,
|
|||
stdlib,,,,41.2.2,,vyp
|
||||
tl,,,,,,mephistophiles
|
||||
vstruct,https://github.com/ToxicFrog/vstruct.git,,,,,
|
||||
vusted,,,,,,figsoda
|
||||
|
|
|
|
@ -3,8 +3,10 @@
|
|||
stdenv.mkDerivation {
|
||||
name = "nix-generate-from-cpan-3";
|
||||
|
||||
nativeBuildInputs = [ makeWrapper ];
|
||||
|
||||
buildInputs = with perlPackages; [
|
||||
makeWrapper perl GetoptLongDescriptive CPANPLUS Readonly LogLog4perl
|
||||
perl GetoptLongDescriptive CPANPLUS Readonly LogLog4perl
|
||||
];
|
||||
|
||||
phases = [ "installPhase" ];
|
||||
|
|
|
@ -110,6 +110,7 @@ with lib.maintainers; {
|
|||
astro
|
||||
SuperSandro2000
|
||||
revol-xut
|
||||
oxapentane
|
||||
];
|
||||
scope = "Maintain packages used in the C3D2 hackspace";
|
||||
shortName = "c3d2";
|
||||
|
@ -672,7 +673,6 @@ with lib.maintainers; {
|
|||
# Verify additions by approval of an already existing member of the team.
|
||||
members = [
|
||||
balsoft
|
||||
mkaito
|
||||
];
|
||||
scope = "Group registration for Serokell employees who collectively maintain packages.";
|
||||
shortName = "Serokell employees";
|
||||
|
|
|
@ -43,14 +43,6 @@ Note: Assigning either role will also default both
|
|||
and [](#opt-services.kubernetes.easyCerts)
|
||||
to true. This sets up flannel as CNI and activates automatic PKI bootstrapping.
|
||||
|
||||
As of kubernetes 1.10.X it has been deprecated to open non-tls-enabled
|
||||
ports on kubernetes components. Thus, from NixOS 19.03 all plain HTTP
|
||||
ports have been disabled by default. While opening insecure ports is
|
||||
still possible, it is recommended not to bind these to other interfaces
|
||||
than loopback. To re-enable the insecure port on the apiserver, see options:
|
||||
[](#opt-services.kubernetes.apiserver.insecurePort) and
|
||||
[](#opt-services.kubernetes.apiserver.insecureBindAddress)
|
||||
|
||||
::: {.note}
|
||||
As of NixOS 19.03, it is mandatory to configure:
|
||||
[](#opt-services.kubernetes.masterAddress).
|
||||
|
|
|
@ -47,17 +47,6 @@ services.kubernetes.roles = [ "master" "node" ];
|
|||
<xref linkend="opt-services.kubernetes.easyCerts" /> to true. This
|
||||
sets up flannel as CNI and activates automatic PKI bootstrapping.
|
||||
</para>
|
||||
<para>
|
||||
As of kubernetes 1.10.X it has been deprecated to open
|
||||
non-tls-enabled ports on kubernetes components. Thus, from NixOS
|
||||
19.03 all plain HTTP ports have been disabled by default. While
|
||||
opening insecure ports is still possible, it is recommended not to
|
||||
bind these to other interfaces than loopback. To re-enable the
|
||||
insecure port on the apiserver, see options:
|
||||
<xref linkend="opt-services.kubernetes.apiserver.insecurePort" />
|
||||
and
|
||||
<xref linkend="opt-services.kubernetes.apiserver.insecureBindAddress" />
|
||||
</para>
|
||||
<note>
|
||||
<para>
|
||||
As of NixOS 19.03, it is mandatory to configure:
|
||||
|
|
|
@ -1501,18 +1501,18 @@
|
|||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
MultiMC has been replaced with the fork PolyMC due to upstream
|
||||
developers being hostile to 3rd party package maintainers.
|
||||
PolyMC removes all MultiMC branding and is aimed at providing
|
||||
proper 3rd party packages like the one contained in Nixpkgs.
|
||||
This change affects the data folder where game instances and
|
||||
other save and configuration files are stored. Users with
|
||||
existing installations should rename
|
||||
MultiMC has been replaced with the fork PrismLauncher due to
|
||||
upstream developers being hostile to 3rd party package
|
||||
maintainers. PrismLauncher removes all MultiMC branding and is
|
||||
aimed at providing proper 3rd party packages like the one
|
||||
contained in Nixpkgs. This change affects the data folder
|
||||
where game instances and other save and configuration files
|
||||
are stored. Users with existing installations should rename
|
||||
<literal>~/.local/share/multimc</literal> to
|
||||
<literal>~/.local/share/polymc</literal>. The main config
|
||||
file’s path has also moved from
|
||||
<literal>~/.local/share/PrismLauncher</literal>. The main
|
||||
config file’s path has also moved from
|
||||
<literal>~/.local/share/multimc/multimc.cfg</literal> to
|
||||
<literal>~/.local/share/polymc/polymc.cfg</literal>.
|
||||
<literal>~/.local/share/PrismLauncher/prismlauncher.cfg</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
|
|
|
@ -11,6 +11,13 @@
|
|||
has the following highlights:
|
||||
</para>
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
GNOME has been upgraded to 43. Please take a look at their
|
||||
<link xlink:href="https://release.gnome.org/43/">Release
|
||||
Notes</link> for details.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
During cross-compilation, tests are now executed if the test
|
||||
|
@ -31,6 +38,24 @@
|
|||
<literal>stdenv.buildPlatform.canExecute stdenv.hostPlatform</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The <literal>polymc</literal> package has been removed due to
|
||||
a rogue maintainer. It has been replaced by
|
||||
<literal>prismlauncher</literal>, a fork by the rest of the
|
||||
maintainers. For more details, see
|
||||
<link xlink:href="https://github.com/NixOS/nixpkgs/pull/196624">the
|
||||
pull request that made this change</link> and
|
||||
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/196460">this
|
||||
issue detailing the vulnerability</link>. Users with existing
|
||||
installations should rename
|
||||
<literal>~/.local/share/polymc</literal> to
|
||||
<literal>~/.local/share/PrismLauncher</literal>. The main
|
||||
config file’s path has also moved from
|
||||
<literal>~/.local/share/polymc/polymc.cfg</literal> to
|
||||
<literal>~/.local/share/PrismLauncher/prismlauncher.cfg</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The <literal>nixpkgs.hostPlatform</literal> and
|
||||
|
@ -209,6 +234,13 @@
|
|||
<link xlink:href="options.html#opt-services.hadoop.hbase.enable">services.hadoop.hbase</link>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<link xlink:href="https://github.com/edneville/please">Please</link>,
|
||||
a Sudo clone written in Rust. Available as
|
||||
<link linkend="opt-security.please.enable">security.please</link>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<link xlink:href="https://github.com/messagebird/sachet/">Sachet</link>,
|
||||
|
@ -240,6 +272,13 @@
|
|||
<link xlink:href="options.html#opt-services.kanata.enable">services.kanata</link>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<link xlink:href="https://github.com/prymitive/karma">karma</link>,
|
||||
an alert dashboard for Prometheus Alertmanager. Available as
|
||||
<link xlink:href="options.html#opt-services.karma.enable">services.karma</link>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<link xlink:href="https://languagetool.org/">languagetool</link>,
|
||||
|
@ -261,6 +300,13 @@
|
|||
<link linkend="opt-services.outline.enable">services.outline</link>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<link xlink:href="https://ntfy.sh">ntfy.sh</link>, a push
|
||||
notification service. Available as
|
||||
<link linkend="opt-services.ntfy-sh.enable">services.ntfy-sh</link>
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<link xlink:href="https://git.sr.ht/~migadu/alps">alps</link>,
|
||||
|
@ -268,6 +314,13 @@
|
|||
<link linkend="opt-services.alps.enable">services.alps</link>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<link xlink:href="https://github.com/shizunge/endlessh-go">endlessh-go</link>,
|
||||
an SSH tarpit that exposes Prometheus metrics. Available as
|
||||
<link linkend="opt-services.endlessh-go.enable">services.endlessh-go</link>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<link xlink:href="https://netbird.io">netbird</link>, a zero
|
||||
|
@ -481,6 +534,16 @@
|
|||
instead.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The <literal>p4</literal> package now only includes the
|
||||
open-source Perforce Helix Core command-line client and APIs.
|
||||
It no longer installs the unfree Helix Core Server binaries
|
||||
<literal>p4d</literal>, <literal>p4broker</literal>, and
|
||||
<literal>p4p</literal>. To install the Helix Core Server
|
||||
binaries, use the <literal>p4d</literal> package instead.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The <literal>coq</literal> package and versioned variants
|
||||
|
@ -501,7 +564,9 @@
|
|||
<listitem>
|
||||
<para>
|
||||
<literal>pkgs.cosign</literal> does not provide the
|
||||
<literal>cosigned</literal> binary anymore.
|
||||
<literal>cosigned</literal> binary anymore. The
|
||||
<literal>sget</literal> binary has been moved into its own
|
||||
package.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
|
@ -560,6 +625,27 @@
|
|||
module removed, due to lack of maintainers.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>generateOptparseApplicativeCompletions</literal> and
|
||||
<literal>generateOptparseApplicativeCompletion</literal> from
|
||||
<literal>haskell.lib.compose</literal> (and
|
||||
<literal>haskell.lib</literal>) have been deprecated in favor
|
||||
of <literal>generateOptparseApplicativeCompletions</literal>
|
||||
(plural!) as provided by the haskell package sets (so
|
||||
<literal>haskellPackages.generateOptparseApplicativeCompletions</literal>
|
||||
etc.). The latter allows for cross-compilation (by
|
||||
automatically disabling generation of completion in the cross
|
||||
case). For it to work properly you need to make sure that the
|
||||
function comes from the same context as the package you are
|
||||
trying to override, i.e. always use the same package set as
|
||||
your package is coming from or – even better – use
|
||||
<literal>self.generateOptparseApplicativeCompletions</literal>
|
||||
if you are overriding a haskell package set. The old functions
|
||||
are retained for backwards compatibility, but yield a
|
||||
warning.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The <literal>services.graphite.api</literal> and
|
||||
|
@ -578,6 +664,47 @@
|
|||
instead.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>systemd-networkd</literal> v250 deprecated, renamed,
|
||||
and moved some sections and settings which leads to the
|
||||
following breaking module changes:
|
||||
</para>
|
||||
<itemizedlist spacing="compact">
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>systemd.network.networks.<name>.dhcpV6PrefixDelegationConfig</literal>
|
||||
is renamed to
|
||||
<literal>systemd.network.networks.<name>.dhcpPrefixDelegationConfig</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>systemd.network.networks.<name>.dhcpV6Config</literal>
|
||||
no longer accepts the
|
||||
<literal>ForceDHCPv6PDOtherInformation=</literal> setting.
|
||||
Please use the <literal>WithoutRA=</literal> and
|
||||
<literal>UseDelegatedPrefix=</literal> settings in your
|
||||
<literal>systemd.network.networks.<name>.dhcpV6Config</literal>
|
||||
and the <literal>DHCPv6Client=</literal> setting in your
|
||||
<literal>systemd.network.networks.<name>.ipv6AcceptRAConfig</literal>
|
||||
to control when the DHCPv6 client is started and how the
|
||||
delegated prefixes are handled by the DHCPv6 client.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>systemd.network.networks.<name>.networkConfig</literal>
|
||||
no longer accepts the <literal>IPv6Token=</literal>
|
||||
setting. Use the <literal>Token=</literal> setting in your
|
||||
<literal>systemd.network.networks.<name>.ipv6AcceptRAConfig</literal>
|
||||
instead. The
|
||||
<literal>systemd.network.networks.<name>.ipv6Prefixes.*.ipv6PrefixConfig</literal>
|
||||
now also accepts the <literal>Token=</literal> setting.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The <literal>meta.mainProgram</literal> attribute of packages
|
||||
|
@ -592,6 +719,12 @@
|
|||
system timezone.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The top-level <literal>termonad-with-packages</literal> alias
|
||||
for <literal>termonad</literal> has been removed.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
(Neo)Vim can not be configured with
|
||||
|
@ -606,6 +739,12 @@
|
|||
for vim).
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The default <literal>kops</literal> version is now 1.25.1 and
|
||||
support for 1.22 and older has been dropped.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>k3s</literal> no longer supports docker as runtime
|
||||
|
@ -627,6 +766,16 @@
|
|||
<literal>[ "lua54" "luau" ]</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>pkgs.fetchNextcloudApp</literal> has been rewritten
|
||||
to circumvent impurities in e.g. tarballs from GitHub and to
|
||||
make it easier to apply patches. This means that your hashes
|
||||
are out-of-date and the (previously required) attributes
|
||||
<literal>name</literal> and <literal>version</literal> are no
|
||||
longer accepted.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</section>
|
||||
<section xml:id="sec-release-22.11-notable-changes">
|
||||
|
@ -703,6 +852,14 @@
|
|||
release notes</link> for more details.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The <literal>guake</literal> package has been updated from
|
||||
3.6.3 to 3.9.0, see the
|
||||
<link xlink:href="https://github.com/Guake/guake/releases">changelog</link>
|
||||
for more details.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>dockerTools.buildImage</literal> deprecates the
|
||||
|
|
|
@ -581,7 +581,15 @@ In addition to numerous new and upgraded packages, this release has the followin
|
|||
|
||||
- The `miller` package has been upgraded from 5.10.3 to [6.2.0](https://github.com/johnkerl/miller/releases/tag/v6.2.0). See [What's new in Miller 6](https://miller.readthedocs.io/en/latest/new-in-miller-6).
|
||||
|
||||
- MultiMC has been replaced with the fork PolyMC due to upstream developers being hostile to 3rd party package maintainers. PolyMC removes all MultiMC branding and is aimed at providing proper 3rd party packages like the one contained in Nixpkgs. This change affects the data folder where game instances and other save and configuration files are stored. Users with existing installations should rename `~/.local/share/multimc` to `~/.local/share/polymc`. The main config file's path has also moved from `~/.local/share/multimc/multimc.cfg` to `~/.local/share/polymc/polymc.cfg`.
|
||||
- MultiMC has been replaced with the fork PrismLauncher due to upstream
|
||||
developers being hostile to 3rd party package maintainers. PrismLauncher
|
||||
removes all MultiMC branding and is aimed at providing proper 3rd party
|
||||
packages like the one contained in Nixpkgs. This change affects the data
|
||||
folder where game instances and other save and configuration files are stored.
|
||||
Users with existing installations should rename `~/.local/share/multimc` to
|
||||
`~/.local/share/PrismLauncher`. The main config file's path has also moved
|
||||
from `~/.local/share/multimc/multimc.cfg` to
|
||||
`~/.local/share/PrismLauncher/prismlauncher.cfg`.
|
||||
|
||||
- `systemd-nspawn@.service` settings have been reverted to the default systemd behaviour. User namespaces are now activated by default. If you want to keep running nspawn containers without user namespaces you need to set `systemd.nspawn.<name>.execConfig.PrivateUsers = false`
|
||||
|
||||
|
|
|
@ -6,6 +6,9 @@ Support is planned until the end of June 2023, handing over to 23.05.
|
|||
|
||||
In addition to numerous new and upgraded packages, this release has the following highlights:
|
||||
|
||||
- GNOME has been upgraded to 43. Please take a look at their [Release
|
||||
Notes](https://release.gnome.org/43/) for details.
|
||||
|
||||
- During cross-compilation, tests are now executed if the test suite can be executed
|
||||
by the build platform. This is the case when doing “native” cross-compilation
|
||||
where the build and host platforms are largely the same, but the nixpkgs' cross
|
||||
|
@ -17,6 +20,16 @@ In addition to numerous new and upgraded packages, this release has the followin
|
|||
built for `stdenv.hostPlatform` (i.e. produced by `stdenv.cc`) by evaluating
|
||||
`stdenv.buildPlatform.canExecute stdenv.hostPlatform`.
|
||||
|
||||
- The `polymc` package has been removed due to a rogue maintainer. It has been
|
||||
replaced by `prismlauncher`, a fork by the rest of the maintainers. For more
|
||||
details, see [the pull request that made this
|
||||
change](https://github.com/NixOS/nixpkgs/pull/196624) and [this issue
|
||||
detailing the vulnerability](https://github.com/NixOS/nixpkgs/issues/196460).
|
||||
Users with existing installations should rename `~/.local/share/polymc` to
|
||||
`~/.local/share/PrismLauncher`. The main config file's path has also moved
|
||||
from `~/.local/share/polymc/polymc.cfg` to
|
||||
`~/.local/share/PrismLauncher/prismlauncher.cfg`.
|
||||
|
||||
- The `nixpkgs.hostPlatform` and `nixpkgs.buildPlatform` options have been added.
|
||||
These cover and override the `nixpkgs.{system,localSystem,crossSystem}` options.
|
||||
|
||||
|
@ -76,6 +89,8 @@ In addition to numerous new and upgraded packages, this release has the followin
|
|||
|
||||
- [HBase cluster](https://hbase.apache.org/), a distributed, scalable, big data store. Available as [services.hadoop.hbase](options.html#opt-services.hadoop.hbase.enable).
|
||||
|
||||
- [Please](https://github.com/edneville/please), a Sudo clone written in Rust. Available as [security.please](#opt-security.please.enable)
|
||||
|
||||
- [Sachet](https://github.com/messagebird/sachet/), an SMS alerting tool for the Prometheus Alertmanager. Available as [services.prometheus.sachet](#opt-services.prometheus.sachet.enable).
|
||||
|
||||
- [infnoise](https://github.com/leetronics/infnoise), a hardware True Random Number Generator dongle.
|
||||
|
@ -86,6 +101,8 @@ In addition to numerous new and upgraded packages, this release has the followin
|
|||
- [kanata](https://github.com/jtroo/kanata), a tool to improve keyboard comfort and usability with advanced customization.
|
||||
Available as [services.kanata](options.html#opt-services.kanata.enable).
|
||||
|
||||
- [karma](https://github.com/prymitive/karma), an alert dashboard for Prometheus Alertmanager. Available as [services.karma](options.html#opt-services.karma.enable)
|
||||
|
||||
- [languagetool](https://languagetool.org/), a multilingual grammar, style, and spell checker.
|
||||
Available as [services.languagetool](options.html#opt-services.languagetool.enable).
|
||||
|
||||
|
@ -93,8 +110,12 @@ In addition to numerous new and upgraded packages, this release has the followin
|
|||
|
||||
- [Outline](https://www.getoutline.com/), a wiki and knowledge base similar to Notion. Available as [services.outline](#opt-services.outline.enable).
|
||||
|
||||
- [ntfy.sh](https://ntfy.sh), a push notification service. Available as [services.ntfy-sh](#opt-services.ntfy-sh.enable)
|
||||
|
||||
- [alps](https://git.sr.ht/~migadu/alps), a simple and extensible webmail. Available as [services.alps](#opt-services.alps.enable).
|
||||
|
||||
- [endlessh-go](https://github.com/shizunge/endlessh-go), an SSH tarpit that exposes Prometheus metrics. Available as [services.endlessh-go](#opt-services.endlessh-go.enable).
|
||||
|
||||
- [netbird](https://netbird.io), a zero configuration VPN.
|
||||
Available as [services.netbird](options.html#opt-services.netbird.enable).
|
||||
|
||||
|
@ -160,6 +181,8 @@ Available as [services.patroni](options.html#opt-services.patroni.enable).
|
|||
- `services.hbase` has been renamed to `services.hbase-standalone`.
|
||||
For production HBase clusters, use `services.hadoop.hbase` instead.
|
||||
|
||||
- The `p4` package now only includes the open-source Perforce Helix Core command-line client and APIs. It no longer installs the unfree Helix Core Server binaries `p4d`, `p4broker`, and `p4p`. To install the Helix Core Server binaries, use the `p4d` package instead.
|
||||
|
||||
- The `coq` package and versioned variants starting at `coq_8_14` no
|
||||
longer include CoqIDE, which is now available through
|
||||
`coqPackages.coqide`. It is still possible to get CoqIDE as part of
|
||||
|
@ -169,7 +192,7 @@ Available as [services.patroni](options.html#opt-services.patroni.enable).
|
|||
- PHP 7.4 is no longer supported due to upstream not supporting this
|
||||
version for the entire lifecycle of the 22.11 release.
|
||||
|
||||
- `pkgs.cosign` does not provide the `cosigned` binary anymore.
|
||||
- `pkgs.cosign` does not provide the `cosigned` binary anymore. The `sget` binary has been moved into its own package.
|
||||
|
||||
- Emacs now uses the Lucid toolkit by default instead of GTK because of stability and compatibility issues.
|
||||
Users who still wish to remain using GTK can do so by using `emacs-gtk`.
|
||||
|
@ -187,6 +210,15 @@ Available as [services.patroni](options.html#opt-services.patroni.enable).
|
|||
|
||||
- virtlyst package and `services.virtlyst` module removed, due to lack of maintainers.
|
||||
|
||||
- `generateOptparseApplicativeCompletions` and `generateOptparseApplicativeCompletion` from `haskell.lib.compose`
|
||||
(and `haskell.lib`) have been deprecated in favor of `generateOptparseApplicativeCompletions` (plural!) as
|
||||
provided by the haskell package sets (so `haskellPackages.generateOptparseApplicativeCompletions` etc.).
|
||||
The latter allows for cross-compilation (by automatically disabling generation of completion in the cross case).
|
||||
For it to work properly you need to make sure that the function comes from the same context as the package
|
||||
you are trying to override, i.e. always use the same package set as your package is coming from or – even
|
||||
better – use `self.generateOptparseApplicativeCompletions` if you are overriding a haskell package set.
|
||||
The old functions are retained for backwards compatibility, but yield are warning.
|
||||
|
||||
- The `services.graphite.api` and `services.graphite.beacon` NixOS options, and
|
||||
the `python3.pkgs.graphite_api`, `python3.pkgs.graphite_beacon` and
|
||||
`python3.pkgs.influxgraph` packages, have been removed due to lack of upstream
|
||||
|
@ -194,20 +226,34 @@ Available as [services.patroni](options.html#opt-services.patroni.enable).
|
|||
|
||||
- The `aws` package has been removed due to being abandoned by the upstream. It is recommended to use `awscli` or `awscli2` instead.
|
||||
|
||||
- `systemd-networkd` v250 deprecated, renamed, and moved some sections and settings, which leads to the following breaking module changes (a short configuration sketch follows the list):
|
||||
|
||||
* `systemd.network.networks.<name>.dhcpV6PrefixDelegationConfig` is renamed to `systemd.network.networks.<name>.dhcpPrefixDelegationConfig`.
|
||||
* `systemd.network.networks.<name>.dhcpV6Config` no longer accepts the `ForceDHCPv6PDOtherInformation=` setting. Please use the `WithoutRA=` and `UseDelegatedPrefix=` settings in your `systemd.network.networks.<name>.dhcpV6Config` and the `DHCPv6Client=` setting in your `systemd.network.networks.<name>.ipv6AcceptRAConfig` to control when the DHCPv6 client is started and how the delegated prefixes are handled by the DHCPv6 client.
|
||||
* `systemd.network.networks.<name>.networkConfig` no longer accepts the `IPv6Token=` setting. Use the `Token=` setting in your `systemd.network.networks.<name>.ipv6AcceptRAConfig` instead. The `systemd.network.networks.<name>.ipv6Prefixes.*.ipv6PrefixConfig` now also accepts the `Token=` setting.
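
  A hedged configuration sketch of the rename (the network name and setting value are
  illustrative only):

      systemd.network.networks."10-wan".dhcpPrefixDelegationConfig = {
        SubnetId = "auto";
      };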
|
||||
|
||||
- The `meta.mainProgram` attribute of packages in `wineWowPackages` now defaults to `"wine64"`.
|
||||
|
||||
- The `paperless` module now defaults `PAPERLESS_TIME_ZONE` to your configured system timezone.
|
||||
|
||||
- The top-level `termonad-with-packages` alias for `termonad` has been removed.
|
||||
|
||||
- (Neo)Vim can no longer be configured with `configure.pathogen`, to reduce maintenance burden.
  Use `configure.packages` instead.
- Neovim can no longer be configured with plug (it still works for Vim).
|
||||
|
||||
- The default `kops` version is now 1.25.1 and support for 1.22 and older has been dropped.
|
||||
|
||||
- `k3s` no longer supports docker as runtime due to upstream dropping support.
|
||||
|
||||
- `k3s` supports `clusterInit` option, and it is enabled by default, for servers.
|
||||
|
||||
- `stylua` no longer accepts `lua52Support` and `luauSupport` overrides, use `features` instead, which defaults to `[ "lua54" "luau" ]`.
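
  A hedged sketch of the new override style (the feature selection is illustrative):

      stylua.override { features = [ "lua52" ]; }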
|
||||
|
||||
- `pkgs.fetchNextcloudApp` has been rewritten to circumvent impurities in e.g. tarballs from GitHub and to make it easier to
|
||||
apply patches. This means that your hashes are out-of-date and the (previously required) attributes `name` and `version`
|
||||
are no longer accepted.
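
  A hedged sketch of the new call style (argument names assumed from the rewrite;
  the URL and hash are placeholders):

      fetchNextcloudApp {
        url = "https://github.com/nextcloud/news/releases/download/18.1.0/news.tar.gz";
        sha256 = "0000000000000000000000000000000000000000000000000000";
      }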
|
||||
|
||||
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
|
||||
|
||||
## Other Notable Changes {#sec-release-22.11-notable-changes}
|
||||
|
@ -230,6 +276,8 @@ Available as [services.patroni](options.html#opt-services.patroni.enable).
|
|||
|
||||
- The `diamond` package has been updated from 0.8.36 to 2.0.15. See the [upstream release notes](https://github.com/bbuchfink/diamond/releases) for more details.
|
||||
|
||||
- The `guake` package has been updated from 3.6.3 to 3.9.0, see the [changelog](https://github.com/Guake/guake/releases) for more details.
|
||||
|
||||
- `dockerTools.buildImage` deprecates the misunderstood `contents` parameter, in favor of `copyToRoot`.
|
||||
Use `copyToRoot = buildEnv { ... };` or similar if you intend to add packages to `/bin`.
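
  For instance (a hedged sketch expanding the hint above):

      dockerTools.buildImage {
        name = "hello";
        copyToRoot = pkgs.buildEnv {
          name = "image-root";
          paths = [ pkgs.hello ];
          pathsToLink = [ "/bin" ];
        };
      }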
|
||||
|
||||
|
|
|
@ -40,8 +40,8 @@
|
|||
concat($optionIdPrefix,
|
||||
translate(
|
||||
attr[@name = 'name']/string/@value,
|
||||
'*< >[]:',
|
||||
'_______'
|
||||
'*< >[]:"',
|
||||
'________'
|
||||
))" />
|
||||
<varlistentry>
|
||||
<term xlink:href="#{$id}">
|
||||
|
|
10 third_party/nixpkgs/nixos/lib/testing-python.nix (vendored)
|
@ -29,7 +29,9 @@ rec {
|
|||
};
|
||||
};
|
||||
|
||||
# Make a full-blown test
|
||||
# Make a full-blown test (legacy)
|
||||
# For an official public interface to the tests, see
|
||||
# https://nixos.org/manual/nixos/unstable/index.html#sec-calling-nixos-tests
|
||||
makeTest =
|
||||
{ machine ? null
|
||||
, nodes ? {}
|
||||
|
@ -48,7 +50,8 @@ rec {
|
|||
else builtins.unsafeGetAttrPos "testScript" t)
|
||||
, extraPythonPackages ? (_ : [])
|
||||
, interactive ? {}
|
||||
} @ t:
|
||||
} @ t: let
|
||||
testConfig =
|
||||
(evalTest {
|
||||
imports = [
|
||||
{ _file = "makeTest parameters"; config = t; }
|
||||
|
@ -60,6 +63,9 @@ rec {
|
|||
}
|
||||
];
|
||||
}).config;
|
||||
in
|
||||
testConfig.test # For nix-build
|
||||
// testConfig; # For all-tests.nix
|
||||
|
||||
simpleTest = as: (makeTest as).test;
|
||||
|
||||
|
|
|
@ -101,7 +101,7 @@ in
|
|||
nodesCompat =
|
||||
mapAttrs
|
||||
(name: config: config // {
|
||||
config = lib.warn
|
||||
config = lib.warnIf (lib.isInOldestRelease 2211)
|
||||
"Module argument `nodes.${name}.config` is deprecated. Use `nodes.${name}` instead."
|
||||
config;
|
||||
})
|
||||
|
|
32 third_party/nixpkgs/nixos/lib/utils.nix (vendored)
|
@ -39,11 +39,19 @@ rec {
|
|||
|| hasPrefix a'.mountPoint b'.mountPoint
|
||||
|| any (hasPrefix a'.mountPoint) b'.depends;
|
||||
|
||||
# Escape a path according to the systemd rules, e.g. /dev/xyzzy
|
||||
# becomes dev-xyzzy. FIXME: slow.
|
||||
escapeSystemdPath = s:
|
||||
replaceChars ["/" "-" " "] ["-" "\\x2d" "\\x20"]
|
||||
(removePrefix "/" s);
|
||||
# Escape a path according to the systemd rules. FIXME: slow
|
||||
# The rules are described in systemd.unit(5) as follows:
|
||||
# The escaping algorithm operates as follows: given a string, any "/" character is replaced by "-", and all other characters which are not ASCII alphanumerics, ":", "_" or "." are replaced by C-style "\x2d" escapes. In addition, "." is replaced with such a C-style escape when it would appear as the first character in the escaped string.
|
||||
# When the input qualifies as absolute file system path, this algorithm is extended slightly: the path to the root directory "/" is encoded as single dash "-". In addition, any leading, trailing or duplicate "/" characters are removed from the string before transformation. Example: /foo//bar/baz/ becomes "foo-bar-baz".
|
||||
escapeSystemdPath = s: let
|
||||
replacePrefix = p: r: s: (if (hasPrefix p s) then r + (removePrefix p s) else s);
|
||||
trim = s: removeSuffix "/" (removePrefix "/" s);
|
||||
normalizedPath = strings.normalizePath s;
|
||||
in
|
||||
replaceChars ["/"] ["-"]
|
||||
(replacePrefix "." (strings.escapeC ["."] ".")
|
||||
(strings.escapeC (stringToCharacters " !\"#$%&'()*+,;<=>=@[\\]^`{|}~-")
|
||||
(if normalizedPath == "/" then normalizedPath else trim normalizedPath)));
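# Illustrative results (sketch, following the systemd.unit(5) rules quoted above):
#   escapeSystemdPath "/dev/xyzzy"      => "dev-xyzzy"
#   escapeSystemdPath "/foo//bar/baz/"  => "foo-bar-baz"
#   escapeSystemdPath "/"               => "-"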
|
||||
|
||||
# Quotes an argument for use in Exec* service lines.
|
||||
# systemd accepts "-quoted strings with escape sequences, toJSON produces
|
||||
|
@ -102,7 +110,11 @@ rec {
|
|||
if item ? ${attr} then
|
||||
nameValuePair prefix item.${attr}
|
||||
else if isAttrs item then
|
||||
map (name: recurse (prefix + "." + name) item.${name}) (attrNames item)
|
||||
map (name:
|
||||
let
|
||||
escapedName = ''"${replaceChars [''"'' "\\"] [''\"'' "\\\\"] name}"'';
|
||||
in
|
||||
recurse (prefix + "." + escapedName) item.${name}) (attrNames item)
|
||||
else if isList item then
|
||||
imap0 (index: item: recurse (prefix + "[${toString index}]") item) item
|
||||
else
|
||||
|
@ -182,13 +194,13 @@ rec {
|
|||
'')
|
||||
(attrNames secrets))
|
||||
+ "\n"
|
||||
+ "${pkgs.jq}/bin/jq >'${output}' '"
|
||||
+ concatStringsSep
|
||||
+ "${pkgs.jq}/bin/jq >'${output}' "
|
||||
+ lib.escapeShellArg (concatStringsSep
|
||||
" | "
|
||||
(imap1 (index: name: ''${name} = $ENV.secret${toString index}'')
|
||||
(attrNames secrets))
|
||||
(attrNames secrets)))
|
||||
+ ''
|
||||
' <<'EOF'
|
||||
<<'EOF'
|
||||
${builtins.toJSON set}
|
||||
EOF
|
||||
(( ! $inherit_errexit_enabled )) && shopt -u inherit_errexit
|
||||
|
|
|
@ -102,7 +102,7 @@ in {
|
|||
each user that tries to use the sound system. The server runs
|
||||
with user privileges. If true, one system-wide PulseAudio
|
||||
server is launched on boot, running as the user "pulse", and
|
||||
only users in the "audio" group will have access to the server.
|
||||
only users in the "pulse-access" group will have access to the server.
|
||||
Please read the PulseAudio documentation for more details.
|
||||
|
||||
Don't enable this option unless you know what you are doing.
|
||||
|
@ -310,6 +310,7 @@ in {
|
|||
};
|
||||
|
||||
users.groups.pulse.gid = gid;
|
||||
users.groups.pulse-access = {};
|
||||
|
||||
systemd.services.pulseaudio = {
|
||||
description = "PulseAudio System-Wide Server";
|
||||
|
|
|
@ -186,7 +186,7 @@ foreach my $name (keys %groupsCur) {
|
|||
# Rewrite /etc/group. FIXME: acquire lock.
|
||||
my @lines = map { join(":", $_->{name}, $_->{password}, $_->{gid}, $_->{members}) . "\n" }
|
||||
(sort { $a->{gid} <=> $b->{gid} } values(%groupsOut));
|
||||
updateFile($gidMapFile, to_json($gidMap));
|
||||
updateFile($gidMapFile, to_json($gidMap, {canonical => 1}));
|
||||
updateFile("/etc/group", \@lines);
|
||||
nscdInvalidate("group");
|
||||
|
||||
|
@ -272,7 +272,7 @@ foreach my $name (keys %usersCur) {
|
|||
# Rewrite /etc/passwd. FIXME: acquire lock.
|
||||
@lines = map { join(":", $_->{name}, $_->{fakePassword}, $_->{uid}, $_->{gid}, $_->{description}, $_->{home}, $_->{shell}) . "\n" }
|
||||
(sort { $a->{uid} <=> $b->{uid} } (values %usersOut));
|
||||
updateFile($uidMapFile, to_json($uidMap));
|
||||
updateFile($uidMapFile, to_json($uidMap, {canonical => 1}));
|
||||
updateFile("/etc/passwd", \@lines);
|
||||
nscdInvalidate("passwd");
|
||||
|
||||
|
|
|
@ -23,8 +23,10 @@ in
|
|||
runCommand "uvcdynctrl-udev-rules-${version}"
|
||||
{
|
||||
inherit dataPath;
|
||||
buildInputs = [
|
||||
nativeBuildInputs = [
|
||||
makeWrapper
|
||||
];
|
||||
buildInputs = [
|
||||
libwebcam
|
||||
];
|
||||
dontPatchELF = true;
|
||||
|
|
|
@ -38,9 +38,9 @@ with lib;
|
|||
# VM guest additions to improve host-guest interaction
|
||||
services.spice-vdagentd.enable = true;
|
||||
services.qemuGuest.enable = true;
|
||||
virtualisation.vmware.guest.enable = true;
|
||||
virtualisation.vmware.guest.enable = pkgs.stdenv.hostPlatform.isx86;
|
||||
virtualisation.hypervGuest.enable = true;
|
||||
services.xe-guest-utilities.enable = true;
|
||||
services.xe-guest-utilities.enable = pkgs.stdenv.hostPlatform.isx86;
|
||||
# The VirtualBox guest additions rely on an out-of-tree kernel module
|
||||
# which lags behind kernel releases, potentially causing broken builds.
|
||||
virtualisation.virtualbox.guest.enable = false;
|
||||
|
|
|
@ -55,6 +55,11 @@ let
|
|||
check = builtins.isAttrs;
|
||||
};
|
||||
|
||||
# Whether `pkgs` was constructed by this module - not if nixpkgs.pkgs or
|
||||
# _module.args.pkgs is set. However, determining whether _module.args.pkgs
|
||||
# is defined elsewhere does not seem feasible.
|
||||
constructedByMe = !opt.pkgs.isDefined;
|
||||
|
||||
hasBuildPlatform = opt.buildPlatform.highestPrio < (mkOptionDefault {}).priority;
|
||||
hasHostPlatform = opt.hostPlatform.isDefined;
|
||||
hasPlatform = hasHostPlatform || hasBuildPlatform;
|
||||
|
@ -358,7 +363,7 @@ in
|
|||
}
|
||||
)
|
||||
{
|
||||
assertion = hasPlatform -> legacyOptionsDefined == [];
|
||||
assertion = constructedByMe -> hasPlatform -> legacyOptionsDefined == [];
|
||||
message = ''
|
||||
Your system configures nixpkgs with the platform parameter${optionalString hasBuildPlatform "s"}:
|
||||
${hostPlatformLine
|
||||
|
|
|
@ -59,5 +59,11 @@ lib.recurseIntoAttrs {
|
|||
For a future proof system configuration, we recommend to remove
|
||||
the legacy definitions.
|
||||
''];
|
||||
assert getErrors {
|
||||
nixpkgs.localSystem = pkgs.stdenv.hostPlatform;
|
||||
nixpkgs.hostPlatform = pkgs.stdenv.hostPlatform;
|
||||
nixpkgs.pkgs = pkgs;
|
||||
} == [];
|
||||
|
||||
pkgs.emptyFile;
|
||||
}
|
||||
|
|
|
@ -263,6 +263,7 @@
|
|||
./security/pam.nix
|
||||
./security/pam_usb.nix
|
||||
./security/pam_mount.nix
|
||||
./security/please.nix
|
||||
./security/polkit.nix
|
||||
./security/rngd.nix
|
||||
./security/rtkit.nix
|
||||
|
@ -346,6 +347,7 @@
|
|||
./services/continuous-integration/hercules-ci-agent/default.nix
|
||||
./services/continuous-integration/hydra/default.nix
|
||||
./services/continuous-integration/github-runner.nix
|
||||
./services/continuous-integration/github-runners.nix
|
||||
./services/continuous-integration/gitlab-runner.nix
|
||||
./services/continuous-integration/gocd-agent/default.nix
|
||||
./services/continuous-integration/gocd-server/default.nix
|
||||
|
@ -391,9 +393,9 @@
|
|||
./services/desktops/pipewire/pipewire-media-session.nix
|
||||
./services/desktops/pipewire/wireplumber.nix
|
||||
./services/desktops/gnome/at-spi2-core.nix
|
||||
./services/desktops/gnome/chrome-gnome-shell.nix
|
||||
./services/desktops/gnome/evolution-data-server.nix
|
||||
./services/desktops/gnome/glib-networking.nix
|
||||
./services/desktops/gnome/gnome-browser-connector.nix
|
||||
./services/desktops/gnome/gnome-initial-setup.nix
|
||||
./services/desktops/gnome/gnome-keyring.nix
|
||||
./services/desktops/gnome/gnome-online-accounts.nix
|
||||
|
@ -611,6 +613,7 @@
|
|||
./services/misc/nix-optimise.nix
|
||||
./services/misc/nix-ssh-serve.nix
|
||||
./services/misc/novacomd.nix
|
||||
./services/misc/ntfy-sh.nix
|
||||
./services/misc/nzbget.nix
|
||||
./services/misc/nzbhydra2.nix
|
||||
./services/misc/octoprint.nix
|
||||
|
@ -683,6 +686,7 @@
|
|||
./services/monitoring/heapster.nix
|
||||
./services/monitoring/incron.nix
|
||||
./services/monitoring/kapacitor.nix
|
||||
./services/monitoring/karma.nix
|
||||
./services/monitoring/kthxbye.nix
|
||||
./services/monitoring/loki.nix
|
||||
./services/monitoring/longview.nix
|
||||
|
@ -713,6 +717,7 @@
|
|||
./services/monitoring/unifi-poller.nix
|
||||
./services/monitoring/ups.nix
|
||||
./services/monitoring/uptime.nix
|
||||
./services/monitoring/vmagent.nix
|
||||
./services/monitoring/vnstat.nix
|
||||
./services/monitoring/zabbix-agent.nix
|
||||
./services/monitoring/zabbix-proxy.nix
|
||||
|
@ -1004,6 +1009,7 @@
|
|||
./services/security/certmgr.nix
|
||||
./services/security/cfssl.nix
|
||||
./services/security/clamav.nix
|
||||
./services/security/endlessh-go.nix
|
||||
./services/security/fail2ban.nix
|
||||
./services/security/fprintd.nix
|
||||
./services/security/haka.nix
|
||||
|
@ -1067,6 +1073,7 @@
|
|||
./services/web-apps/calibre-web.nix
|
||||
./services/web-apps/code-server.nix
|
||||
./services/web-apps/baget.nix
|
||||
./services/web-apps/changedetection-io.nix
|
||||
./services/web-apps/convos.nix
|
||||
./services/web-apps/dex.nix
|
||||
./services/web-apps/discourse.nix
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
# This module defines the software packages included in the "minimal"
|
||||
# installation CD. It might be useful elsewhere.
|
||||
|
||||
{ lib, pkgs, ... }:
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
{
|
||||
# Include some utilities that are useful for installing or repairing
|
||||
|
@ -51,7 +51,9 @@
|
|||
];
|
||||
|
||||
# Include support for various filesystems.
|
||||
boot.supportedFilesystems = [ "btrfs" "reiserfs" "vfat" "f2fs" "xfs" "zfs" "ntfs" "cifs" ];
|
||||
boot.supportedFilesystems =
|
||||
[ "btrfs" "reiserfs" "vfat" "f2fs" "xfs" "ntfs" "cifs" ] ++
|
||||
lib.optional (lib.meta.availableOn pkgs.stdenv.hostPlatform config.boot.zfs.package) "zfs";
|
||||
|
||||
# Configure host id for ZFS to work
|
||||
networking.hostId = lib.mkDefault "8425e349";
|
||||
|
|
|
@ -14,7 +14,7 @@ let
|
|||
pyEnv = pkgs.python.withPackages(ps: [ ps.mininet-python ]);
|
||||
|
||||
mnexecWrapped = pkgs.runCommand "mnexec-wrapper"
|
||||
{ buildInputs = [ pkgs.makeWrapper pkgs.pythonPackages.wrapPython ]; }
|
||||
{ nativeBuildInputs = [ pkgs.makeWrapper pkgs.pythonPackages.wrapPython ]; }
|
||||
''
|
||||
makeWrapper ${pkgs.mininet}/bin/mnexec \
|
||||
$out/bin/mnexec \
|
||||
|
|
|
@ -11,7 +11,19 @@ let
|
|||
|
||||
in {
|
||||
options.programs.neovim = {
|
||||
enable = mkEnableOption (lib.mdDoc "Neovim");
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
example = true;
|
||||
description = lib.mdDoc ''
|
||||
Whether to enable Neovim.
|
||||
|
||||
When enabled through this option, Neovim is wrapped to use a
|
||||
configuration managed by this module. The configuration file in the
|
||||
user's home directory at {file}`~/.config/nvim/init.vim` is no longer
|
||||
loaded by default.
|
||||
'';
|
||||
};
|
||||
|
||||
defaultEditor = mkOption {
|
||||
type = types.bool;
|
||||
|
|
|
@ -14,6 +14,7 @@ let
|
|||
''
|
||||
#! ${pkgs.runtimeShell} -e
|
||||
export DISPLAY="$(systemctl --user show-environment | ${pkgs.gnused}/bin/sed 's/^DISPLAY=\(.*\)/\1/; t; d')"
|
||||
export WAYLAND_DISPLAY="$(systemctl --user show-environment | ${pkgs.gnused}/bin/sed 's/^WAYLAND_DISPLAY=\(.*\)/\1/; t; d')"
|
||||
exec ${askPassword} "$@"
|
||||
'';
|
||||
|
||||
|
|
|
@ -223,7 +223,7 @@ let
|
|||
description = lib.mdDoc ''
|
||||
The TSM client derivation to be
|
||||
added to the system environment.
|
||||
It will called with `.override`
|
||||
It will be used with `.override`
|
||||
to add paths to the client system-options file.
|
||||
'';
|
||||
};
|
||||
|
|
|
@ -237,8 +237,8 @@ services.bind = {
|
|||
|
||||
<programlisting>
|
||||
systemd.services.dns-rfc2136-conf = {
|
||||
requiredBy = ["acme-example.com.service", "bind.service"];
|
||||
before = ["acme-example.com.service", "bind.service"];
|
||||
requiredBy = ["acme-example.com.service" "bind.service"];
|
||||
before = ["acme-example.com.service" "bind.service"];
|
||||
unitConfig = {
|
||||
ConditionPathExists = "!/var/lib/secrets/dnskeys.conf";
|
||||
};
|
||||
|
@ -249,18 +249,19 @@ systemd.services.dns-rfc2136-conf = {
path = [ pkgs.bind ];
script = ''
mkdir -p /var/lib/secrets
chmod 755 /var/lib/secrets
tsig-keygen rfc2136key.example.com > /var/lib/secrets/dnskeys.conf
chown named:root /var/lib/secrets/dnskeys.conf
chmod 400 /var/lib/secrets/dnskeys.conf

# Copy the secret value from the dnskeys.conf, and put it in
# RFC2136_TSIG_SECRET below
# extract secret value from the dnskeys.conf
while read x y; do if [ "$x" = "secret" ]; then secret="''${y:1:''${#y}-3}"; fi; done < /var/lib/secrets/dnskeys.conf

cat > /var/lib/secrets/certs.secret << EOF
RFC2136_NAMESERVER='127.0.0.1:53'
RFC2136_TSIG_ALGORITHM='hmac-sha256.'
RFC2136_TSIG_KEY='rfc2136key.example.com'
RFC2136_TSIG_SECRET='your secret key'
RFC2136_TSIG_SECRET='$secret'
EOF
chmod 400 /var/lib/secrets/certs.secret
'';
122 third_party/nixpkgs/nixos/modules/security/please.nix vendored Normal file
@ -0,0 +1,122 @@
{ config, lib, pkgs, ... }:

with lib;

let
cfg = config.security.please;
ini = pkgs.formats.ini { };
in
{
options.security.please = {
enable = mkEnableOption (mdDoc ''
please, a Sudo clone which allows a users to execute a command or edit a
file as another user
'');

package = mkOption {
type = types.package;
default = pkgs.please;
defaultText = literalExpression "pkgs.please";
description = mdDoc ''
Which package to use for {command}`please`.
'';
};

wheelNeedsPassword = mkOption {
type = types.bool;
default = true;
description = lib.mdDoc ''
Whether users of the `wheel` group must provide a password to run
commands or edit files with {command}`please` and
{command}`pleaseedit` respectively.
'';
};

settings = mkOption {
type = ini.type;
default = { };
example = {
jim_run_any_as_root = {
name = "jim";
type = "run";
target = "root";
rule = ".*";
require_pass = false;
};
jim_edit_etc_hosts_as_root = {
name = "jim";
type = "edit";
target = "root";
rule = "/etc/hosts";
editmode = 644;
require_pass = true;
};
};
description = mdDoc ''
Please configuration. Refer to
<https://github.com/edneville/please/blob/master/please.ini.md> for
details.
'';
};
};

config = mkIf cfg.enable {
security.wrappers =
let
owner = "root";
group = "root";
setuid = true;
in
{
please = {
source = "${cfg.package}/bin/please";
inherit owner group setuid;
};
pleaseedit = {
source = "${cfg.package}/bin/pleaseedit";
inherit owner group setuid;
};
};

security.please.settings = rec {
# The "wheel" group is allowed to do anything by default but this can be
# overridden.
wheel_run_as_any = {
type = "run";
group = true;
name = "wheel";
target = ".*";
rule = ".*";
require_pass = cfg.wheelNeedsPassword;
};
wheel_edit_as_any = wheel_run_as_any // { type = "edit"; };
wheel_list_as_any = wheel_run_as_any // { type = "list"; };
};

environment = {
systemPackages = [ cfg.package ];

etc."please.ini".source = ini.generate "please.ini"
(cfg.settings // (rec {
# The "root" user is allowed to do anything by default and this cannot
# be overridden.
root_run_as_any = {
type = "run";
name = "root";
target = ".*";
rule = ".*";
require_pass = false;
};
root_edit_as_any = root_run_as_any // { type = "edit"; };
root_list_as_any = root_run_as_any // { type = "list"; };
}));
};

security.pam.services.please = {
sshAgentAuth = true;
usshAuth = true;
};

meta.maintainers = with maintainers; [ azahi ];
};
}
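A usage sketch for the new module, following the settings format from its example (the user name and rule name are hypothetical):
{
  security.please = {
    enable = true;
    wheelNeedsPassword = true;
    settings.alice_edit_hosts = {
      name = "alice";        # hypothetical user
      type = "edit";
      target = "root";
      rule = "/etc/hosts";
      require_pass = true;
    };
  };
}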
@ -14,7 +14,7 @@ let
name = "mopidy-with-extensions-${mopidy.version}";
paths = closePropagation cfg.extensionPackages;
pathsToLink = [ "/${mopidyPackages.python.sitePackages}" ];
buildInputs = [ makeWrapper ];
nativeBuildInputs = [ makeWrapper ];
postBuild = ''
makeWrapper ${mopidy}/bin/mopidy $out/bin/mopidy \
--prefix PYTHONPATH : $out/${mopidyPackages.python.sitePackages}
@ -116,7 +116,7 @@ let
original, name, set ? {}
}:
pkgs.runCommand "${name}-wrapper" {
buildInputs = [ pkgs.makeWrapper ];
nativeBuildInputs = [ pkgs.makeWrapper ];
} (with lib; ''
makeWrapper "${original}" "$out/bin/${name}" \
${concatStringsSep " \\\n " (mapAttrsToList (name: value: ''--set ${name} "${value}"'') set)}
@ -196,6 +196,18 @@ in
];
};

checkOpts = mkOption {
type = types.listOf types.str;
default = [ ];
description = lib.mdDoc ''
A list of options for 'restic check', which is run after
pruning.
'';
example = [
"--with-cache"
];
};

dynamicFilesFrom = mkOption {
type = with types; nullOr str;
default = null;
@ -270,8 +282,8 @@ in
then if (backup.paths != null) then concatStringsSep " " backup.paths else ""
else "--files-from ${filesFromTmpFile}";
pruneCmd = optionals (builtins.length backup.pruneOpts > 0) [
(resticCmd + " forget --prune " + (concatStringsSep " " backup.pruneOpts))
(resticCmd + " check")
(resticCmd + " forget --prune --cache-dir=%C/restic-backups-${name} " + (concatStringsSep " " backup.pruneOpts))
(resticCmd + " check --cache-dir=%C/restic-backups-${name} " + (concatStringsSep " " backup.checkOpts))
];
# Helper functions for rclone remotes
rcloneRemoteName = builtins.elemAt (splitString ":" backup.repository) 1;
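A hedged sketch of the new checkOpts option together with pruneOpts; the repository, password file and paths are placeholders, and passwordFile is an existing restic module option not shown in this diff:
{
  services.restic.backups.home = {
    repository = "/srv/restic-repo";              # placeholder
    passwordFile = "/etc/nixos/restic-password";  # placeholder
    paths = [ "/home" ];
    pruneOpts = [ "--keep-daily 7" "--keep-weekly 5" ];
    checkOpts = [ "--with-cache" ];               # passed to 'restic check' after pruning
  };
}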
@ -18,7 +18,8 @@ in
|
|||
imports = [
|
||||
(mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "admissionControl" ] [ "services" "kubernetes" "apiserver" "enableAdmissionPlugins" ])
|
||||
(mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "address" ] ["services" "kubernetes" "apiserver" "bindAddress"])
|
||||
(mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "port" ] ["services" "kubernetes" "apiserver" "insecurePort"])
|
||||
(mkRemovedOptionModule [ "services" "kubernetes" "apiserver" "insecureBindAddress" ] "")
|
||||
(mkRemovedOptionModule [ "services" "kubernetes" "apiserver" "insecurePort" ] "")
|
||||
(mkRemovedOptionModule [ "services" "kubernetes" "apiserver" "publicAddress" ] "")
|
||||
(mkRenamedOptionModule [ "services" "kubernetes" "etcd" "servers" ] [ "services" "kubernetes" "apiserver" "etcd" "servers" ])
|
||||
(mkRenamedOptionModule [ "services" "kubernetes" "etcd" "keyFile" ] [ "services" "kubernetes" "apiserver" "etcd" "keyFile" ])
|
||||
|
@ -164,18 +165,6 @@ in
|
|||
type = listOf str;
|
||||
};
|
||||
|
||||
insecureBindAddress = mkOption {
|
||||
description = lib.mdDoc "The IP address on which to serve the --insecure-port.";
|
||||
default = "127.0.0.1";
|
||||
type = str;
|
||||
};
|
||||
|
||||
insecurePort = mkOption {
|
||||
description = lib.mdDoc "Kubernetes apiserver insecure listening port. (0 = disabled)";
|
||||
default = 0;
|
||||
type = int;
|
||||
};
|
||||
|
||||
kubeletClientCaFile = mkOption {
|
||||
description = lib.mdDoc "Path to a cert file for connecting to kubelet.";
|
||||
default = top.caFile;
|
||||
|
@ -376,8 +365,6 @@ in
|
|||
"--proxy-client-cert-file=${cfg.proxyClientCertFile}"} \
|
||||
${optionalString (cfg.proxyClientKeyFile != null)
|
||||
"--proxy-client-key-file=${cfg.proxyClientKeyFile}"} \
|
||||
--insecure-bind-address=${cfg.insecureBindAddress} \
|
||||
--insecure-port=${toString cfg.insecurePort} \
|
||||
${optionalString (cfg.runtimeConfig != "")
|
||||
"--runtime-config=${cfg.runtimeConfig}"} \
|
||||
--secure-port=${toString cfg.securePort} \
|
||||
|
|
|
@ -10,7 +10,7 @@ in
|
|||
{
|
||||
imports = [
|
||||
(mkRenamedOptionModule [ "services" "kubernetes" "controllerManager" "address" ] ["services" "kubernetes" "controllerManager" "bindAddress"])
|
||||
(mkRenamedOptionModule [ "services" "kubernetes" "controllerManager" "port" ] ["services" "kubernetes" "controllerManager" "insecurePort"])
|
||||
(mkRemovedOptionModule [ "services" "kubernetes" "controllerManager" "insecurePort" ] "")
|
||||
];
|
||||
|
||||
###### interface
|
||||
|
@ -50,12 +50,6 @@ in
|
|||
type = listOf str;
|
||||
};
|
||||
|
||||
insecurePort = mkOption {
|
||||
description = lib.mdDoc "Kubernetes controller manager insecure listening port.";
|
||||
default = 0;
|
||||
type = int;
|
||||
};
|
||||
|
||||
kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes controller manager";
|
||||
|
||||
leaderElect = mkOption {
|
||||
|
@ -133,7 +127,6 @@ in
|
|||
--leader-elect=${boolToString cfg.leaderElect} \
|
||||
${optionalString (cfg.rootCaFile!=null)
|
||||
"--root-ca-file=${cfg.rootCaFile}"} \
|
||||
--port=${toString cfg.insecurePort} \
|
||||
--secure-port=${toString cfg.securePort} \
|
||||
${optionalString (cfg.serviceAccountKeyFile!=null)
|
||||
"--service-account-private-key-file=${cfg.serviceAccountKeyFile}"} \
|
||||
|
|
|
@ -26,7 +26,6 @@ in
|
|||
};
|
||||
|
||||
services.kubernetes.kubelet = {
|
||||
networkPlugin = mkDefault "cni";
|
||||
cni.config = mkDefault [{
|
||||
name = "mynet";
|
||||
type = "flannel";
|
||||
|
|
|
@ -62,6 +62,7 @@ in
|
|||
(mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "applyManifests" ] "")
|
||||
(mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "cadvisorPort" ] "")
|
||||
(mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "allowPrivileged" ] "")
|
||||
(mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "networkPlugin" ] "")
|
||||
];
|
||||
|
||||
###### interface
|
||||
|
@ -189,12 +190,6 @@ in
|
|||
default = {};
|
||||
};
|
||||
|
||||
networkPlugin = mkOption {
|
||||
description = lib.mdDoc "Network plugin to use by Kubernetes.";
|
||||
type = nullOr (enum ["cni" "kubenet"]);
|
||||
default = "kubenet";
|
||||
};
|
||||
|
||||
nodeIp = mkOption {
|
||||
description = lib.mdDoc "IP address of the node. If set, kubelet will use this IP address for the node.";
|
||||
default = null;
|
||||
|
@ -315,7 +310,6 @@ in
|
|||
"--cluster-dns=${cfg.clusterDns}"} \
|
||||
${optionalString (cfg.clusterDomain != "")
|
||||
"--cluster-domain=${cfg.clusterDomain}"} \
|
||||
--cni-conf-dir=${cniConfig} \
|
||||
${optionalString (cfg.featureGates != [])
|
||||
"--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
|
||||
--hairpin-mode=hairpin-veth \
|
||||
|
@ -323,8 +317,6 @@ in
|
|||
--healthz-port=${toString cfg.healthz.port} \
|
||||
--hostname-override=${cfg.hostname} \
|
||||
--kubeconfig=${kubeconfig} \
|
||||
${optionalString (cfg.networkPlugin != null)
|
||||
"--network-plugin=${cfg.networkPlugin}"} \
|
||||
${optionalString (cfg.nodeIp != null)
|
||||
"--node-ip=${cfg.nodeIp}"} \
|
||||
--pod-infra-container-image=pause \
|
||||
|
|
|
@ -266,7 +266,7 @@ in
|
|||
in
|
||||
''
|
||||
export KUBECONFIG=${clusterAdminKubeconfig}
|
||||
${kubernetes}/bin/kubectl apply -f ${concatStringsSep " \\\n -f " files}
|
||||
${top.package}/bin/kubectl apply -f ${concatStringsSep " \\\n -f " files}
|
||||
'';
|
||||
})]);
|
||||
|
||||
|
|
|
@ -1,377 +1,23 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
{ config
|
||||
, pkgs
|
||||
, lib
|
||||
, ...
|
||||
}@args:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.github-runner;
|
||||
svcName = "github-runner";
|
||||
systemdDir = "${svcName}/${cfg.name}";
|
||||
# %t: Runtime directory root (usually /run); see systemd.unit(5)
|
||||
runtimeDir = "%t/${systemdDir}";
|
||||
# %S: State directory root (usually /var/lib); see systemd.unit(5)
|
||||
stateDir = "%S/${systemdDir}";
|
||||
# %L: Log directory root (usually /var/log); see systemd.unit(5)
|
||||
logsDir = "%L/${systemdDir}";
|
||||
# Name of file stored in service state directory
|
||||
currentConfigTokenFilename = ".current-token";
|
||||
in
|
||||
|
||||
{
|
||||
options.services.github-runner = {
|
||||
enable = mkOption {
|
||||
default = false;
|
||||
example = true;
|
||||
description = lib.mdDoc ''
|
||||
Whether to enable GitHub Actions runner.
|
||||
|
||||
Note: GitHub recommends using self-hosted runners with private repositories only. Learn more here:
|
||||
[About self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners).
|
||||
'';
|
||||
type = lib.types.bool;
|
||||
};
|
||||
|
||||
url = mkOption {
|
||||
type = types.str;
|
||||
description = lib.mdDoc ''
|
||||
Repository to add the runner to.
|
||||
|
||||
Changing this option triggers a new runner registration.
|
||||
|
||||
IMPORTANT: If your token is org-wide (not per repository), you need to
|
||||
provide a github org link, not a single repository, so do it like this
|
||||
`https://github.com/nixos`, not like this
|
||||
`https://github.com/nixos/nixpkgs`.
|
||||
Otherwise, you are going to get a `404 NotFound`
|
||||
from `POST https://api.github.com/actions/runner-registration`
|
||||
in the configure script.
|
||||
'';
|
||||
example = "https://github.com/nixos/nixpkgs";
|
||||
};
|
||||
|
||||
tokenFile = mkOption {
|
||||
type = types.path;
|
||||
description = lib.mdDoc ''
|
||||
The full path to a file which contains either a runner registration token or a
|
||||
personal access token (PAT).
|
||||
The file should contain exactly one line with the token without any newline.
|
||||
If a registration token is given, it can be used to re-register a runner of the same
|
||||
name but is time-limited. If the file contains a PAT, the service creates a new
|
||||
registration token on startup as needed. Make sure the PAT has a scope of
|
||||
`admin:org` for organization-wide registrations or a scope of
|
||||
`repo` for a single repository.
|
||||
|
||||
Changing this option or the file's content triggers a new runner registration.
|
||||
'';
|
||||
example = "/run/secrets/github-runner/nixos.token";
|
||||
};
|
||||
|
||||
name = mkOption {
|
||||
# Same pattern as for `networking.hostName`
|
||||
type = types.strMatching "^$|^[[:alnum:]]([[:alnum:]_-]{0,61}[[:alnum:]])?$";
|
||||
description = lib.mdDoc ''
|
||||
Name of the runner to configure. Defaults to the hostname.
|
||||
|
||||
Changing this option triggers a new runner registration.
|
||||
'';
|
||||
example = "nixos";
|
||||
default = config.networking.hostName;
|
||||
defaultText = literalExpression "config.networking.hostName";
|
||||
};
|
||||
|
||||
runnerGroup = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
description = lib.mdDoc ''
|
||||
Name of the runner group to add this runner to (defaults to the default runner group).
|
||||
|
||||
Changing this option triggers a new runner registration.
|
||||
'';
|
||||
default = null;
|
||||
};
|
||||
|
||||
extraLabels = mkOption {
|
||||
type = types.listOf types.str;
|
||||
description = lib.mdDoc ''
|
||||
Extra labels in addition to the default (`["self-hosted", "Linux", "X64"]`).
|
||||
|
||||
Changing this option triggers a new runner registration.
|
||||
'';
|
||||
example = literalExpression ''[ "nixos" ]'';
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
replace = mkOption {
|
||||
type = types.bool;
|
||||
description = lib.mdDoc ''
|
||||
Replace any existing runner with the same name.
|
||||
|
||||
Without this flag, registering a new runner with the same name fails.
|
||||
'';
|
||||
default = false;
|
||||
};
|
||||
|
||||
extraPackages = mkOption {
|
||||
type = types.listOf types.package;
|
||||
description = lib.mdDoc ''
|
||||
Extra packages to add to `PATH` of the service to make them available to workflows.
|
||||
'';
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
description = lib.mdDoc ''
|
||||
Which github-runner derivation to use.
|
||||
'';
|
||||
default = pkgs.github-runner;
|
||||
defaultText = literalExpression "pkgs.github-runner";
|
||||
};
|
||||
|
||||
ephemeral = mkOption {
|
||||
type = types.bool;
|
||||
description = lib.mdDoc ''
|
||||
If enabled, causes the following behavior:
|
||||
|
||||
- Passes the `--ephemeral` flag to the runner configuration script
|
||||
- De-registers and stops the runner with GitHub after it has processed one job
|
||||
- On stop, systemd wipes the runtime directory (this always happens, even without using the ephemeral option)
|
||||
- Restarts the service after its successful exit
|
||||
- On start, wipes the state directory and configures a new runner
|
||||
|
||||
You should only enable this option if `tokenFile` points to a file which contains a
|
||||
personal access token (PAT). If you're using the option with a registration token, restarting the
|
||||
service will fail as soon as the registration token expired.
|
||||
'';
|
||||
default = false;
|
||||
};
|
||||
};
|
||||
options.services.github-runner = import ./github-runner/options.nix (args // {
|
||||
# Users don't need to specify options.services.github-runner.name; it will default
|
||||
# to the hostname.
|
||||
includeNameDefault = true;
|
||||
});
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
warnings = optionals (isStorePath cfg.tokenFile) [
|
||||
''
|
||||
`services.github-runner.tokenFile` points to the Nix store and, therefore, is world-readable.
|
||||
Consider using a path outside of the Nix store to keep the token private.
|
||||
''
|
||||
];
|
||||
|
||||
systemd.services.${svcName} = {
|
||||
description = "GitHub Actions runner";
|
||||
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
wants = [ "network-online.target" ];
|
||||
after = [ "network.target" "network-online.target" ];
|
||||
|
||||
environment = {
|
||||
HOME = runtimeDir;
|
||||
RUNNER_ROOT = stateDir;
|
||||
};
|
||||
|
||||
path = (with pkgs; [
|
||||
bash
|
||||
coreutils
|
||||
git
|
||||
gnutar
|
||||
gzip
|
||||
]) ++ [
|
||||
config.nix.package
|
||||
] ++ cfg.extraPackages;
|
||||
|
||||
serviceConfig = rec {
|
||||
ExecStart = "${cfg.package}/bin/Runner.Listener run --startuptype service";
|
||||
|
||||
# Does the following, sequentially:
|
||||
# - If the module configuration or the token has changed, purge the state directory,
|
||||
# and create the current and the new token file with the contents of the configured
|
||||
# token. While both files have the same content, only the later is accessible by
|
||||
# the service user.
|
||||
# - Configure the runner using the new token file. When finished, delete it.
|
||||
# - Set up the directory structure by creating the necessary symlinks.
|
||||
ExecStartPre =
|
||||
let
|
||||
# Wrapper script which expects the full path of the state, runtime and logs
|
||||
# directory as arguments. Overrides the respective systemd variables to provide
|
||||
# unambiguous directory names. This becomes relevant, for example, if the
|
||||
# caller overrides any of the StateDirectory=, RuntimeDirectory= or LogDirectory=
|
||||
# to contain more than one directory. This causes systemd to set the respective
|
||||
# environment variables with the path of all of the given directories, separated
|
||||
# by a colon.
|
||||
writeScript = name: lines: pkgs.writeShellScript "${svcName}-${name}.sh" ''
|
||||
set -euo pipefail
|
||||
|
||||
STATE_DIRECTORY="$1"
|
||||
RUNTIME_DIRECTORY="$2"
|
||||
LOGS_DIRECTORY="$3"
|
||||
|
||||
${lines}
|
||||
'';
|
||||
currentConfigPath = "$STATE_DIRECTORY/.nixos-current-config.json";
|
||||
runnerRegistrationConfig = getAttrs [ "name" "tokenFile" "url" "runnerGroup" "extraLabels" "ephemeral" ] cfg;
|
||||
newConfigPath = builtins.toFile "${svcName}-config.json" (builtins.toJSON runnerRegistrationConfig);
|
||||
newConfigTokenFilename = ".new-token";
|
||||
runnerCredFiles = [
|
||||
".credentials"
|
||||
".credentials_rsaparams"
|
||||
".runner"
|
||||
];
|
||||
unconfigureRunner = writeScript "unconfigure" ''
|
||||
differs=
|
||||
|
||||
if [[ "$(ls -A "$STATE_DIRECTORY")" ]]; then
|
||||
# State directory is not empty
|
||||
# Set `differs = 1` if current and new runner config differ or if `currentConfigPath` does not exist
|
||||
${pkgs.diffutils}/bin/diff -q '${newConfigPath}' "${currentConfigPath}" >/dev/null 2>&1 || differs=1
|
||||
# Also trigger a registration if the token content changed
|
||||
${pkgs.diffutils}/bin/diff -q \
|
||||
"$STATE_DIRECTORY"/${currentConfigTokenFilename} \
|
||||
${escapeShellArg cfg.tokenFile} \
|
||||
>/dev/null 2>&1 || differs=1
|
||||
# If .credentials does not exist, assume a previous run de-registered the runner on stop (ephemeral mode)
|
||||
[[ ! -f "$STATE_DIRECTORY/.credentials" ]] && differs=1
|
||||
fi
|
||||
|
||||
if [[ -n "$differs" ]]; then
|
||||
echo "Config has changed, removing old runner state."
|
||||
# In ephemeral mode, the runner deletes the `.credentials` file after de-registering it with GitHub
|
||||
[[ -f "$STATE_DIRECTORY/.credentials" ]] && echo "The old runner will still appear in the GitHub Actions UI." \
|
||||
"You have to remove it manually."
|
||||
find "$STATE_DIRECTORY/" -mindepth 1 -delete
|
||||
|
||||
# Copy the configured token file to the state dir and allow the service user to read the file
|
||||
install --mode=666 ${escapeShellArg cfg.tokenFile} "$STATE_DIRECTORY/${newConfigTokenFilename}"
|
||||
# Also copy current file to allow for a diff on the next start
|
||||
install --mode=600 ${escapeShellArg cfg.tokenFile} "$STATE_DIRECTORY/${currentConfigTokenFilename}"
|
||||
fi
|
||||
'';
|
||||
configureRunner = writeScript "configure" ''
|
||||
if [[ -e "$STATE_DIRECTORY/${newConfigTokenFilename}" ]]; then
|
||||
echo "Configuring GitHub Actions Runner"
|
||||
|
||||
args=(
|
||||
--unattended
|
||||
--disableupdate
|
||||
--work "$RUNTIME_DIRECTORY"
|
||||
--url ${escapeShellArg cfg.url}
|
||||
--labels ${escapeShellArg (concatStringsSep "," cfg.extraLabels)}
|
||||
--name ${escapeShellArg cfg.name}
|
||||
${optionalString cfg.replace "--replace"}
|
||||
${optionalString (cfg.runnerGroup != null) "--runnergroup ${escapeShellArg cfg.runnerGroup}"}
|
||||
${optionalString cfg.ephemeral "--ephemeral"}
|
||||
)
|
||||
|
||||
# If the token file contains a PAT (i.e., it starts with "ghp_"), we have to use the --pat option,
|
||||
# if it is not a PAT, we assume it contains a registration token and use the --token option
|
||||
token=$(<"$STATE_DIRECTORY/${newConfigTokenFilename}")
|
||||
if [[ "$token" =~ ^ghp_* ]]; then
|
||||
args+=(--pat "$token")
|
||||
else
|
||||
args+=(--token "$token")
|
||||
fi
|
||||
|
||||
${cfg.package}/bin/config.sh "''${args[@]}"
|
||||
|
||||
# Move the automatically created _diag dir to the logs dir
|
||||
mkdir -p "$STATE_DIRECTORY/_diag"
|
||||
cp -r "$STATE_DIRECTORY/_diag/." "$LOGS_DIRECTORY/"
|
||||
rm -rf "$STATE_DIRECTORY/_diag/"
|
||||
|
||||
# Cleanup token from config
|
||||
rm "$STATE_DIRECTORY/${newConfigTokenFilename}"
|
||||
|
||||
# Symlink to new config
|
||||
ln -s '${newConfigPath}' "${currentConfigPath}"
|
||||
fi
|
||||
'';
|
||||
setupRuntimeDir = writeScript "setup-runtime-dirs" ''
|
||||
# Link _diag dir
|
||||
ln -s "$LOGS_DIRECTORY" "$RUNTIME_DIRECTORY/_diag"
|
||||
|
||||
# Link the runner credentials to the runtime dir
|
||||
ln -s "$STATE_DIRECTORY"/{${lib.concatStringsSep "," runnerCredFiles}} "$RUNTIME_DIRECTORY/"
|
||||
'';
|
||||
in
|
||||
map (x: "${x} ${escapeShellArgs [ stateDir runtimeDir logsDir ]}") [
|
||||
"+${unconfigureRunner}" # runs as root
|
||||
configureRunner
|
||||
setupRuntimeDir
|
||||
];
|
||||
|
||||
# If running in ephemeral mode, restart the service on-exit (i.e., successful de-registration of the runner)
|
||||
# to trigger a fresh registration.
|
||||
Restart = if cfg.ephemeral then "on-success" else "no";
|
||||
|
||||
# Contains _diag
|
||||
LogsDirectory = [ systemdDir ];
|
||||
# Default RUNNER_ROOT which contains ephemeral Runner data
|
||||
RuntimeDirectory = [ systemdDir ];
|
||||
# Home of persistent runner data, e.g., credentials
|
||||
StateDirectory = [ systemdDir ];
|
||||
StateDirectoryMode = "0700";
|
||||
WorkingDirectory = runtimeDir;
|
||||
|
||||
InaccessiblePaths = [
|
||||
# Token file path given in the configuration
|
||||
cfg.tokenFile
|
||||
# Token file in the state directory
|
||||
"${stateDir}/${currentConfigTokenFilename}"
|
||||
];
|
||||
|
||||
# By default, use a dynamically allocated user
|
||||
DynamicUser = true;
|
||||
|
||||
KillSignal = "SIGINT";
|
||||
|
||||
# Hardening (may overlap with DynamicUser=)
|
||||
# The following options are only for optimizing:
|
||||
# systemd-analyze security github-runner
|
||||
AmbientCapabilities = "";
|
||||
CapabilityBoundingSet = "";
|
||||
# ProtectClock= adds DeviceAllow=char-rtc r
|
||||
DeviceAllow = "";
|
||||
NoNewPrivileges = true;
|
||||
PrivateDevices = true;
|
||||
PrivateMounts = true;
|
||||
PrivateTmp = true;
|
||||
PrivateUsers = true;
|
||||
ProtectClock = true;
|
||||
ProtectControlGroups = true;
|
||||
ProtectHome = true;
|
||||
ProtectHostname = true;
|
||||
ProtectKernelLogs = true;
|
||||
ProtectKernelModules = true;
|
||||
ProtectKernelTunables = true;
|
||||
ProtectSystem = "strict";
|
||||
RemoveIPC = true;
|
||||
RestrictNamespaces = true;
|
||||
RestrictRealtime = true;
|
||||
RestrictSUIDSGID = true;
|
||||
UMask = "0066";
|
||||
ProtectProc = "invisible";
|
||||
SystemCallFilter = [
|
||||
"~@clock"
|
||||
"~@cpu-emulation"
|
||||
"~@module"
|
||||
"~@mount"
|
||||
"~@obsolete"
|
||||
"~@raw-io"
|
||||
"~@reboot"
|
||||
"~capset"
|
||||
"~setdomainname"
|
||||
"~sethostname"
|
||||
];
|
||||
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" "AF_NETLINK" ];
|
||||
|
||||
# Needs network access
|
||||
PrivateNetwork = false;
|
||||
# Cannot be true due to Node
|
||||
MemoryDenyWriteExecute = false;
|
||||
|
||||
# The more restrictive "pid" option makes `nix` commands in CI emit
|
||||
# "GC Warning: Couldn't read /proc/stat"
|
||||
# You may want to set this to "pid" if not using `nix` commands
|
||||
ProcSubset = "all";
|
||||
# Coverage programs for compiled code such as `cargo-tarpaulin` disable
|
||||
# ASLR (address space layout randomization) which requires the
|
||||
# `personality` syscall
|
||||
# You may want to set this to `true` if not using coverage tooling on
|
||||
# compiled code
|
||||
LockPersonality = false;
|
||||
};
|
||||
};
|
||||
services.github-runners.${cfg.name} = cfg;
|
||||
};
|
||||
}
|
||||
|
|
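A minimal configuration sketch for the module above; the URL and token path follow the examples given in the option descriptions:
{
  services.github-runner = {
    enable = true;
    url = "https://github.com/nixos/nixpkgs";              # placeholder repository
    tokenFile = "/run/secrets/github-runner/nixos.token";  # PAT or registration token
    ephemeral = true;   # requires a PAT: de-registers after each job and re-registers on restart
    extraLabels = [ "nixos" ];
  };
}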
173 third_party/nixpkgs/nixos/modules/services/continuous-integration/github-runner/options.nix vendored Normal file
@ -0,0 +1,173 @@
|
|||
{ config
|
||||
, lib
|
||||
, pkgs
|
||||
, includeNameDefault
|
||||
, ...
|
||||
}:
|
||||
|
||||
with lib;
|
||||
|
||||
{
|
||||
enable = mkOption {
|
||||
default = false;
|
||||
example = true;
|
||||
description = lib.mdDoc ''
|
||||
Whether to enable GitHub Actions runner.
|
||||
|
||||
Note: GitHub recommends using self-hosted runners with private repositories only. Learn more here:
|
||||
[About self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners).
|
||||
'';
|
||||
type = lib.types.bool;
|
||||
};
|
||||
|
||||
url = mkOption {
|
||||
type = types.str;
|
||||
description = lib.mdDoc ''
|
||||
Repository to add the runner to.
|
||||
|
||||
Changing this option triggers a new runner registration.
|
||||
|
||||
IMPORTANT: If your token is org-wide (not per repository), you need to
|
||||
provide a github org link, not a single repository, so do it like this
|
||||
`https://github.com/nixos`, not like this
|
||||
`https://github.com/nixos/nixpkgs`.
|
||||
Otherwise, you are going to get a `404 NotFound`
|
||||
from `POST https://api.github.com/actions/runner-registration`
|
||||
in the configure script.
|
||||
'';
|
||||
example = "https://github.com/nixos/nixpkgs";
|
||||
};
|
||||
|
||||
tokenFile = mkOption {
|
||||
type = types.path;
|
||||
description = lib.mdDoc ''
|
||||
The full path to a file which contains either a runner registration token or a
|
||||
(fine-grained) personal access token (PAT).
|
||||
The file should contain exactly one line with the token without any newline.
|
||||
If a registration token is given, it can be used to re-register a runner of the same
|
||||
name but is time-limited. If the file contains a PAT, the service creates a new
|
||||
registration token on startup as needed. Make sure the PAT has a scope of
|
||||
`admin:org` for organization-wide registrations or a scope of
|
||||
`repo` for a single repository. Fine-grained PATs need read and write permission
|
||||
to the "Adminstration" resources.
|
||||
|
||||
Changing this option or the file's content triggers a new runner registration.
|
||||
'';
|
||||
example = "/run/secrets/github-runner/nixos.token";
|
||||
};
|
||||
|
||||
name = let
|
||||
# Same pattern as for `networking.hostName`
|
||||
baseType = types.strMatching "^$|^[[:alnum:]]([[:alnum:]_-]{0,61}[[:alnum:]])?$";
|
||||
in mkOption {
|
||||
type = if includeNameDefault then baseType else types.nullOr baseType;
|
||||
description = lib.mdDoc ''
|
||||
Name of the runner to configure. Defaults to the hostname.
|
||||
|
||||
Changing this option triggers a new runner registration.
|
||||
'';
|
||||
example = "nixos";
|
||||
} // (if includeNameDefault then {
|
||||
default = config.networking.hostName;
|
||||
defaultText = literalExpression "config.networking.hostName";
|
||||
} else {
|
||||
default = null;
|
||||
});
|
||||
|
||||
runnerGroup = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
description = lib.mdDoc ''
|
||||
Name of the runner group to add this runner to (defaults to the default runner group).
|
||||
|
||||
Changing this option triggers a new runner registration.
|
||||
'';
|
||||
default = null;
|
||||
};
|
||||
|
||||
extraLabels = mkOption {
|
||||
type = types.listOf types.str;
|
||||
description = lib.mdDoc ''
|
||||
Extra labels in addition to the default (`["self-hosted", "Linux", "X64"]`).
|
||||
|
||||
Changing this option triggers a new runner registration.
|
||||
'';
|
||||
example = literalExpression ''[ "nixos" ]'';
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
replace = mkOption {
|
||||
type = types.bool;
|
||||
description = lib.mdDoc ''
|
||||
Replace any existing runner with the same name.
|
||||
|
||||
Without this flag, registering a new runner with the same name fails.
|
||||
'';
|
||||
default = false;
|
||||
};
|
||||
|
||||
extraPackages = mkOption {
|
||||
type = types.listOf types.package;
|
||||
description = lib.mdDoc ''
|
||||
Extra packages to add to `PATH` of the service to make them available to workflows.
|
||||
'';
|
||||
default = [ ];
|
||||
};
|
||||
|
||||
extraEnvironment = mkOption {
|
||||
type = types.attrs;
|
||||
description = lib.mdDoc ''
|
||||
Extra environment variables to set for the runner, as an attrset.
|
||||
'';
|
||||
example = {
|
||||
GIT_CONFIG = "/path/to/git/config";
|
||||
};
|
||||
default = {};
|
||||
};
|
||||
|
||||
serviceOverrides = mkOption {
|
||||
type = types.attrs;
|
||||
description = lib.mdDoc ''
|
||||
Overrides for the systemd service. Can be used to adjust the sandboxing options.
|
||||
'';
|
||||
example = {
|
||||
ProtectHome = false;
|
||||
};
|
||||
default = {};
|
||||
};
|
||||
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
description = lib.mdDoc ''
|
||||
Which github-runner derivation to use.
|
||||
'';
|
||||
default = pkgs.github-runner;
|
||||
defaultText = literalExpression "pkgs.github-runner";
|
||||
};
|
||||
|
||||
ephemeral = mkOption {
|
||||
type = types.bool;
|
||||
description = lib.mdDoc ''
|
||||
If enabled, causes the following behavior:
|
||||
|
||||
- Passes the `--ephemeral` flag to the runner configuration script
|
||||
- De-registers and stops the runner with GitHub after it has processed one job
|
||||
- On stop, systemd wipes the runtime directory (this always happens, even without using the ephemeral option)
|
||||
- Restarts the service after its successful exit
|
||||
- On start, wipes the state directory and configures a new runner
|
||||
|
||||
You should only enable this option if `tokenFile` points to a file which contains a
|
||||
personal access token (PAT). If you're using the option with a registration token, restarting the
|
||||
service will fail as soon as the registration token expired.
|
||||
'';
|
||||
default = false;
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
description = lib.mdDoc ''
|
||||
User under which to run the service. If null, will use a systemd dynamic user.
|
||||
'';
|
||||
default = null;
|
||||
defaultText = literalExpression "username";
|
||||
};
|
||||
}
|
254 third_party/nixpkgs/nixos/modules/services/continuous-integration/github-runner/service.nix vendored Normal file
@ -0,0 +1,254 @@
|
|||
{ config
|
||||
, lib
|
||||
, pkgs
|
||||
|
||||
, cfg ? config.services.github-runner
|
||||
, svcName
|
||||
|
||||
, systemdDir ? "${svcName}/${cfg.name}"
|
||||
# %t: Runtime directory root (usually /run); see systemd.unit(5)
|
||||
, runtimeDir ? "%t/${systemdDir}"
|
||||
# %S: State directory root (usually /var/lib); see systemd.unit(5)
|
||||
, stateDir ? "%S/${systemdDir}"
|
||||
# %L: Log directory root (usually /var/log); see systemd.unit(5)
|
||||
, logsDir ? "%L/${systemdDir}"
|
||||
# Name of file stored in service state directory
|
||||
, currentConfigTokenFilename ? ".current-token"
|
||||
|
||||
, ...
|
||||
}:
|
||||
|
||||
with lib;
|
||||
|
||||
{
|
||||
description = "GitHub Actions runner";
|
||||
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
wants = [ "network-online.target" ];
|
||||
after = [ "network.target" "network-online.target" ];
|
||||
|
||||
environment = {
|
||||
HOME = runtimeDir;
|
||||
RUNNER_ROOT = stateDir;
|
||||
} // cfg.extraEnvironment;
|
||||
|
||||
path = (with pkgs; [
|
||||
bash
|
||||
coreutils
|
||||
git
|
||||
gnutar
|
||||
gzip
|
||||
]) ++ [
|
||||
config.nix.package
|
||||
] ++ cfg.extraPackages;
|
||||
|
||||
serviceConfig = rec {
|
||||
ExecStart = "${cfg.package}/bin/Runner.Listener run --startuptype service";
|
||||
|
||||
# Does the following, sequentially:
|
||||
# - If the module configuration or the token has changed, purge the state directory,
|
||||
# and create the current and the new token file with the contents of the configured
|
||||
# token. While both files have the same content, only the later is accessible by
|
||||
# the service user.
|
||||
# - Configure the runner using the new token file. When finished, delete it.
|
||||
# - Set up the directory structure by creating the necessary symlinks.
|
||||
ExecStartPre =
|
||||
let
|
||||
# Wrapper script which expects the full path of the state, runtime and logs
|
||||
# directory as arguments. Overrides the respective systemd variables to provide
|
||||
# unambiguous directory names. This becomes relevant, for example, if the
|
||||
# caller overrides any of the StateDirectory=, RuntimeDirectory= or LogDirectory=
|
||||
# to contain more than one directory. This causes systemd to set the respective
|
||||
# environment variables with the path of all of the given directories, separated
|
||||
# by a colon.
|
||||
writeScript = name: lines: pkgs.writeShellScript "${svcName}-${name}.sh" ''
|
||||
set -euo pipefail
|
||||
|
||||
STATE_DIRECTORY="$1"
|
||||
RUNTIME_DIRECTORY="$2"
|
||||
LOGS_DIRECTORY="$3"
|
||||
|
||||
${lines}
|
||||
'';
|
||||
runnerRegistrationConfig = getAttrs [ "name" "tokenFile" "url" "runnerGroup" "extraLabels" "ephemeral" ] cfg;
|
||||
newConfigPath = builtins.toFile "${svcName}-config.json" (builtins.toJSON runnerRegistrationConfig);
|
||||
currentConfigPath = "$STATE_DIRECTORY/.nixos-current-config.json";
|
||||
newConfigTokenPath= "$STATE_DIRECTORY/.new-token";
|
||||
currentConfigTokenPath = "$STATE_DIRECTORY/${currentConfigTokenFilename}";
|
||||
|
||||
runnerCredFiles = [
|
||||
".credentials"
|
||||
".credentials_rsaparams"
|
||||
".runner"
|
||||
];
|
||||
unconfigureRunner = writeScript "unconfigure" ''
|
||||
copy_tokens() {
|
||||
# Copy the configured token file to the state dir and allow the service user to read the file
|
||||
install --mode=666 ${escapeShellArg cfg.tokenFile} "${newConfigTokenPath}"
|
||||
# Also copy current file to allow for a diff on the next start
|
||||
install --mode=600 ${escapeShellArg cfg.tokenFile} "${currentConfigTokenPath}"
|
||||
}
|
||||
clean_state() {
|
||||
find "$STATE_DIRECTORY/" -mindepth 1 -delete
|
||||
copy_tokens
|
||||
}
|
||||
diff_config() {
|
||||
changed=0
|
||||
# Check for module config changes
|
||||
[[ -f "${currentConfigPath}" ]] \
|
||||
&& ${pkgs.diffutils}/bin/diff -q '${newConfigPath}' "${currentConfigPath}" >/dev/null 2>&1 \
|
||||
|| changed=1
|
||||
# Also check the content of the token file
|
||||
[[ -f "${currentConfigTokenPath}" ]] \
|
||||
&& ${pkgs.diffutils}/bin/diff -q "${currentConfigTokenPath}" ${escapeShellArg cfg.tokenFile} >/dev/null 2>&1 \
|
||||
|| changed=1
|
||||
# If the config has changed, remove old state and copy tokens
|
||||
if [[ "$changed" -eq 1 ]]; then
|
||||
echo "Config has changed, removing old runner state."
|
||||
echo "The old runner will still appear in the GitHub Actions UI." \
|
||||
"You have to remove it manually."
|
||||
clean_state
|
||||
fi
|
||||
}
|
||||
if [[ "${optionalString cfg.ephemeral "1"}" ]]; then
|
||||
# In ephemeral mode, we always want to start with a clean state
|
||||
clean_state
|
||||
elif [[ "$(ls -A "$STATE_DIRECTORY")" ]]; then
|
||||
# There are state files from a previous run; diff them to decide if we need a new registration
|
||||
diff_config
|
||||
else
|
||||
# The state directory is entirely empty which indicates a first start
|
||||
copy_tokens
|
||||
fi '';
|
||||
configureRunner = writeScript "configure" ''
|
||||
if [[ -e "${newConfigTokenPath}" ]]; then
|
||||
echo "Configuring GitHub Actions Runner"
|
||||
args=(
|
||||
--unattended
|
||||
--disableupdate
|
||||
--work "$RUNTIME_DIRECTORY"
|
||||
--url ${escapeShellArg cfg.url}
|
||||
--labels ${escapeShellArg (concatStringsSep "," cfg.extraLabels)}
|
||||
--name ${escapeShellArg cfg.name}
|
||||
${optionalString cfg.replace "--replace"}
|
||||
${optionalString (cfg.runnerGroup != null) "--runnergroup ${escapeShellArg cfg.runnerGroup}"}
|
||||
${optionalString cfg.ephemeral "--ephemeral"}
|
||||
)
|
||||
# If the token file contains a PAT (i.e., it starts with "ghp_" or "github_pat_"), we have to use the --pat option,
|
||||
# if it is not a PAT, we assume it contains a registration token and use the --token option
|
||||
token=$(<"${newConfigTokenPath}")
|
||||
if [[ "$token" =~ ^ghp_* ]] || [[ "$token" =~ ^github_pat_* ]]; then
|
||||
args+=(--pat "$token")
|
||||
else
|
||||
args+=(--token "$token")
|
||||
fi
|
||||
${cfg.package}/bin/config.sh "''${args[@]}"
|
||||
# Move the automatically created _diag dir to the logs dir
|
||||
mkdir -p "$STATE_DIRECTORY/_diag"
|
||||
cp -r "$STATE_DIRECTORY/_diag/." "$LOGS_DIRECTORY/"
|
||||
rm -rf "$STATE_DIRECTORY/_diag/"
|
||||
# Cleanup token from config
|
||||
rm "${newConfigTokenPath}"
|
||||
# Symlink to new config
|
||||
ln -s '${newConfigPath}' "${currentConfigPath}"
|
||||
fi
|
||||
'';
|
||||
setupRuntimeDir = writeScript "setup-runtime-dirs" ''
|
||||
# Link _diag dir
|
||||
ln -s "$LOGS_DIRECTORY" "$RUNTIME_DIRECTORY/_diag"
|
||||
|
||||
# Link the runner credentials to the runtime dir
|
||||
ln -s "$STATE_DIRECTORY"/{${lib.concatStringsSep "," runnerCredFiles}} "$RUNTIME_DIRECTORY/"
|
||||
'';
|
||||
in
|
||||
map (x: "${x} ${escapeShellArgs [ stateDir runtimeDir logsDir ]}") [
|
||||
"+${unconfigureRunner}" # runs as root
|
||||
configureRunner
|
||||
setupRuntimeDir
|
||||
];
|
||||
|
||||
# If running in ephemeral mode, restart the service on-exit (i.e., successful de-registration of the runner)
|
||||
# to trigger a fresh registration.
|
||||
Restart = if cfg.ephemeral then "on-success" else "no";
|
||||
|
||||
# Contains _diag
|
||||
LogsDirectory = [ systemdDir ];
|
||||
# Default RUNNER_ROOT which contains ephemeral Runner data
|
||||
RuntimeDirectory = [ systemdDir ];
|
||||
# Home of persistent runner data, e.g., credentials
|
||||
StateDirectory = [ systemdDir ];
|
||||
StateDirectoryMode = "0700";
|
||||
WorkingDirectory = runtimeDir;
|
||||
|
||||
InaccessiblePaths = [
|
||||
# Token file path given in the configuration, if visible to the service
|
||||
"-${cfg.tokenFile}"
|
||||
# Token file in the state directory
|
||||
"${stateDir}/${currentConfigTokenFilename}"
|
||||
];
|
||||
|
||||
KillSignal = "SIGINT";
|
||||
|
||||
# Hardening (may overlap with DynamicUser=)
|
||||
# The following options are only for optimizing:
|
||||
# systemd-analyze security github-runner
|
||||
AmbientCapabilities = "";
|
||||
CapabilityBoundingSet = "";
|
||||
# ProtectClock= adds DeviceAllow=char-rtc r
|
||||
DeviceAllow = "";
|
||||
NoNewPrivileges = true;
|
||||
PrivateDevices = true;
|
||||
PrivateMounts = true;
|
||||
PrivateTmp = true;
|
||||
PrivateUsers = true;
|
||||
ProtectClock = true;
|
||||
ProtectControlGroups = true;
|
||||
ProtectHome = true;
|
||||
ProtectHostname = true;
|
||||
ProtectKernelLogs = true;
|
||||
ProtectKernelModules = true;
|
||||
ProtectKernelTunables = true;
|
||||
ProtectSystem = "strict";
|
||||
RemoveIPC = true;
|
||||
RestrictNamespaces = true;
|
||||
RestrictRealtime = true;
|
||||
RestrictSUIDSGID = true;
|
||||
UMask = "0066";
|
||||
ProtectProc = "invisible";
|
||||
SystemCallFilter = [
|
||||
"~@clock"
|
||||
"~@cpu-emulation"
|
||||
"~@module"
|
||||
"~@mount"
|
||||
"~@obsolete"
|
||||
"~@raw-io"
|
||||
"~@reboot"
|
||||
"~capset"
|
||||
"~setdomainname"
|
||||
"~sethostname"
|
||||
];
|
||||
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" "AF_NETLINK" ];
|
||||
|
||||
# Needs network access
|
||||
PrivateNetwork = false;
|
||||
# Cannot be true due to Node
|
||||
MemoryDenyWriteExecute = false;
|
||||
|
||||
# The more restrictive "pid" option makes `nix` commands in CI emit
|
||||
# "GC Warning: Couldn't read /proc/stat"
|
||||
# You may want to set this to "pid" if not using `nix` commands
|
||||
ProcSubset = "all";
|
||||
# Coverage programs for compiled code such as `cargo-tarpaulin` disable
|
||||
# ASLR (address space layout randomization) which requires the
|
||||
# `personality` syscall
|
||||
# You may want to set this to `true` if not using coverage tooling on
|
||||
# compiled code
|
||||
LockPersonality = false;
|
||||
|
||||
# Note that this has some interactions with the User setting; so you may
|
||||
# want to consult the systemd docs if using both.
|
||||
DynamicUser = true;
|
||||
} // (
|
||||
lib.optionalAttrs (cfg.user != null) { User = cfg.user; }
|
||||
) // cfg.serviceOverrides;
|
||||
}
|
56 third_party/nixpkgs/nixos/modules/services/continuous-integration/github-runners.nix vendored Normal file
@ -0,0 +1,56 @@
{ config
, pkgs
, lib
, ...
}@args:

with lib;

let
cfg = config.services.github-runners;

in

{
options.services.github-runners = mkOption {
default = {};
type = with types; attrsOf (submodule { options = import ./github-runner/options.nix (args // {
# services.github-runners.${name}.name doesn't have a default; it falls back to ${name} below.
includeNameDefault = false;
}); });
example = {
runner1 = {
enable = true;
url = "https://github.com/owner/repo";
name = "runner1";
tokenFile = "/secrets/token1";
};

runner2 = {
enable = true;
url = "https://github.com/owner/repo";
name = "runner2";
tokenFile = "/secrets/token2";
};
};
description = lib.mdDoc ''
Multiple GitHub Runners.
'';
};

config = {
systemd.services = flip mapAttrs' cfg (n: v:
let
svcName = "github-runner-${n}";
in
nameValuePair svcName
(import ./github-runner/service.nix (args // {
inherit svcName;
cfg = v // {
name = if v.name != null then v.name else n;
};
systemdDir = "github-runner/${n}";
}))
);
};
}
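A sketch combining the new multi-runner option with the per-runner user and serviceOverrides options from options.nix; names, URLs and paths are placeholders:
{
  services.github-runners.runner1 = {
    enable = true;
    url = "https://github.com/owner/repo";    # placeholder
    tokenFile = "/secrets/token1";            # placeholder
    user = "github-runner";                   # run as a fixed user instead of DynamicUser
    serviceOverrides.ProtectHome = false;     # relax sandboxing, as in the option's example
  };
}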
@ -453,6 +453,43 @@ in
|
|||
};
|
||||
});
|
||||
};
|
||||
clear-docker-cache = {
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = lib.mdDoc ''
|
||||
Whether to periodically prune gitlab runner's Docker resources. If
|
||||
enabled, a systemd timer will run {command}`clear-docker-cache` as
|
||||
specified by the `dates` option.
|
||||
'';
|
||||
};
|
||||
|
||||
flags = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [ ];
|
||||
example = [ "prune" ];
|
||||
description = lib.mdDoc ''
|
||||
Any additional flags passed to {command}`clear-docker-cache`.
|
||||
'';
|
||||
};
|
||||
|
||||
dates = mkOption {
|
||||
default = "weekly";
|
||||
type = types.str;
|
||||
description = lib.mdDoc ''
|
||||
Specification (in the format described by
|
||||
{manpage}`systemd.time(7)`) of the time at
|
||||
which the prune will occur.
|
||||
'';
|
||||
};
|
||||
|
||||
package = mkOption {
|
||||
default = config.virtualisation.docker.package;
|
||||
defaultText = literalExpression "config.virtualisation.docker.package";
|
||||
example = literalExpression "pkgs.docker";
|
||||
description = lib.mdDoc "Docker package to use for clearing up docker cache.";
|
||||
};
|
||||
};
|
||||
};
|
||||
config = mkIf cfg.enable {
|
||||
warnings = (mapAttrsToList
|
||||
|
@ -497,6 +534,22 @@ in
|
|||
KillMode = "process";
|
||||
};
|
||||
};
|
||||
# Enable periodic clear-docker-cache script
|
||||
systemd.services.gitlab-runner-clear-docker-cache = {
|
||||
description = "Prune gitlab-runner docker resources";
|
||||
restartIfChanged = false;
|
||||
unitConfig.X-StopOnRemoval = false;
|
||||
|
||||
serviceConfig.Type = "oneshot";
|
||||
|
||||
path = [ cfg.clear-docker-cache.package pkgs.gawk ];
|
||||
|
||||
script = ''
|
||||
${pkgs.gitlab-runner}/bin/clear-docker-cache ${toString cfg.clear-docker-cache.flags}
|
||||
'';
|
||||
|
||||
startAt = optional cfg.clear-docker-cache.enable cfg.clear-docker-cache.dates;
|
||||
};
|
||||
# Enable docker if `docker` executor is used in any service
|
||||
virtualisation.docker.enable = mkIf (
|
||||
any (s: s.executor == "docker") (attrValues cfg.services)
|
||||
|
|
|
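A sketch of the new periodic Docker pruning options above; the schedule and flags are illustrative:
{
  services.gitlab-runner.clear-docker-cache = {
    enable = true;
    dates = "weekly";     # systemd.time(7) calendar expression
    flags = [ "prune" ];  # extra arguments passed to clear-docker-cache
  };
}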
@ -42,7 +42,7 @@ let
makeWrapperArgs = concatStringsSep " " (mapAttrsToList (key: value: "--set \"${key}\" \"${value}\"") hydraEnv);
in pkgs.buildEnv rec {
name = "hydra-env";
buildInputs = [ pkgs.makeWrapper ];
nativeBuildInputs = [ pkgs.makeWrapper ];
paths = [ cfg.package ];

postBuild = ''
@ -1,41 +0,0 @@
|
|||
# Chrome GNOME Shell native host connector.
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
{
|
||||
meta = {
|
||||
maintainers = teams.gnome.members;
|
||||
};
|
||||
|
||||
# Added 2021-05-07
|
||||
imports = [
|
||||
(mkRenamedOptionModule
|
||||
[ "services" "gnome3" "chrome-gnome-shell" "enable" ]
|
||||
[ "services" "gnome" "chrome-gnome-shell" "enable" ]
|
||||
)
|
||||
];
|
||||
|
||||
###### interface
|
||||
options = {
|
||||
services.gnome.chrome-gnome-shell.enable = mkEnableOption (lib.mdDoc ''
|
||||
Chrome GNOME Shell native host connector, a DBus service
|
||||
allowing to install GNOME Shell extensions from a web browser.
|
||||
'');
|
||||
};
|
||||
|
||||
|
||||
###### implementation
|
||||
config = mkIf config.services.gnome.chrome-gnome-shell.enable {
|
||||
environment.etc = {
|
||||
"chromium/native-messaging-hosts/org.gnome.chrome_gnome_shell.json".source = "${pkgs.chrome-gnome-shell}/etc/chromium/native-messaging-hosts/org.gnome.chrome_gnome_shell.json";
|
||||
"opt/chrome/native-messaging-hosts/org.gnome.chrome_gnome_shell.json".source = "${pkgs.chrome-gnome-shell}/etc/opt/chrome/native-messaging-hosts/org.gnome.chrome_gnome_shell.json";
|
||||
};
|
||||
|
||||
environment.systemPackages = [ pkgs.chrome-gnome-shell ];
|
||||
|
||||
services.dbus.packages = [ pkgs.chrome-gnome-shell ];
|
||||
|
||||
nixpkgs.config.firefox.enableGnomeExtensions = true;
|
||||
};
|
||||
}
|
47 third_party/nixpkgs/nixos/modules/services/desktops/gnome/gnome-browser-connector.nix vendored Normal file
@ -0,0 +1,47 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
inherit (lib) mdDoc mkEnableOption mkIf mkRenamedOptionModule teams;
|
||||
in
|
||||
|
||||
{
|
||||
meta = {
|
||||
maintainers = teams.gnome.members;
|
||||
};
|
||||
|
||||
imports = [
|
||||
# Added 2021-05-07
|
||||
(mkRenamedOptionModule
|
||||
[ "services" "gnome3" "chrome-gnome-shell" "enable" ]
|
||||
[ "services" "gnome" "gnome-browser-connector" "enable" ]
|
||||
)
|
||||
# Added 2022-07-25
|
||||
(mkRenamedOptionModule
|
||||
[ "services" "gnome" "chrome-gnome-shell" "enable" ]
|
||||
[ "services" "gnome" "gnome-browser-connector" "enable" ]
|
||||
)
|
||||
];
|
||||
|
||||
options = {
|
||||
services.gnome.gnome-browser-connector.enable = mkEnableOption (mdDoc ''
|
||||
Native host connector for the GNOME Shell browser extension, a DBus service
|
||||
allowing to install GNOME Shell extensions from a web browser.
|
||||
'');
|
||||
};
|
||||
|
||||
config = mkIf config.services.gnome.gnome-browser-connector.enable {
|
||||
environment.etc = {
|
||||
"chromium/native-messaging-hosts/org.gnome.browser_connector.json".source = "${pkgs.gnome-browser-connector}/etc/chromium/native-messaging-hosts/org.gnome.browser_connector.json";
|
||||
"opt/chrome/native-messaging-hosts/org.gnome.browser_connector.json".source = "${pkgs.gnome-browser-connector}/etc/opt/chrome/native-messaging-hosts/org.gnome.browser_connector.json";
|
||||
# Legacy paths.
|
||||
"chromium/native-messaging-hosts/org.gnome.chrome_gnome_shell.json".source = "${pkgs.gnome-browser-connector}/etc/chromium/native-messaging-hosts/org.gnome.chrome_gnome_shell.json";
|
||||
"opt/chrome/native-messaging-hosts/org.gnome.chrome_gnome_shell.json".source = "${pkgs.gnome-browser-connector}/etc/opt/chrome/native-messaging-hosts/org.gnome.chrome_gnome_shell.json";
|
||||
};
|
||||
|
||||
environment.systemPackages = [ pkgs.gnome-browser-connector ];
|
||||
|
||||
services.dbus.packages = [ pkgs.gnome-browser-connector ];
|
||||
|
||||
nixpkgs.config.firefox.enableGnomeExtensions = true;
|
||||
};
|
||||
}
|
|
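The renamed service can be enabled with the new option name; older chrome-gnome-shell settings are rewritten automatically by the mkRenamedOptionModule entries above:
{
  services.gnome.gnome-browser-connector.enable = true;
}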
@ -33,18 +33,26 @@ let
|
|||
mkEtcFile = p: nameValuePair (mkName p) { source = p; };
|
||||
in listToAttrs (map mkEtcFile cfg.extraTrustedKeys);
|
||||
|
||||
# We cannot include the file in $out and rely on filesInstalledToEtc
|
||||
# to install it because it would create a cyclic dependency between
|
||||
# the outputs. We also need to enable the remote,
|
||||
# which should not be done by default.
|
||||
testRemote = if cfg.enableTestRemote then {
|
||||
"fwupd/remotes.d/fwupd-tests.conf" = {
|
||||
source = pkgs.runCommand "fwupd-tests-enabled.conf" {} ''
|
||||
enableRemote = base: remote: {
|
||||
"fwupd/remotes.d/${remote}.conf" = {
|
||||
source = pkgs.runCommand "${remote}-enabled.conf" {} ''
|
||||
sed "s,^Enabled=false,Enabled=true," \
|
||||
"${cfg.package.installedTests}/etc/fwupd/remotes.d/fwupd-tests.conf" > "$out"
|
||||
"${base}/etc/fwupd/remotes.d/${remote}.conf" > "$out"
|
||||
'';
|
||||
};
|
||||
} else {};
|
||||
};
|
||||
remotes = (foldl'
|
||||
(configFiles: remote: configFiles // (enableRemote cfg.package remote))
|
||||
{}
|
||||
cfg.extraRemotes
|
||||
) // (
|
||||
# We cannot include the file in $out and rely on filesInstalledToEtc
|
||||
# to install it because it would create a cyclic dependency between
|
||||
# the outputs. We also need to enable the remote,
|
||||
# which should not be done by default.
|
||||
if cfg.enableTestRemote then (enableRemote cfg.package.installedTests "fwupd-tests") else {}
|
||||
);
|
||||
|
||||
in {
|
||||
|
||||
###### interface
|
||||
|
@ -86,6 +94,15 @@ in {
|
|||
'';
|
||||
};
|
||||
|
||||
extraRemotes = mkOption {
|
||||
type = with types; listOf str;
|
||||
default = [];
|
||||
example = [ "lvfs-testing" ];
|
||||
description = lib.mdDoc ''
|
||||
Enables extra remotes in fwupd. See `/etc/fwupd/remotes.d`.
|
||||
'';
|
||||
};
|
||||
|
||||
enableTestRemote = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
|
@ -119,7 +136,7 @@ in {
|
|||
environment.systemPackages = [ cfg.package ];
|
||||
|
||||
# customEtc overrides some files from the package
|
||||
environment.etc = originalEtc // customEtc // extraTrustedKeys // testRemote;
|
||||
environment.etc = originalEtc // customEtc // extraTrustedKeys // remotes;
|
||||
|
||||
services.dbus.packages = [ cfg.package ];
|
||||
|
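A sketch of the new extraRemotes option; the remote name comes from the option's example:
{
  services.fwupd = {
    enable = true;
    extraRemotes = [ "lvfs-testing" ];  # generates an enabled fwupd/remotes.d/lvfs-testing.conf
  };
}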
||||
|
|
|
@ -171,10 +171,10 @@ let
mv etc/udev/hwdb.bin $out
'';

compressFirmware = if config.boot.kernelPackages.kernelAtLeast "5.3" then
pkgs.compressFirmwareXz
compressFirmware = firmware: if (config.boot.kernelPackages.kernelAtLeast "5.3" && (firmware.compressFirmware or true)) then
pkgs.compressFirmwareXz firmware
else
id;
id firmware;

# Udev has a 512-character limit for ENV{PATH}, so create a symlink
# tree to work around this.
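Based on the new firmware.compressFirmware or true check above, an individual firmware package can presumably opt out of XZ compression by exposing that attribute; a hypothetical sketch:
{
  hardware.firmware = [
    # Hypothetical: expose compressFirmware = false via passthru so the
    # wrapper above falls back to id instead of pkgs.compressFirmwareXz.
    (pkgs.linux-firmware.overrideAttrs (old: {
      passthru = (old.passthru or { }) // { compressFirmware = false; };
    }))
  ];
}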
|
|
@ -162,7 +162,7 @@ in
environment.systemPackages = with pkgs; [
(runCommand "etebase-server" {
buildInputs = [ makeWrapper ];
nativeBuildInputs = [ makeWrapper ];
} ''
makeWrapper ${pythonEnv}/bin/etebase-server \
$out/bin/etebase-server \
|
|
@ -6,6 +6,9 @@ let
|
|||
cfg = config.services.gitlab;
|
||||
opt = options.services.gitlab;
|
||||
|
||||
toml = pkgs.formats.toml {};
|
||||
yaml = pkgs.formats.yaml {};
|
||||
|
||||
ruby = cfg.packages.gitlab.ruby;
|
||||
|
||||
postgresqlPackage = if config.services.postgresql.enable then
|
||||
|
@ -89,17 +92,18 @@ let
|
|||
repos_path = "${cfg.statePath}/repositories";
|
||||
secret_file = "${cfg.statePath}/gitlab_shell_secret";
|
||||
log_file = "${cfg.statePath}/log/gitlab-shell.log";
|
||||
redis = {
|
||||
bin = "${pkgs.redis}/bin/redis-cli";
|
||||
host = "127.0.0.1";
|
||||
port = config.services.redis.servers.gitlab.port;
|
||||
database = 0;
|
||||
namespace = "resque:gitlab";
|
||||
};
|
||||
};
|
||||
|
||||
redisConfig.production.url = cfg.redisUrl;
|
||||
|
||||
cableYml = yaml.generate "cable.yml" {
|
||||
production = {
|
||||
adapter = "redis";
|
||||
url = cfg.redisUrl;
|
||||
channel_prefix = "gitlab_production";
|
||||
};
|
||||
};
|
||||
|
||||
pagesArgs = [
|
||||
"-pages-domain" gitlabConfig.production.pages.host
|
||||
"-pages-root" "${gitlabConfig.production.shared.path}/pages"
|
||||
|
@ -188,16 +192,27 @@ let
|
|||
MALLOC_ARENA_MAX = "2";
|
||||
} // cfg.extraEnv;
|
||||
|
||||
runtimeDeps = with pkgs; [
|
||||
nodejs
|
||||
gzip
|
||||
git
|
||||
gnutar
|
||||
postgresqlPackage
|
||||
coreutils
|
||||
procps
|
||||
findutils # Needed for gitlab:cleanup:orphan_job_artifact_files
|
||||
];
|
||||
|
||||
gitlab-rake = pkgs.stdenv.mkDerivation {
|
||||
name = "gitlab-rake";
|
||||
buildInputs = [ pkgs.makeWrapper ];
|
||||
nativeBuildInputs = [ pkgs.makeWrapper ];
|
||||
dontBuild = true;
|
||||
dontUnpack = true;
|
||||
installPhase = ''
|
||||
mkdir -p $out/bin
|
||||
makeWrapper ${cfg.packages.gitlab.rubyEnv}/bin/rake $out/bin/gitlab-rake \
|
||||
${concatStrings (mapAttrsToList (name: value: "--set ${name} '${value}' ") gitlabEnv)} \
|
||||
--set PATH '${lib.makeBinPath [ pkgs.nodejs pkgs.gzip pkgs.git pkgs.gnutar postgresqlPackage pkgs.coreutils pkgs.procps ]}:$PATH' \
|
||||
--set PATH '${lib.makeBinPath runtimeDeps}:$PATH' \
|
||||
--set RAKEOPT '-f ${cfg.packages.gitlab}/share/gitlab/Rakefile' \
|
||||
--chdir '${cfg.packages.gitlab}/share/gitlab'
|
||||
'';
|
||||
|
@ -205,14 +220,14 @@ let

gitlab-rails = pkgs.stdenv.mkDerivation {
  name = "gitlab-rails";
  buildInputs = [ pkgs.makeWrapper ];
  nativeBuildInputs = [ pkgs.makeWrapper ];
  dontBuild = true;
  dontUnpack = true;
  installPhase = ''
    mkdir -p $out/bin
    makeWrapper ${cfg.packages.gitlab.rubyEnv}/bin/rails $out/bin/gitlab-rails \
      ${concatStrings (mapAttrsToList (name: value: "--set ${name} '${value}' ") gitlabEnv)} \
      --set PATH '${lib.makeBinPath [ pkgs.nodejs pkgs.gzip pkgs.git pkgs.gnutar postgresqlPackage pkgs.coreutils pkgs.procps ]}:$PATH' \
      --set PATH '${lib.makeBinPath runtimeDeps}:$PATH' \
      --chdir '${cfg.packages.gitlab}/share/gitlab'
  '';
};

@ -468,9 +483,9 @@ in {

redisUrl = mkOption {
  type = types.str;
  default = "redis://localhost:${toString config.services.redis.servers.gitlab.port}/";
  defaultText = literalExpression ''redis://localhost:''${toString config.services.redis.servers.gitlab.port}/'';
  description = lib.mdDoc "Redis URL for all GitLab services except gitlab-shell";
  default = "unix:/run/gitlab/redis.sock";
  example = "redis://localhost:6379/";
  description = lib.mdDoc "Redis URL for all GitLab services.";
};

extraGitlabRb = mkOption {

@ -867,8 +882,41 @@ in {

  };
};

workhorse.config = mkOption {
  type = toml.type;
  default = {};
  example = literalExpression ''
    {
      object_storage.provider = "AWS";
      object_storage.s3 = {
        aws_access_key_id = "AKIAXXXXXXXXXXXXXXXX";
        aws_secret_access_key = { _secret = "/var/keys/aws_secret_access_key"; };
      };
    };
  '';
  description = lib.mdDoc ''
    Configuration options to add to Workhorse's configuration
    file.

    See
    <https://gitlab.com/gitlab-org/gitlab/-/blob/master/workhorse/config.toml.example>
    and
    <https://docs.gitlab.com/ee/development/workhorse/configuration.html>
    for examples and option documentation.

    Options containing secret data should be set to an attribute
    set containing the attribute `_secret` - a string pointing
    to a file containing the value the option should be set
    to. See the example to get a better picture of this: in the
    resulting configuration file, the
    `object_storage.s3.aws_secret_access_key` key will be set to
    the contents of the {file}`/var/keys/aws_secret_access_key`
    file.
  '';
};
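
For context, a minimal sketch of how this new option could be set from a system configuration; the values mirror the option's own example above, and the secret file path is purely illustrative:

services.gitlab.workhorse.config = {
  object_storage.provider = "AWS";
  object_storage.s3 = {
    aws_access_key_id = "AKIAXXXXXXXXXXXXXXXX";
    # _secret points at a file whose contents replace this value at service start
    aws_secret_access_key = { _secret = "/var/keys/aws_secret_access_key"; };
  };
};

At startup the module renders these options to JSON, substitutes the secret, and converts the result to gitlab-workhorse.toml (see the ExecStartPre script further down in this diff).
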
extraConfig = mkOption {
  type = types.attrs;
  type = yaml.type;
  default = {};
  example = literalExpression ''
    {

@ -972,8 +1020,9 @@ in {

# Redis is required for the sidekiq queue runner.
services.redis.servers.gitlab = {
  enable = mkDefault true;
  port = mkDefault 31636;
  bind = mkDefault "127.0.0.1";
  user = mkDefault cfg.user;
  unixSocket = mkDefault "/run/gitlab/redis.sock";
  unixSocketPerm = mkDefault 770;
};

# We use postgres as the main data store.

@ -1062,6 +1111,7 @@ in {

# Ensure Docker Registry launches after the certificate generation job
systemd.services.docker-registry = optionalAttrs cfg.registry.enable {
  wants = [ "gitlab-registry-cert.service" ];
  after = [ "gitlab-registry-cert.service" ];
};

# Enable Docker Registry, if GitLab-Container Registry is enabled

@ -1115,6 +1165,7 @@ in {

"d ${gitlabConfig.production.shared.path}/lfs-objects 0750 ${cfg.user} ${cfg.group} -"
"d ${gitlabConfig.production.shared.path}/packages 0750 ${cfg.user} ${cfg.group} -"
"d ${gitlabConfig.production.shared.path}/pages 0750 ${cfg.user} ${cfg.group} -"
"d ${gitlabConfig.production.shared.path}/registry 0750 ${cfg.user} ${cfg.group} -"
"d ${gitlabConfig.production.shared.path}/terraform_state 0750 ${cfg.user} ${cfg.group} -"
"L+ /run/gitlab/config - - - - ${cfg.statePath}/config"
"L+ /run/gitlab/log - - - - ${cfg.statePath}/log"

@ -1168,6 +1219,7 @@ in {

cp -rf --no-preserve=mode ${cfg.packages.gitlab}/share/gitlab/config.dist/* ${cfg.statePath}/config
cp -rf --no-preserve=mode ${cfg.packages.gitlab}/share/gitlab/db/* ${cfg.statePath}/db
ln -sf ${extraGitlabRb} ${cfg.statePath}/config/initializers/extra-gitlab.rb
ln -sf ${cableYml} ${cfg.statePath}/config/cable.yml

${cfg.packages.gitlab-shell}/bin/install

@ -1357,6 +1409,7 @@ in {

wantedBy = [ "gitlab.target" ];
partOf = [ "gitlab.target" ];
path = with pkgs; [
  remarshal
  exiftool
  gitPackage
  gnutar

@ -1371,6 +1424,17 @@ in {

TimeoutSec = "infinity";
Restart = "on-failure";
WorkingDirectory = gitlabEnv.HOME;
ExecStartPre = pkgs.writeShellScript "gitlab-workhorse-pre-start" ''
  set -o errexit -o pipefail -o nounset
  shopt -s dotglob nullglob inherit_errexit

  ${utils.genJqSecretsReplacementSnippet
      cfg.workhorse.config
      "${cfg.statePath}/config/gitlab-workhorse.json"}

  json2toml "${cfg.statePath}/config/gitlab-workhorse.json" "${cfg.statePath}/config/gitlab-workhorse.toml"
  rm "${cfg.statePath}/config/gitlab-workhorse.json"
'';
ExecStart =
  "${cfg.packages.gitlab-workhorse}/bin/workhorse "
  + "-listenUmask 0 "

@ -1378,6 +1442,7 @@ in {

+ "-listenAddr /run/gitlab/gitlab-workhorse.socket "
+ "-authSocket ${gitlabSocket} "
+ "-documentRoot ${cfg.packages.gitlab}/share/gitlab/public "
+ "-config ${cfg.statePath}/config/gitlab-workhorse.toml "
+ "-secretPath ${cfg.statePath}/.gitlab_workhorse_secret";
};
};

@ -101,6 +101,14 @@ in

  '';
};

description = mkOption {
  type = types.str;
  default = "Gitolite user";
  description = lib.mdDoc ''
    Gitolite user account's description.
  '';
};

group = mkOption {
  type = types.str;
  default = "gitolite";

@ -145,7 +153,7 @@ in

'';

users.users.${cfg.user} = {
  description = "Gitolite user";
  description = cfg.description;
  home = cfg.dataDir;
  uid = config.ids.uids.gitolite;
  group = cfg.group;

third_party/nixpkgs/nixos/modules/services/misc/ntfy-sh.nix (new file, 100 lines, vendored)

@ -0,0 +1,100 @@

{ config, lib, pkgs, ... }:

with lib;

let
  cfg = config.services.ntfy-sh;

  settingsFormat = pkgs.formats.yaml { };
in

{
  options.services.ntfy-sh = {
    enable = mkEnableOption (mdDoc "[ntfy-sh](https://ntfy.sh), a push notification service");

    package = mkOption {
      type = types.package;
      default = pkgs.ntfy-sh;
      defaultText = literalExpression "pkgs.ntfy-sh";
      description = mdDoc "The ntfy.sh package to use.";
    };

    user = mkOption {
      default = "ntfy-sh";
      type = types.str;
      description = lib.mdDoc "User the ntfy-sh server runs under.";
    };

    group = mkOption {
      default = "ntfy-sh";
      type = types.str;
      description = lib.mdDoc "Primary group of ntfy-sh user.";
    };

    settings = mkOption {
      type = types.submodule { freeformType = settingsFormat.type; };

      default = { };

      example = literalExpression ''
        {
          listen-http = ":8080";
        }
      '';

      description = mdDoc ''
        Configuration for ntfy.sh, supported values are [here](https://ntfy.sh/docs/config/#config-options).
      '';
    };
  };

  config =
    let
      configuration = settingsFormat.generate "server.yml" cfg.settings;
    in
    mkIf cfg.enable {
      # to configure access control via the cli
      environment = {
        etc."ntfy/server.yml".source = configuration;
        systemPackages = [ cfg.package ];
      };

      systemd.services.ntfy-sh = {
        description = "Push notifications server";

        wantedBy = [ "multi-user.target" ];
        after = [ "network.target" ];

        serviceConfig = {
          ExecStart = "${cfg.package}/bin/ntfy serve -c ${configuration}";
          User = cfg.user;

          AmbientCapabilities = "CAP_NET_BIND_SERVICE";
          PrivateTmp = true;
          NoNewPrivileges = true;
          CapabilityBoundingSet = "CAP_NET_BIND_SERVICE";
          ProtectSystem = "full";
          ProtectKernelTunables = true;
          ProtectKernelModules = true;
          ProtectKernelLogs = true;
          ProtectControlGroups = true;
          PrivateDevices = true;
          RestrictSUIDSGID = true;
          RestrictNamespaces = true;
          RestrictRealtime = true;
          MemoryDenyWriteExecute = true;
        };
      };

      users.groups = optionalAttrs (cfg.group == "ntfy-sh") {
        ntfy-sh = { };
      };

      users.users = optionalAttrs (cfg.user == "ntfy-sh") {
        ntfy-sh = {
          isSystemUser = true;
          group = cfg.group;
        };
      };
    };
}
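
A minimal sketch of enabling this new module from a NixOS configuration; the listen-http value is taken from the option's own example above, and any further settings keys would follow the upstream config options linked in the settings description:

services.ntfy-sh = {
  enable = true;
  settings = {
    # freeform keys are rendered to YAML and passed to `ntfy serve -c`
    listen-http = ":8080";
  };
};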