Project import generated by Copybara.
GitOrigin-RevId: b85ed9dcbf187b909ef7964774f8847d554fab3b

parent fa38944194
commit c7f94ff3ce
2420 changed files with 71794 additions and 75990 deletions
third_party/nixpkgs/.github/CODEOWNERS (vendored, 19 lines changed)
|
@@ -63,6 +63,15 @@
|
|||
/.github/PULL_REQUEST_TEMPLATE.md @infinisil
|
||||
/doc/contributing/ @fricklerhandwerk @infinisil
|
||||
/doc/contributing/contributing-to-documentation.chapter.md @jtojnar @fricklerhandwerk @infinisil
|
||||
/lib/README.md @infinisil
|
||||
/doc/README.md @infinisil
|
||||
/nixos/README.md @infinisil
|
||||
/pkgs/README.md @infinisil
|
||||
/maintainers/README.md @infinisil
|
||||
|
||||
# User-facing development documentation
|
||||
/doc/development.md @infinisil
|
||||
/doc/development @infinisil
|
||||
|
||||
# NixOS Internals
|
||||
/nixos/default.nix @infinisil
|
||||
|
@@ -285,8 +294,10 @@ pkgs/development/python-modules/buildcatrust/ @ajs124 @lukegb @mweinelt
|
|||
/nixos/tests/matrix-conduit.nix @piegamesde
|
||||
|
||||
# Dotnet
|
||||
/pkgs/build-support/dotnet @IvarWithoutBones
|
||||
/pkgs/development/compilers/dotnet @IvarWithoutBones
|
||||
/pkgs/build-support/dotnet @IvarWithoutBones
|
||||
/pkgs/development/compilers/dotnet @IvarWithoutBones
|
||||
/pkgs/test/dotnet @IvarWithoutBones
|
||||
/doc/languages-frameworks/dotnet.section.md @IvarWithoutBones
|
||||
|
||||
# Node.js
|
||||
/pkgs/build-support/node/build-npm-package @lilyinstarlight @winterqt
|
||||
|
@@ -305,5 +316,9 @@ nixos/lib/make-multi-disk-zfs-image.nix @raitobezarius
|
|||
nixos/modules/tasks/filesystems/zfs.nix @raitobezarius
|
||||
nixos/tests/zfs.nix @raitobezarius
|
||||
|
||||
# Zig
|
||||
/pkgs/development/compilers/zig @AndersonTorres @figsoda
|
||||
/doc/hooks/zig.section.md @AndersonTorres @figsoda
|
||||
|
||||
# Linux Kernel
|
||||
pkgs/os-specific/linux/kernel/manual-config.nix @amjoseph-nixpkgs
|
||||
|
|
third_party/nixpkgs/.github/labeler.yml (vendored, 5 lines changed)
|
@@ -170,6 +170,7 @@
|
|||
|
||||
"6.topic: TeX":
|
||||
- doc/languages-frameworks/texlive.section.md
|
||||
- pkgs/test/texlive/**
|
||||
- pkgs/tools/typesetting/tex/**/*
|
||||
|
||||
"6.topic: vim":
|
||||
|
@@ -188,6 +189,10 @@
|
|||
- nixos/tests/xfce.nix
|
||||
- pkgs/desktops/xfce/**/*
|
||||
|
||||
"6.topic: zig":
|
||||
- pkgs/development/compilers/zig/**/*
|
||||
- doc/hooks/zig.section.md
|
||||
|
||||
"8.has: changelog":
|
||||
- nixos/doc/manual/release-notes/**/*
|
||||
|
||||
|
|
|
@@ -1,37 +0,0 @@
|
|||
name: "Direct Push Warning"
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- release-**
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build:
|
||||
permissions:
|
||||
contents: write # for peter-evans/commit-comment to comment on commit
|
||||
runs-on: ubuntu-latest
|
||||
if: github.repository_owner == 'NixOS'
|
||||
env:
|
||||
GITHUB_SHA: ${{ github.sha }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
steps:
|
||||
- name: Check if commit is a merge commit
|
||||
id: ismerge
|
||||
run: |
|
||||
ISMERGE=$(curl -H 'Accept: application/vnd.github.groot-preview+json' -H "authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" https://api.github.com/repos/${{ env.GITHUB_REPOSITORY }}/commits/${{ env.GITHUB_SHA }}/pulls | jq -r '.[] | select(.merge_commit_sha == "${{ env.GITHUB_SHA }}") | any')
|
||||
echo "ismerge=$ISMERGE" >> $GITHUB_OUTPUT
|
||||
# GitHub events are eventually consistent, so wait until changes propagate to their DB
|
||||
- run: sleep 60
|
||||
if: steps.ismerge.outputs.ismerge != 'true'
|
||||
- name: Warn if the commit was a direct push
|
||||
if: steps.ismerge.outputs.ismerge != 'true'
|
||||
uses: peter-evans/commit-comment@v2
|
||||
with:
|
||||
body: |
|
||||
@${{ github.actor }}, you pushed a commit directly to master/release branch
|
||||
instead of going through a Pull Request.
|
||||
|
||||
That's highly discouraged beyond the few exceptions listed
|
||||
on https://github.com/NixOS/nixpkgs/issues/118661
|
third_party/nixpkgs/CONTRIBUTING.md (vendored, 716 lines changed)
|
@@ -1,76 +1,198 @@
|
|||
# How to contribute
|
||||
# Contributing to Nixpkgs
|
||||
|
||||
Note: contributing implies licensing those contributions
|
||||
under the terms of [COPYING](COPYING), which is an MIT-like license.
|
||||
This document is for people wanting to contribute to the implementation of Nixpkgs.
|
||||
This involves interacting with implementation changes that are proposed using [GitHub](https://github.com/) [pull requests](https://docs.github.com/pull-requests) to the [Nixpkgs](https://github.com/nixos/nixpkgs/) repository (which you're in right now).
|
||||
|
||||
## Opening issues
|
||||
As such, a GitHub account is recommended, which you can sign up for [here](https://github.com/signup).
|
||||
See [here](https://discourse.nixos.org/t/about-the-patches-category/477) for how to contribute without a GitHub account.
|
||||
|
||||
* Make sure you have a [GitHub account](https://github.com/signup/free)
|
||||
* Make sure there is no open issue on the topic
|
||||
* [Submit a new issue](https://github.com/NixOS/nixpkgs/issues/new/choose) by choosing the kind of topic and fill out the template
|
||||
Additionally this document assumes that you already know how to use GitHub and Git.
|
||||
If that's not the case, we recommend learning about it first [here](https://docs.github.com/en/get-started/quickstart/hello-world).
|
||||
|
||||
## Submitting changes
|
||||
## Overview
|
||||
[overview]: #overview
|
||||
|
||||
Read the ["Submitting changes"](https://nixos.org/nixpkgs/manual/#chap-submitting-changes) section of the nixpkgs manual. It explains how to write, test, and iterate on your change, and which branch to base your pull request against.
|
||||
This file contains general contributing information, but individual parts also have more specific information to them in their respective `README.md` files, linked here:
|
||||
- [`lib`](./lib/README.md): Sources and documentation of the [library functions](https://nixos.org/manual/nixpkgs/stable/#chap-functions)
|
||||
- [`maintainers`](./maintainers/README.md): Nixpkgs maintainer and team listings, maintainer scripts
|
||||
- [`pkgs`](./pkgs/README.md): Package and [builder](https://nixos.org/manual/nixpkgs/stable/#part-builders) definitions
|
||||
- [`doc`](./doc/README.md): Sources and infrastructure for the [Nixpkgs manual](https://nixos.org/manual/nixpkgs/stable/)
|
||||
- [`nixos`](./nixos/README.md): Implementation of [NixOS](https://nixos.org/manual/nixos/stable/)
|
||||
|
||||
Below is a short excerpt of some points in there:
|
||||
# How to's
|
||||
|
||||
* Format the commit messages in the following way:
|
||||
## How to create pull requests
|
||||
[pr-create]: #how-to-create-pull-requests
|
||||
|
||||
```
|
||||
(pkg-name | nixos/<module>): (from -> to | init at version | refactor | etc)
|
||||
This section describes in some detail how changes can be made and proposed with pull requests.
|
||||
|
||||
(Motivation for change. Link to release notes. Additional information.)
|
||||
> **Note**
|
||||
> Be aware that contributing implies licensing those contributions under the terms of [COPYING](./COPYING), an MIT-like license.
|
||||
|
||||
0. Set up a local version of Nixpkgs to work with using GitHub and Git
|
||||
1. [Fork](https://docs.github.com/en/get-started/quickstart/fork-a-repo#forking-a-repository) the [Nixpkgs repository](https://github.com/nixos/nixpkgs/).
|
||||
1. [Clone the forked repository](https://docs.github.com/en/get-started/quickstart/fork-a-repo#cloning-your-forked-repository) into a local `nixpkgs` directory.
|
||||
1. [Configure the upstream Nixpkgs repository](https://docs.github.com/en/get-started/quickstart/fork-a-repo#configuring-git-to-sync-your-fork-with-the-upstream-repository).
|
||||
|
||||
1. Figure out the branch that should be used for this change by going through [this section][branch].
|
||||
If in doubt, use `master`; that's where most changes should go.
|
||||
This can be changed later by [rebasing][rebase].
|
||||
|
||||
2. Create and switch to a new Git branch, ideally such that:
|
||||
- The name of the branch hints at the change you'd like to implement, e.g. `update-hello`.
|
||||
- The base of the branch includes the most recent changes on the base branch from step 1, we'll assume `master` here.
|
||||
|
||||
```bash
|
||||
# Make sure you have the latest changes from upstream Nixpkgs
|
||||
git fetch upstream
|
||||
|
||||
# Create and switch to a new branch based off the master branch in Nixpkgs
|
||||
git switch --create update-hello upstream/master
|
||||
```
|
||||
|
||||
To avoid having to download and build potentially many derivations, at the expense of using a potentially outdated version, you can base the branch off a specific [Git commit](https://www.git-scm.com/docs/gitglossary#def_commit) instead:
|
||||
- The commit of the latest `nixpkgs-unstable` channel, available [here](https://channels.nixos.org/nixpkgs-unstable/git-revision).
|
||||
- The commit of a local Nixpkgs downloaded using [nix-channel](https://nixos.org/manual/nix/stable/command-ref/nix-channel), available using `nix-instantiate --eval --expr '(import <nixpkgs/lib>).trivial.revisionWithDefault null'`
|
||||
- If you're using NixOS, the commit of your NixOS installation, available with `nixos-version --revision`.
|
||||
|
||||
Once you have an appropriate commit you can use it instead of `upstream/master` in the above command:
|
||||
```bash
|
||||
git switch --create update-hello <the desired base commit>
|
||||
```
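For instance, here is a minimal sketch that bases the branch on the current `nixpkgs-unstable` channel commit (assuming `curl` is available and `upstream` points at the NixOS/nixpkgs repository):
```bash
# Look up the channel's current commit and use it as the base
base="$(curl -fsSL https://channels.nixos.org/nixpkgs-unstable/git-revision)"
git fetch upstream                        # make sure that commit is available locally
git switch --create update-hello "$base"
```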
|
||||
|
||||
3. Make the desired changes in the local Nixpkgs repository using an editor of your choice.
|
||||
Make sure to:
|
||||
- Adhere to both the [general code conventions][code-conventions], and the code conventions specific to the part you're making changes to.
|
||||
See the [overview section][overview] for more specific information.
|
||||
- Test the changes.
|
||||
See the [overview section][overview] for more specific information.
|
||||
- If necessary, document the change.
|
||||
See the [overview section][overview] for more specific information.
|
||||
|
||||
4. Commit your changes using `git commit`.
|
||||
Make sure to adhere to the [commit conventions](#commit-conventions).
|
||||
|
||||
Repeat the steps 3-4 as many times as necessary.
|
||||
Advance to the next step if all the commits (viewable with `git log`) make sense together.
|
||||
|
||||
5. Push your commits to your fork of Nixpkgs.
|
||||
```
|
||||
git push --set-upstream origin HEAD
|
||||
```
|
||||
|
||||
The above command will output a link that lets you quickly perform the next step:
|
||||
```
|
||||
remote: Create a pull request for 'update-hello' on GitHub by visiting:
|
||||
remote: https://github.com/myUser/nixpkgs/pull/new/update-hello
|
||||
```
|
||||
|
||||
6. [Create a pull request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request#creating-the-pull-request) from the new branch in your Nixpkgs fork to the upstream Nixpkgs repository.
|
||||
Use the branch from step 1 as the pull request's base branch.
|
||||
Go through the [pull request template](#pull-request-template) in the pre-filled default description.
|
||||
|
||||
7. Respond to review comments, potential CI failures and potential merge conflicts by updating the pull request.
|
||||
Always keep the pull request in a mergeable state.
|
||||
|
||||
The custom [OfBorg](https://github.com/NixOS/ofborg) CI system will perform various checks to help ensure code quality, whose results you can see at the bottom of the pull request.
|
||||
See [the OfBorg Readme](https://github.com/NixOS/ofborg#readme) for more details.
|
||||
|
||||
- To add new commits, repeat steps 3-4 and push the result using
|
||||
```
|
||||
git push
|
||||
```
|
||||
|
||||
- To change existing commits you will have to [rewrite Git history](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History).
|
||||
Useful Git commands that can help a lot with this are `git commit --patch --amend` and `git rebase --interactive`.
|
||||
With a rewritten history you need to force-push the commits using
|
||||
```
|
||||
git push --force-with-lease
|
||||
```
|
||||
|
||||
- In case of merge conflicts you will also have to [rebase the branch](https://git-scm.com/book/en/v2/Git-Branching-Rebasing) on top of current `master`.
|
||||
Sometimes this can be done [on GitHub directly](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/keeping-your-pull-request-in-sync-with-the-base-branch#updating-your-pull-request-branch), but if not you will have to rebase locally using
|
||||
```
|
||||
git fetch upstream
|
||||
git rebase upstream/master
|
||||
git push --force-with-lease
|
||||
```
|
||||
|
||||
- If you need to change the base branch of the pull request, you can do so by [rebasing][rebase].
|
||||
|
||||
8. If your pull request is merged and [acceptable for releases][release-acceptable] you may [backport][pr-backport] the pull request.
|
||||
|
||||
### Pull request template
|
||||
[pr-template]: #pull-request-template
|
||||
|
||||
The pull request template helps determine what steps have been taken for a contribution so far, and helps guide maintainers on the status of a change. The motivation section of the PR should include any extra details the title does not address and link any existing issues related to the pull request.
|
||||
|
||||
When a PR is created, it will be pre-populated with some checkboxes detailed below:
|
||||
|
||||
#### Tested using sandboxing
|
||||
|
||||
When sandbox builds are enabled, Nix will set up an isolated environment for each build process. It is used to remove further hidden dependencies set by the build environment to improve reproducibility. This includes access to the network during the build outside of `fetch*` functions and files outside the Nix store. Depending on the operating system, access to other resources is blocked as well (e.g. inter-process communication is isolated on Linux); see [sandbox](https://nixos.org/nix/manual/#conf-sandbox) in the Nix manual for details.
|
||||
|
||||
Sandboxing is not enabled by default in Nix due to a small performance hit on each build. In pull requests for [nixpkgs](https://github.com/NixOS/nixpkgs/) people are asked to test builds with sandboxing enabled (see `Tested using sandboxing` in the pull request template) because in [Hydra](https://nixos.org/hydra/) sandboxing is also used.
|
||||
|
||||
Depending on whether you use NixOS or another platform, you can use one of the following methods to enable sandboxing **before** building the package:
|
||||
|
||||
- **Globally enable sandboxing on NixOS**: add the following to `configuration.nix`
|
||||
|
||||
```nix
|
||||
nix.settings.sandbox = true;
|
||||
```
|
||||
|
||||
For consistency, there should not be a period at the end of the commit message's summary line (the first line of the commit message).
|
||||
- **Globally enable sandboxing on non-NixOS platforms**: add the following to: `/etc/nix/nix.conf`
|
||||
|
||||
Examples:
|
||||
```ini
|
||||
sandbox = true
|
||||
```
|
||||
|
||||
* nginx: init at 2.0.1
|
||||
* firefox: 54.0.1 -> 55.0
|
||||
https://www.mozilla.org/en-US/firefox/55.0/releasenotes/
|
||||
* nixos/hydra: add bazBaz option
|
||||
#### Built on platform(s)
|
||||
|
||||
Dual baz behavior is needed to do foo.
|
||||
* nixos/nginx: refactor config generation
|
||||
Many Nix packages are designed to run on multiple platforms. As such, it’s important to let the maintainer know which platforms your changes have been tested on. It’s not always practical to test a change on all platforms, and is not required for a pull request to be merged. Only check the systems you tested the build on in this section.
|
||||
|
||||
The old config generation system used impure shell scripts and could break in specific circumstances (see #1234).
|
||||
#### Tested via one or more NixOS test(s) if existing and applicable for the change (look inside nixos/tests)
|
||||
|
||||
* `meta.description` should:
|
||||
* Be short, just one sentence.
|
||||
* Be capitalized.
|
||||
* Not start with the package name.
|
||||
* More generally, it should not refer to the package name.
|
||||
* Not end with a period (or any punctuation for that matter).
|
||||
* Aim to inform while avoiding subjective language.
|
||||
* `meta.license` must be set and fit the upstream license.
|
||||
* If there is no upstream license, `meta.license` should default to `lib.licenses.unfree`.
|
||||
* If in doubt, try to contact the upstream developers for clarification.
|
||||
* `meta.maintainers` must be set.
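A minimal sketch of a `meta` block following these points (the description, license and maintainer shown here are purely illustrative):
```nix
meta = with lib; {
  # Short, capitalized, no package name, no trailing period
  description = "Tool for frobnicating widgets";
  # Must be set and match the upstream license
  license = licenses.mit;
  # Must be set; use your own handle from maintainers/maintainer-list.nix
  maintainers = with maintainers; [ someMaintainer ];
};
```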
|
||||
Packages with automated tests are much more likely to be merged in a timely fashion because they don’t require as much manual testing by the maintainer to verify the functionality of the package. If there are existing tests for the package, they should be run to verify that your changes do not break them. Tests can only be run on Linux. For more details on writing and running tests, see the [section in the NixOS manual](https://nixos.org/nixos/manual/index.html#sec-nixos-tests).
|
||||
|
||||
See the nixpkgs manual for more details on [standard meta-attributes](https://nixos.org/nixpkgs/manual/#sec-standard-meta-attributes).
|
||||
#### Tested compilation of all pkgs that depend on this change using `nixpkgs-review`
|
||||
|
||||
## Writing good commit messages
|
||||
If you are modifying a package, you can use `nixpkgs-review` to make sure all packages that depend on the updated package still compile correctly. The `nixpkgs-review` utility can look for and build all dependent packages either based on uncommitted changes with the `wip` option or by specifying a GitHub pull request number.
|
||||
|
||||
In addition to writing properly formatted commit messages, it's important to include relevant information so other developers can later understand *why* a change was made. While this information usually can be found by digging code, mailing list/Discourse archives, pull request discussions or upstream changes, it may require a lot of work.
|
||||
Review changes from pull request number 12345:
|
||||
|
||||
Package version upgrades usually allow for simpler commit messages, including attribute name, old and new version, as well as a reference to the relevant release notes/changelog. Every once in a while a package upgrade requires more extensive changes, and that subsequently warrants a more verbose message.
|
||||
```ShellSession
|
||||
nix-shell -p nixpkgs-review --run "nixpkgs-review pr 12345"
|
||||
```
|
||||
|
||||
To keep complete commit messages and GPG signatures intact, pull requests should not be squash-merged, and they must not be squash-merged when the changes don't make sense as a single commit.
|
||||
This means that, when addressing review comments in order to keep the pull request in an always mergeable status, you will sometimes need to rewrite your branch's history and then force-push it with `git push --force-with-lease`.
|
||||
Useful git commands that can help a lot with this are `git commit --patch --amend` and `git rebase --interactive`. For more details consult the git man pages or online resources like [git-rebase.io](https://git-rebase.io/) or [The Pro Git Book](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History).
|
||||
Alternatively, with flakes (and analogously for the other commands below):
|
||||
|
||||
## Testing changes
|
||||
```ShellSession
|
||||
nix run nixpkgs#nixpkgs-review -- pr 12345
|
||||
```
|
||||
|
||||
To run the main types of tests locally:
|
||||
Review uncommitted changes:
|
||||
|
||||
- Run package-internal tests with `nix-build --attr pkgs.PACKAGE.passthru.tests`
|
||||
- Run [NixOS tests](https://nixos.org/manual/nixos/unstable/#sec-nixos-tests) with `nix-build --attr nixosTests.NAME`, where `NAME` is the name of the test listed in `nixos/tests/all-tests.nix`
|
||||
- Run [global package tests](https://nixos.org/manual/nixpkgs/unstable/#sec-package-tests) with `nix-build --attr tests.PACKAGE`, where `PACKAGE` is the name of the test listed in `pkgs/test/default.nix`
|
||||
- See `lib/tests/NAME.nix` for instructions on running specific library tests
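As a hypothetical example, for a package named `hello` that defines `passthru.tests`, the package-internal tests would be run with:
```ShellSession
$ nix-build --attr pkgs.hello.passthru.tests
```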
|
||||
```ShellSession
|
||||
nix-shell -p nixpkgs-review --run "nixpkgs-review wip"
|
||||
```
|
||||
|
||||
## Rebasing between branches (i.e. from master to staging)
|
||||
Review changes from last commit:
|
||||
|
||||
```ShellSession
|
||||
nix-shell -p nixpkgs-review --run "nixpkgs-review rev HEAD"
|
||||
```
|
||||
|
||||
#### Tested execution of all binary files (usually in `./result/bin/`)
|
||||
|
||||
It’s important to test any executables generated by a build when you change or create a package in nixpkgs. This can be done by looking in `./result/bin` and running any files in there, or at a minimum, the main executable for the package. For example, if you make a change to texlive, you probably would only check the binaries associated with the change you made rather than testing all of them.
|
||||
|
||||
#### Meets Nixpkgs contribution standards
|
||||
|
||||
The last checkbox is about whether the change fits the guidelines in this `CONTRIBUTING.md` file. This document has detailed information on the standards the Nix community has for commit messages, reviews, licensing of contributions, and more. Everyone should read and understand these standards before submitting a pull request.
|
||||
|
||||
### Rebasing between branches (i.e. from master to staging)
|
||||
[rebase]: #rebasing-between-branches-ie-from-master-to-staging
|
||||
|
||||
From time to time, changes between branches must be rebased, for example, if the
|
||||
number of new rebuilds they would cause is too large for the target branch. When
|
||||
|
@@ -114,7 +236,7 @@ git status
|
|||
git push origin feature --force-with-lease
|
||||
```
|
||||
|
||||
### Something went wrong and a lot of people were pinged
|
||||
#### Something went wrong and a lot of people were pinged
|
||||
|
||||
It happens. Remember to be kind, especially to new contributors.
|
||||
There is no way back, so the pull request should be closed and locked
|
||||
|
@@ -144,32 +266,486 @@ for review, which allows you to sidestep this issue.
|
|||
This is not a bulletproof method though, as OfBorg still does review requests even on draft PRs.
|
||||
```
|
||||
|
||||
## Backporting changes
|
||||
## How to backport pull requests
|
||||
[pr-backport]: #how-to-backport-pull-requests
|
||||
|
||||
Follow these steps to backport a change into a release branch in compliance with the [commit policy](https://nixos.org/nixpkgs/manual/#submitting-changes-stable-release-branches).
|
||||
Once a pull request has been merged into `master`, a backport pull request to the corresponding `release-YY.MM` branch can be created either automatically or manually.
|
||||
|
||||
You can add a label such as `backport release-23.05` to a PR, so that merging it will
|
||||
automatically create a backport (via [a GitHub Action](.github/workflows/backport.yml)).
|
||||
This also works for pull requests that have already been merged, and might take a couple of minutes to trigger.
|
||||
### Automatically backporting changes
|
||||
|
||||
You can also create the backport manually:
|
||||
> **Note**
|
||||
> You have to be a [Nixpkgs maintainer](./maintainers) to automatically create a backport pull request.
|
||||
|
||||
1. Take note of the commits in which the change was introduced into `master` branch.
|
||||
2. Check out the target _release branch_, e.g. `release-23.05`. Do not use a _channel branch_ like `nixos-23.05` or `nixpkgs-23.05-darwin`.
|
||||
3. Create a branch for your change, e.g. `git checkout -b backport`.
|
||||
4. When the reason to backport is not obvious from the original commit message, use `git cherry-pick -xe <original commit>` and add a reason. Otherwise use `git cherry-pick -x <original commit>`. That's fine for minor version updates that only include security and bug fixes, commits that fix an otherwise broken package, or similar. Please also ensure the commits exist on the master branch; in the case of squashed or rebased merges, the commit hash will change and the new commits can be found in the merge message at the bottom of the master pull request.
|
||||
5. Push to GitHub and open a backport pull request. Make sure to select the release branch (e.g. `release-23.05`) as the target branch of the pull request, and link to the pull request in which the original change was committed to `master`. The pull request title should be the commit title with the release version as prefix, e.g. `[23.05]`.
|
||||
6. When the backport pull request is merged and you have the necessary privileges, you can also replace the label `9.needs: port to stable` with `8.has: port to stable` on the original pull request. This way maintainers can keep track of missing backports more easily.
|
||||
Add the [`backport release-YY.MM` label](https://github.com/NixOS/nixpkgs/labels?q=backport) to the pull request on the `master` branch.
|
||||
This will cause [a GitHub Action](.github/workflows/backport.yml) to open a pull request to the `release-YY.MM` branch a few minutes later.
|
||||
This can be done on both open or already merged pull requests.
|
||||
|
||||
## Criteria for Backporting changes
|
||||
### Manually backporting changes
|
||||
|
||||
Anything that does not cause user or downstream dependency regressions can be backported. This includes:
|
||||
- New Packages / Modules
|
||||
- Security / Patch updates
|
||||
- Version updates which include new functionality (but no breaking changes)
|
||||
- Services which require a client to be up-to-date regardless. (E.g. `spotify`, `steam`, or `discord`)
|
||||
- Security critical applications (E.g. `firefox`)
|
||||
To manually create a backport pull request, follow [the standard pull request process][pr-create], with these notable differences:
|
||||
|
||||
## Reviewing contributions
|
||||
- Use `release-YY.MM` for the base branch, both for the local branch and the pull request.
|
||||
> **Warning**
|
||||
> Do not use the `nixos-YY.MM` branch; that is a branch pointing to the tested release channel commit
|
||||
|
||||
See the nixpkgs manual for more details on how to [Review contributions](https://nixos.org/nixpkgs/manual/#chap-reviewing-contributions).
|
||||
- Instead of manually making and committing the changes, use [`git cherry-pick -x`](https://git-scm.com/docs/git-cherry-pick) for each commit from the pull request you'd like to backport.
|
||||
Either `git cherry-pick -x <commit>` when the reason for the backport is obvious (such as minor versions, fixes, etc.), otherwise use `git cherry-pick -xe <commit>` to add a reason for the backport to the commit message.
|
||||
Here is [an example](https://github.com/nixos/nixpkgs/commit/5688c39af5a6c5f3d646343443683da880eaefb8) of this.
|
||||
|
||||
> **Warning**
|
||||
> Ensure the commits exist on the master branch.
|
||||
> In the case of squashed or rebased merges, the commit hash will change and the new commits can be found in the merge message at the bottom of the master pull request.
|
||||
|
||||
- In the pull request description, link to the original pull request to `master`.
|
||||
The pull request title should include `[YY.MM]` matching the release you're backporting to.
|
||||
|
||||
- When the backport pull request is merged and you have the necessary privileges, you can also replace the label `9.needs: port to stable` with `8.has: port to stable` on the original pull request.
|
||||
This way maintainers can keep track of missing backports more easily.
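Putting these steps together, a sketch of a manual backport could look like this (branch name, release number and commit hash are placeholders):
```bash
git fetch upstream
git switch --create backport-my-fix upstream/release-23.05
git cherry-pick -x <commit hash from master>   # or -xe to add a reason
git push --set-upstream origin HEAD
```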
|
||||
|
||||
## How to review pull requests
|
||||
[pr-review]: #how-to-review-pull-requests
|
||||
|
||||
> **Warning**
|
||||
> The following section is a draft, and the policy for reviewing is still being discussed in issues such as [#11166](https://github.com/NixOS/nixpkgs/issues/11166) and [#20836](https://github.com/NixOS/nixpkgs/issues/20836).
|
||||
|
||||
The Nixpkgs project receives a fairly high number of contributions via GitHub pull requests. Reviewing and approving these is an important task and a way to contribute to the project.
|
||||
|
||||
The high change rate of Nixpkgs makes any pull request that remains open for too long subject to conflicts that will require extra work from the submitter or the merger. Reviewing pull requests in a timely manner and being responsive to the comments is the key to avoid this issue. GitHub provides sort filters that can be used to see the [most recently](https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc) and the [least recently](https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-asc) updated pull requests. We highly encourage looking at [this list of ready to merge, unreviewed pull requests](https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+review%3Anone+status%3Asuccess+-label%3A%222.status%3A+work-in-progress%22+no%3Aproject+no%3Aassignee+no%3Amilestone).
|
||||
|
||||
When reviewing a pull request, please always be nice and polite. Controversial changes can lead to controversial opinions, but it is important to respect every community member and their work.
|
||||
|
||||
GitHub provides reactions as a simple and quick way to provide feedback to pull requests or any comments. The thumb-down reaction should be used with care and if possible accompanied with some explanation so the submitter has directions to improve their contribution.
|
||||
|
||||
Pull request reviews should include a list of what has been reviewed in a comment, so other reviewers and mergers can know the state of the review.
|
||||
|
||||
All the review template samples provided in this section are generic and meant as examples. Their usage is optional and the reviewer is free to adapt them to their liking.
|
||||
|
||||
To get more information about how to review specific parts of Nixpkgs, refer to the documents linked to in the [overview section][overview].
|
||||
|
||||
If you think you have enough knowledge and experience in a topic and would like to become a long-term reviewer for related submissions, please contact the current reviewers for that topic. They will give you information about the reviewing process. The main reviewers for a topic can be hard to find as there is no list, but checking past pull requests to see who reviewed them, or git-blaming the code to see who committed to that topic, can give some hints.
|
||||
|
||||
Container system, boot system and library changes are some examples of the pull requests fitting this category.
|
||||
|
||||
## How to merge pull requests
|
||||
[pr-merge]: #how-to-merge-pull-requests
|
||||
|
||||
The *Nixpkgs committers* are people who have been given
|
||||
permission to merge.
|
||||
|
||||
It is possible for community members that have enough knowledge and experience on a special topic to contribute by merging pull requests.
|
||||
|
||||
In case the PR is stuck waiting for the original author to apply a trivial
|
||||
change (a typo, capitalisation change, etc.) and the author allowed the members
|
||||
to modify the PR, consider applying it yourself (or commit the existing review
|
||||
suggestion). You should pay extra attention to make sure the addition doesn't go
|
||||
against the idea of the original PR and would not be opposed by the author.
|
||||
|
||||
<!--
|
||||
The following paragraphs about how to deal with inactive contributors are just a proposal and should be modified to whatever the community agrees is the right policy.
|
||||
|
||||
Please note that contributors with commit rights who are inactive for more than three months will have their commit rights revoked.
|
||||
-->
|
||||
|
||||
Please see the discussion in [GitHub nixpkgs issue #50105](https://github.com/NixOS/nixpkgs/issues/50105) for information on how to proceed to be granted this level of access.
|
||||
|
||||
In case a contributor definitively leaves the Nix community, they should create an issue or post on [Discourse](https://discourse.nixos.org) with references to the packages and modules they maintain so that maintainership can be taken over by other contributors.
|
||||
|
||||
# Flow of merged pull requests
|
||||
|
||||
After a pull request is merged, it eventually makes it to the [official Hydra CI](https://hydra.nixos.org/).
|
||||
Hydra regularly evaluates and builds Nixpkgs, updating [the official channels](http://channels.nixos.org/) when specific Hydra jobs succeed.
|
||||
See [Nix Channel Status](https://status.nixos.org/) for the current channels and their state.
|
||||
Here's a brief overview of the main Git branches and what channels they're used for:
|
||||
|
||||
- `master`: The main branch, used for the unstable channels such as `nixpkgs-unstable`, `nixos-unstable` and `nixos-unstable-small`.
|
||||
- `release-YY.MM` (e.g. `release-23.05`): The NixOS release branches, used for the stable channels such as `nixos-23.05`, `nixos-23.05-small` and `nixpkgs-23.05-darwin`.
|
||||
|
||||
When a channel is updated, a corresponding Git branch is also updated to point to the corresponding commit.
|
||||
So e.g. the [`nixpkgs-unstable` branch](https://github.com/nixos/nixpkgs/tree/nixpkgs-unstable) corresponds to the Git commit from the [`nixpkgs-unstable` channel](https://channels.nixos.org/nixpkgs-unstable).
|
||||
|
||||
Nixpkgs in its entirety is tied to the NixOS release process, which is documented in the [NixOS Release Wiki](https://nixos.github.io/release-wiki/).
|
||||
|
||||
See [this section][branch] to know when to use the release branches.
|
||||
|
||||
## Staging
|
||||
[staging]: #staging
|
||||
|
||||
The staging workflow exists to batch Hydra builds of many packages together.
|
||||
|
||||
It works by directing commits that cause [mass rebuilds][mass-rebuild] to a separate `staging` branch that isn't directly built by Hydra.
|
||||
Regularly, the `staging` branch is _manually_ merged into a `staging-next` branch to be built by Hydra using the [`nixpkgs:staging-next` jobset](https://hydra.nixos.org/jobset/nixpkgs/staging-next).
|
||||
The `staging-next` branch should then only receive direct commits in order to fix Hydra builds.
|
||||
Once it is verified that there are no major regressions, it is merged into `master` using [a pull request](https://github.com/NixOS/nixpkgs/pulls?q=head%3Astaging-next).
|
||||
This is done manually in order to ensure it's a good use of Hydra's computing resources.
|
||||
By keeping the `staging-next` branch separate from `staging`, this batching does not block developers from merging changes into `staging`.
|
||||
|
||||
In order for the `staging` and `staging-next` branches to be up-to-date with the latest commits on `master`, there are regular _automated_ merges from `master` into `staging-next` and `staging`.
|
||||
This is implemented using GitHub workflows [here](.github/workflows/periodic-merge-6h.yml) and [here](.github/workflows/periodic-merge-24h.yml).
|
||||
|
||||
> **Note**
|
||||
> Changes must be sufficiently tested before being merged into any branch.
|
||||
> Hydra builds should not be used as a testing platform.
|
||||
|
||||
Here is a Git history diagram showing the flow of commits between the three branches:
|
||||
```mermaid
|
||||
%%{init: {
|
||||
'theme': 'base',
|
||||
'themeVariables': {
|
||||
'gitInv0': '#ff0000',
|
||||
'gitInv1': '#ff0000',
|
||||
'git2': '#ff4444',
|
||||
'commitLabelFontSize': '15px'
|
||||
},
|
||||
'gitGraph': {
|
||||
'showCommitLabel':true,
|
||||
'mainBranchName': 'master',
|
||||
'rotateCommitLabel': true
|
||||
}
|
||||
} }%%
|
||||
gitGraph
|
||||
commit id:" "
|
||||
branch staging-next
|
||||
branch staging
|
||||
|
||||
checkout master
|
||||
checkout staging
|
||||
checkout master
|
||||
commit id:" "
|
||||
checkout staging-next
|
||||
merge master id:"automatic"
|
||||
checkout staging
|
||||
merge staging-next id:"automatic "
|
||||
|
||||
checkout staging-next
|
||||
merge staging type:HIGHLIGHT id:"manual"
|
||||
commit id:"fixup"
|
||||
|
||||
checkout master
|
||||
checkout staging
|
||||
checkout master
|
||||
commit id:" "
|
||||
checkout staging-next
|
||||
merge master id:"automatic "
|
||||
checkout staging
|
||||
merge staging-next id:"automatic "
|
||||
|
||||
checkout staging-next
|
||||
commit id:"fixup "
|
||||
checkout master
|
||||
merge staging-next type:HIGHLIGHT id:"manual (PR)"
|
||||
```
|
||||
|
||||
|
||||
Here's an overview of the different branches:
|
||||
|
||||
| branch | `master` | `staging` | `staging-next` |
|
||||
| --- | --- | --- | --- |
|
||||
| Used for development | ✔️ | ✔️ | ❌ |
|
||||
| Built by Hydra | ✔️ | ❌ | ✔️ |
|
||||
| [Mass rebuilds][mass-rebuild] | ❌ | ✔️ | ⚠️ Only to fix Hydra builds |
|
||||
| Critical security fixes | ✔️ for non-mass-rebuilds | ❌ | ✔️ for mass-rebuilds |
|
||||
| Automatically merged into | `staging-next` | - | `staging` |
|
||||
| Manually merged into | - | `staging-next` | `master` |
|
||||
|
||||
The staging workflow is used for all main branches, `master` and `release-YY.MM`, with corresponding names:
|
||||
- `master`/`release-YY.MM`
|
||||
- `staging`/`staging-YY.MM`
|
||||
- `staging-next`/`staging-next-YY.MM`
|
||||
|
||||
# Conventions
|
||||
|
||||
## Branch conventions
|
||||
<!-- This section is relevant to both contributors and reviewers -->
|
||||
[branch]: #branch-conventions
|
||||
|
||||
Most changes should go to the `master` branch, but sometimes other branches should be used instead.
|
||||
Use the following decision process to figure out which one it should be:
|
||||
|
||||
Is the change [acceptable for releases][release-acceptable] and do you wish to have the change in the release?
|
||||
- No: Use the `master` branch, do not backport the pull request.
|
||||
- Yes: Can the change be implemented the same way on the `master` and release branches?
|
||||
For example, a package's major version might differ between the `master` and release branches, such that separate security patches are required.
|
||||
- Yes: Use the `master` branch and [backport the pull request](#backporting-changes).
|
||||
- No: Create separate pull requests to the `master` and `release-XX.YY` branches.
|
||||
|
||||
Furthermore, if the change causes a [mass rebuild][mass-rebuild], use the appropriate staging branch instead:
|
||||
- Mass rebuilds to `master` should go to `staging` instead.
|
||||
- Mass rebuilds to `release-XX.YY` should go to `staging-XX.YY` instead.
|
||||
|
||||
See [this section][staging] for more details about how such changes propagate between the branches.
|
||||
|
||||
### Changes acceptable for releases
|
||||
[release-acceptable]: #changes-acceptable-for-releases
|
||||
|
||||
Only changes to supported releases may be accepted.
|
||||
The oldest supported release (`YYMM`) can be found using
|
||||
```
|
||||
nix-instantiate --eval -A lib.trivial.oldestSupportedRelease
|
||||
```
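The result is a number encoding the release; for example (illustrative output):
```ShellSession
$ nix-instantiate --eval -A lib.trivial.oldestSupportedRelease
2305
```
Here `2305` would mean that 23.05 is the oldest release still eligible for backports.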
|
||||
|
||||
The release branches should generally not receive any breaking changes, neither to the Nix expressions nor to the derivations.
|
||||
So these changes are acceptable to backport:
|
||||
- New packages, modules and functions
|
||||
- Security fixes
|
||||
- Package version updates
|
||||
- Patch versions with fixes
|
||||
- Minor versions with new functionality, but no breaking changes
|
||||
|
||||
In addition, major package version updates with breaking changes are also acceptable for:
|
||||
- Services that would fail without up-to-date client software, such as `spotify`, `steam`, and `discord`
|
||||
- Security critical applications, such as `firefox` and `chromium`
|
||||
|
||||
### Changes causing mass rebuilds
|
||||
[mass-rebuild]: #changes-causing-mass-rebuilds
|
||||
|
||||
Which changes cause mass rebuilds is not formally defined.
|
||||
In order to help the decision, CI automatically assigns [`rebuild` labels](https://github.com/NixOS/nixpkgs/labels?q=rebuild) to pull requests based on the number of packages they cause rebuilds for.
|
||||
As a rule of thumb, if the number of rebuilds is **over 500**, it can be considered a mass rebuild.
|
||||
To get a sense for what changes are considered mass rebuilds, see [previously merged pull requests to the staging branches](https://github.com/NixOS/nixpkgs/issues?q=base%3Astaging+-base%3Astaging-next+is%3Amerged).
|
||||
|
||||
## Commit conventions
|
||||
[commit-conventions]: #commit-conventions
|
||||
|
||||
- Create a commit for each logical unit.
|
||||
|
||||
- Check for unnecessary whitespace with `git diff --check` before committing.
|
||||
|
||||
- If you have commits `pkg-name: oh, forgot to insert whitespace`: squash commits in this case. Use `git rebase -i`.
|
||||
|
||||
- Format the commit messages in the following way:
|
||||
|
||||
```
|
||||
(pkg-name | nixos/<module>): (from -> to | init at version | refactor | etc)
|
||||
|
||||
(Motivation for change. Link to release notes. Additional information.)
|
||||
```
|
||||
|
||||
For consistency, there should not be a period at the end of the commit message's summary line (the first line of the commit message).
|
||||
|
||||
Examples:
|
||||
|
||||
* nginx: init at 2.0.1
|
||||
* firefox: 54.0.1 -> 55.0
|
||||
|
||||
https://www.mozilla.org/en-US/firefox/55.0/releasenotes/
|
||||
* nixos/hydra: add bazBaz option
|
||||
|
||||
Dual baz behavior is needed to do foo.
|
||||
* nixos/nginx: refactor config generation
|
||||
|
||||
The old config generation system used impure shell scripts and could break in specific circumstances (see #1234).
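As a sketch, the `firefox` example above could be created with two `-m` flags, one for the summary line and one for the body:
```bash
git commit -m "firefox: 54.0.1 -> 55.0" \
           -m "https://www.mozilla.org/en-US/firefox/55.0/releasenotes/"
```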
|
||||
|
||||
### Writing good commit messages
|
||||
|
||||
In addition to writing properly formatted commit messages, it's important to include relevant information so other developers can later understand *why* a change was made. While this information usually can be found by digging code, mailing list/Discourse archives, pull request discussions or upstream changes, it may require a lot of work.
|
||||
|
||||
Package version upgrades usually allow for simpler commit messages, including attribute name, old and new version, as well as a reference to the relevant release notes/changelog. Every once in a while a package upgrade requires more extensive changes, and that subsequently warrants a more verbose message.
|
||||
|
||||
To keep complete commit messages and GPG signatures intact, pull requests should not be squash-merged, and they must not be squash-merged when the changes don't make sense as a single commit.
|
||||
|
||||
## Code conventions
|
||||
[code-conventions]: #code-conventions
|
||||
|
||||
### Release notes
|
||||
|
||||
If you removed packages or made some major NixOS changes, write about it in the release notes for the next stable release in [`nixos/doc/manual/release-notes`](./nixos/doc/manual/release-notes).
|
||||
|
||||
### File naming and organisation
|
||||
|
||||
Names of files and directories should be in lowercase, with dashes between words — not in camel case. For instance, it should be `all-packages.nix`, not `allPackages.nix` or `AllPackages.nix`.
|
||||
|
||||
### Syntax
|
||||
|
||||
- Use 2 spaces of indentation per indentation level in Nix expressions, 4 spaces in shell scripts.
|
||||
|
||||
- Do not use tab characters, i.e. configure your editor to use soft tabs. For instance, use `(setq-default indent-tabs-mode nil)` in Emacs. Everybody has different tab settings so it’s asking for trouble.
|
||||
|
||||
- Use `lowerCamelCase` for variable names, not `UpperCamelCase`. Note, this rule does not apply to package attribute names, which instead follow the rules in [](#sec-package-naming).
|
||||
|
||||
- Function calls with attribute set arguments are written as
|
||||
|
||||
```nix
|
||||
foo {
|
||||
arg = ...;
|
||||
}
|
||||
```
|
||||
|
||||
not
|
||||
|
||||
```nix
|
||||
foo
|
||||
{
|
||||
arg = ...;
|
||||
}
|
||||
```
|
||||
|
||||
Also fine is
|
||||
|
||||
```nix
|
||||
foo { arg = ...; }
|
||||
```
|
||||
|
||||
if it's a short call.
|
||||
|
||||
- In attribute sets or lists that span multiple lines, the attribute names or list elements should be aligned:
|
||||
|
||||
```nix
|
||||
# A long list.
|
||||
list = [
|
||||
elem1
|
||||
elem2
|
||||
elem3
|
||||
];
|
||||
|
||||
# A long attribute set.
|
||||
attrs = {
|
||||
attr1 = short_expr;
|
||||
attr2 =
|
||||
if true then big_expr else big_expr;
|
||||
};
|
||||
|
||||
# Combined
|
||||
listOfAttrs = [
|
||||
{
|
||||
attr1 = 3;
|
||||
attr2 = "fff";
|
||||
}
|
||||
{
|
||||
attr1 = 5;
|
||||
attr2 = "ggg";
|
||||
}
|
||||
];
|
||||
```
|
||||
|
||||
- Short lists or attribute sets can be written on one line:
|
||||
|
||||
```nix
|
||||
# A short list.
|
||||
list = [ elem1 elem2 elem3 ];
|
||||
|
||||
# A short set.
|
||||
attrs = { x = 1280; y = 1024; };
|
||||
```
|
||||
|
||||
- Breaking in the middle of a function argument can give hard-to-read code, like
|
||||
|
||||
```nix
|
||||
someFunction { x = 1280;
|
||||
y = 1024; } otherArg
|
||||
yetAnotherArg
|
||||
```
|
||||
|
||||
(especially if the argument is very large, spanning multiple lines).
|
||||
|
||||
Better:
|
||||
|
||||
```nix
|
||||
someFunction
|
||||
{ x = 1280; y = 1024; }
|
||||
otherArg
|
||||
yetAnotherArg
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```nix
|
||||
let res = { x = 1280; y = 1024; };
|
||||
in someFunction res otherArg yetAnotherArg
|
||||
```
|
||||
|
||||
- The bodies of functions, asserts, and withs are not indented to prevent a lot of superfluous indentation levels, i.e.
|
||||
|
||||
```nix
|
||||
{ arg1, arg2 }:
|
||||
assert system == "i686-linux";
|
||||
stdenv.mkDerivation { ...
|
||||
```
|
||||
|
||||
not
|
||||
|
||||
```nix
|
||||
{ arg1, arg2 }:
|
||||
assert system == "i686-linux";
|
||||
stdenv.mkDerivation { ...
|
||||
```
|
||||
|
||||
- Function formal arguments are written as:
|
||||
|
||||
```nix
|
||||
{ arg1, arg2, arg3 }:
|
||||
```
|
||||
|
||||
but if they don't fit on one line they're written as:
|
||||
|
||||
```nix
|
||||
{ arg1, arg2, arg3
|
||||
, arg4, ...
|
||||
, # Some comment...
|
||||
argN
|
||||
}:
|
||||
```
|
||||
|
||||
- Functions should list their expected arguments as precisely as possible. That is, write
|
||||
|
||||
```nix
|
||||
{ stdenv, fetchurl, perl }: ...
|
||||
```
|
||||
|
||||
instead of
|
||||
|
||||
```nix
|
||||
args: with args; ...
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```nix
|
||||
{ stdenv, fetchurl, perl, ... }: ...
|
||||
```
|
||||
|
||||
For functions that are truly generic in the number of arguments (such as wrappers around `mkDerivation`) that have some required arguments, you should write them using an `@`-pattern:
|
||||
|
||||
```nix
|
||||
{ stdenv, doCoverageAnalysis ? false, ... } @ args:
|
||||
|
||||
stdenv.mkDerivation (args // {
|
||||
... if doCoverageAnalysis then "bla" else "" ...
|
||||
})
|
||||
```
|
||||
|
||||
instead of
|
||||
|
||||
```nix
|
||||
args:
|
||||
|
||||
args.stdenv.mkDerivation (args // {
|
||||
... if args ? doCoverageAnalysis && args.doCoverageAnalysis then "bla" else "" ...
|
||||
})
|
||||
```
|
||||
|
||||
- Unnecessary string conversions should be avoided. Do
|
||||
|
||||
```nix
|
||||
rev = version;
|
||||
```
|
||||
|
||||
instead of
|
||||
|
||||
```nix
|
||||
rev = "${version}";
|
||||
```
|
||||
|
||||
- Building lists conditionally _should_ be done with `lib.optional(s)` instead of using `if cond then [ ... ] else null` or `if cond then [ ... ] else [ ]`.
|
||||
|
||||
```nix
|
||||
buildInputs = lib.optional stdenv.isDarwin iconv;
|
||||
```
|
||||
|
||||
instead of
|
||||
|
||||
```nix
|
||||
buildInputs = if stdenv.isDarwin then [ iconv ] else null;
|
||||
```
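For several elements, the plural `lib.optionals` takes a list instead (a sketch; the inputs are illustrative):
```nix
buildInputs = lib.optionals stdenv.isDarwin [ iconv gettext ];
```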
|
||||
|
||||
As an exception, an explicit conditional expression with null can be used when fixing an important bug without triggering a mass rebuild.
|
||||
If this is done, a follow-up pull request _should_ be created to change the code to `lib.optional(s)`.
|
||||
|
|
third_party/nixpkgs/README.md (vendored, 21 lines changed)
|
@@ -70,26 +70,7 @@ Linux distribution. The [GitHub Insights](https://github.com/NixOS/nixpkgs/pulse
|
|||
page gives a sense of the project activity.
|
||||
|
||||
Community contributions are always welcome through GitHub Issues and
|
||||
Pull Requests. When pull requests are made, our tooling automation bot,
|
||||
[OfBorg](https://github.com/NixOS/ofborg) will perform various checks
|
||||
to help ensure expression quality.
|
||||
|
||||
The *Nixpkgs maintainers* are people who have assigned themselves to
|
||||
maintain specific individual packages. We encourage people who care
|
||||
about a package to assign themselves as a maintainer. When a pull
|
||||
request is made against a package, OfBorg will notify the appropriate
|
||||
maintainer(s). The *Nixpkgs committers* are people who have been given
|
||||
permission to merge.
|
||||
|
||||
Most contributions are based on and merged into these branches:
|
||||
|
||||
* `master` is the main branch where all small contributions go
|
||||
* `staging` is branched from master, changes that have a big impact on
|
||||
Hydra builds go to this branch
|
||||
* `staging-next` is branched from staging and only fixes to stabilize
|
||||
and security fixes with a big impact on Hydra builds should be
|
||||
contributed to this branch. This branch is merged into master when
|
||||
deemed of sufficiently high quality
|
||||
Pull Requests.
|
||||
|
||||
For more information about contributing to the project, please visit
|
||||
the [contributing page](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
|
third_party/nixpkgs/doc/README.md (vendored, 111 lines changed)
|
@@ -1,5 +1,4 @@
|
|||
|
||||
# Nixpkgs/doc
|
||||
# Contributing to the Nixpkgs manual
|
||||
|
||||
This directory houses the source files for the Nixpkgs manual.
|
||||
|
||||
|
@@ -7,6 +6,110 @@ You can find the [rendered documentation for Nixpkgs `unstable` on nixos.org](ht
|
|||
|
||||
[Docs for Nixpkgs stable](https://nixos.org/manual/nixpkgs/stable/) are also available.
|
||||
|
||||
If you want to contribute to the documentation, [here's how to do it](https://nixos.org/manual/nixpkgs/unstable/#chap-contributing).
|
||||
|
||||
If you're only getting started with Nix, go to [nixos.org/learn](https://nixos.org/learn).
|
||||
|
||||
## Contributing to this documentation
|
||||
|
||||
You can quickly check your edits with `nix-build`:
|
||||
|
||||
```ShellSession
|
||||
$ cd /path/to/nixpkgs
|
||||
$ nix-build doc
|
||||
```
|
||||
|
||||
If the build succeeds, the manual will be in `./result/share/doc/nixpkgs/manual.html`.
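You can then open it from the command line, for example (assuming a Linux desktop with `xdg-open`):
```ShellSession
$ xdg-open ./result/share/doc/nixpkgs/manual.html
```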
|
||||
|
||||
### devmode
|
||||
|
||||
The shell in the manual source directory makes available a command, `devmode`.
|
||||
It is a daemon that:
|
||||
1. watches the manual's source for changes and rebuilds when they occur
|
||||
2. serves the manual over HTTP, injecting a script that triggers a reload on changes
|
||||
3. opens the manual in the default browser
|
||||
|
||||
## Syntax
|
||||
|
||||
As per [RFC 0072](https://github.com/NixOS/rfcs/pull/72), all new documentation content should be written in [CommonMark](https://commonmark.org/) Markdown dialect.
|
||||
|
||||
Additional syntax extensions are available, all of which can be used in NixOS option documentation. The following extensions are currently used:
|
||||
|
||||
#### Tables
|
||||
|
||||
Tables, using the [GitHub-flavored Markdown syntax](https://github.github.com/gfm/#tables-extension-).
|
||||
|
||||
#### Anchors
|
||||
|
||||
Explicitly defined **anchors** on headings, to allow linking to sections. These should always be used, to ensure the anchors can be linked even when the heading text changes, and to prevent conflicts between [automatically assigned identifiers](https://github.com/jgm/commonmark-hs/blob/master/commonmark-extensions/test/auto_identifiers.md).
|
||||
|
||||
It uses the widely compatible [header attributes](https://github.com/jgm/commonmark-hs/blob/master/commonmark-extensions/test/attributes.md) syntax:
|
||||
|
||||
```markdown
|
||||
## Syntax {#sec-contributing-markup}
|
||||
```
|
||||
|
||||
> **Note**
|
||||
> NixOS option documentation does not support headings in general.
|
||||
|
||||
#### Inline Anchors
|
||||
|
||||
These allow linking to an arbitrary place in the text (e.g. individual list items, sentences…).
|
||||
|
||||
They are defined using a hybrid of the link syntax with the attributes syntax known from headings, called [bracketed spans](https://github.com/jgm/commonmark-hs/blob/master/commonmark-extensions/test/bracketed_spans.md):
|
||||
|
||||
```markdown
|
||||
- []{#ssec-gnome-hooks-glib} `glib` setup hook will populate `GSETTINGS_SCHEMAS_PATH` and then `wrapGAppsHook` will prepend it to `XDG_DATA_DIRS`.
|
||||
```
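Such an inline anchor can then be referenced like any other link target (using the anchor defined in the example above):
```markdown
See the [glib setup hook](#ssec-gnome-hooks-glib) for details on `GSETTINGS_SCHEMAS_PATH`.
```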
|
||||
|
||||
#### Automatic links
|
||||
|
||||
If you **omit the link text** for a link pointing to a section, the text will be substituted automatically. For example, `[](#chap-contributing)`.
|
||||
|
||||
This syntax is taken from [MyST](https://myst-parser.readthedocs.io/en/latest/using/syntax.html#targets-and-cross-referencing).
|
||||
|
||||
#### Roles
|
||||
|
||||
If you want to link to a man page, you can use `` {manpage}`nix.conf(5)` ``. The references will turn into links when a mapping exists in [`doc/manpage-urls.json`](./manpage-urls.json).
|
||||
|
||||
A few markups for other kinds of literals are also available:
|
||||
|
||||
- `` {command}`rm -rfi` ``
|
||||
- `` {env}`XDG_DATA_DIRS` ``
|
||||
- `` {file}`/etc/passwd` ``
|
||||
- `` {option}`networking.useDHCP` ``
|
||||
- `` {var}`/etc/passwd` ``
|
||||
|
||||
These literal kinds are used mostly in NixOS option documentation.
|
||||
|
||||
This syntax is taken from [MyST](https://myst-parser.readthedocs.io/en/latest/syntax/syntax.html#roles-an-in-line-extension-point), though the feature originates from [reStructuredText](https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html#role-manpage) with a slightly different syntax.
|
||||
|
||||
#### Admonitions
|
||||
|
||||
Set off from the text to bring attention to something.
|
||||
|
||||
It uses pandoc’s [fenced `div`s syntax](https://github.com/jgm/commonmark-hs/blob/master/commonmark-extensions/test/fenced_divs.md):
|
||||
|
||||
```markdown
|
||||
::: {.warning}
|
||||
This is a warning
|
||||
:::
|
||||
```
|
||||
|
||||
The following are supported:
|
||||
|
||||
- [`caution`](https://tdg.docbook.org/tdg/5.0/caution.html)
|
||||
- [`important`](https://tdg.docbook.org/tdg/5.0/important.html)
|
||||
- [`note`](https://tdg.docbook.org/tdg/5.0/note.html)
|
||||
- [`tip`](https://tdg.docbook.org/tdg/5.0/tip.html)
|
||||
- [`warning`](https://tdg.docbook.org/tdg/5.0/warning.html)
|
||||
|
||||
#### [Definition lists](https://github.com/jgm/commonmark-hs/blob/master/commonmark-extensions/test/definition_lists.md)
|
||||
|
||||
For defining a group of terms:
|
||||
|
||||
```markdown
|
||||
pear
|
||||
: green or yellow bulbous fruit
|
||||
|
||||
watermelon
|
||||
: green fruit with red flesh
|
||||
```
|
||||
|
|
|
@@ -103,14 +103,14 @@ You can install it like any other packages via `nix-env -iA myEmacs`. However, t
|
|||
|
||||
This provides a fairly full Emacs start file. It will load in addition to the user's personal config. You can always disable it by passing `-q` to the Emacs command.
|
||||
|
||||
Sometimes `emacs.pkgs.withPackages` is not enough, as this package set has some priorities imposed on packages (with the lowest priority assigned to Melpa Unstable, and the highest for packages manually defined in `pkgs/top-level/emacs-packages.nix`). But you can't control these priorities when some package is installed as a dependency. You can override it on a per-package-basis, providing all the required dependencies manually, but it's tedious and there is always a possibility that an unwanted dependency will sneak in through some other package. To completely override such a package, you can use `overrideScope'`.
|
||||
Sometimes `emacs.pkgs.withPackages` is not enough, as this package set has some priorities imposed on packages (with the lowest priority assigned to Melpa Unstable, and the highest for packages manually defined in `pkgs/top-level/emacs-packages.nix`). But you can't control these priorities when some package is installed as a dependency. You can override it on a per-package-basis, providing all the required dependencies manually, but it's tedious and there is always a possibility that an unwanted dependency will sneak in through some other package. To completely override such a package, you can use `overrideScope`.
|
||||
|
||||
```nix
|
||||
overrides = self: super: rec {
|
||||
haskell-mode = self.melpaPackages.haskell-mode;
|
||||
...
|
||||
};
|
||||
((emacsPackagesFor emacs).overrideScope' overrides).withPackages
|
||||
((emacsPackagesFor emacs).overrideScope overrides).withPackages
|
||||
(p: with p; [
|
||||
# here both these packages will use the haskell-mode of our own choice
|
||||
ghc-mod
|
||||
|
|
|
@ -1,693 +1,63 @@
|
|||
# Coding conventions {#chap-conventions}
|
||||
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
## Syntax {#sec-syntax}
|
||||
|
||||
- Use 2 spaces of indentation per indentation level in Nix expressions, 4 spaces in shell scripts.
|
||||
|
||||
- Do not use tab characters, i.e. configure your editor to use soft tabs. For instance, use `(setq-default indent-tabs-mode nil)` in Emacs. Everybody has different tab settings so it’s asking for trouble.
|
||||
|
||||
- Use `lowerCamelCase` for variable names, not `UpperCamelCase`. Note, this rule does not apply to package attribute names, which instead follow the rules in [](#sec-package-naming).
|
||||
|
||||
- Function calls with attribute set arguments are written as
|
||||
|
||||
```nix
|
||||
foo {
|
||||
arg = ...;
|
||||
}
|
||||
```
|
||||
|
||||
not
|
||||
|
||||
```nix
|
||||
foo
|
||||
{
|
||||
arg = ...;
|
||||
}
|
||||
```
|
||||
|
||||
Also fine is
|
||||
|
||||
```nix
|
||||
foo { arg = ...; }
|
||||
```
|
||||
|
||||
if it's a short call.
|
||||
|
||||
- In attribute sets or lists that span multiple lines, the attribute names or list elements should be aligned:
|
||||
|
||||
```nix
|
||||
# A long list.
|
||||
list = [
|
||||
elem1
|
||||
elem2
|
||||
elem3
|
||||
];
|
||||
|
||||
# A long attribute set.
|
||||
attrs = {
|
||||
attr1 = short_expr;
|
||||
attr2 =
|
||||
if true then big_expr else big_expr;
|
||||
};
|
||||
|
||||
# Combined
|
||||
listOfAttrs = [
|
||||
{
|
||||
attr1 = 3;
|
||||
attr2 = "fff";
|
||||
}
|
||||
{
|
||||
attr1 = 5;
|
||||
attr2 = "ggg";
|
||||
}
|
||||
];
|
||||
```
|
||||
|
||||
- Short lists or attribute sets can be written on one line:
|
||||
|
||||
```nix
|
||||
# A short list.
|
||||
list = [ elem1 elem2 elem3 ];
|
||||
|
||||
# A short set.
|
||||
attrs = { x = 1280; y = 1024; };
|
||||
```
|
||||
|
||||
- Breaking in the middle of a function argument can give hard-to-read code, like
|
||||
|
||||
```nix
|
||||
someFunction { x = 1280;
|
||||
y = 1024; } otherArg
|
||||
yetAnotherArg
|
||||
```
|
||||
|
||||
(especially if the argument is very large, spanning multiple lines).
|
||||
|
||||
Better:
|
||||
|
||||
```nix
|
||||
someFunction
|
||||
{ x = 1280; y = 1024; }
|
||||
otherArg
|
||||
yetAnotherArg
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```nix
|
||||
let res = { x = 1280; y = 1024; };
|
||||
in someFunction res otherArg yetAnotherArg
|
||||
```
|
||||
|
||||
- The bodies of functions, asserts, and withs are not indented to prevent a lot of superfluous indentation levels, i.e.
|
||||
|
||||
```nix
|
||||
{ arg1, arg2 }:
|
||||
assert system == "i686-linux";
|
||||
stdenv.mkDerivation { ...
|
||||
```
|
||||
|
||||
not
|
||||
|
||||
```nix
|
||||
{ arg1, arg2 }:
|
||||
  assert system == "i686-linux";
|
||||
    stdenv.mkDerivation { ...
|
||||
```
|
||||
|
||||
- Function formal arguments are written as:
|
||||
|
||||
```nix
|
||||
{ arg1, arg2, arg3 }:
|
||||
```
|
||||
|
||||
but if they don't fit on one line they're written as:
|
||||
|
||||
```nix
|
||||
{ arg1, arg2, arg3
|
||||
, arg4, ...
|
||||
, # Some comment...
|
||||
argN
|
||||
}:
|
||||
```
|
||||
|
||||
- Functions should list their expected arguments as precisely as possible. That is, write
|
||||
|
||||
```nix
|
||||
{ stdenv, fetchurl, perl }: ...
|
||||
```
|
||||
|
||||
instead of
|
||||
|
||||
```nix
|
||||
args: with args; ...
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```nix
|
||||
{ stdenv, fetchurl, perl, ... }: ...
|
||||
```
|
||||
|
||||
For functions that are truly generic in the number of arguments (such as wrappers around `mkDerivation`) that have some required arguments, you should write them using an `@`-pattern:
|
||||
|
||||
```nix
|
||||
{ stdenv, doCoverageAnalysis ? false, ... } @ args:
|
||||
|
||||
stdenv.mkDerivation (args // {
|
||||
... if doCoverageAnalysis then "bla" else "" ...
|
||||
})
|
||||
```
|
||||
|
||||
instead of
|
||||
|
||||
```nix
|
||||
args:
|
||||
|
||||
args.stdenv.mkDerivation (args // {
|
||||
... if args ? doCoverageAnalysis && args.doCoverageAnalysis then "bla" else "" ...
|
||||
})
|
||||
```
|
||||
|
||||
- Unnecessary string conversions should be avoided. Do
|
||||
|
||||
```nix
|
||||
rev = version;
|
||||
```
|
||||
|
||||
instead of
|
||||
|
||||
```nix
|
||||
rev = "${version}";
|
||||
```
|
||||
|
||||
- Building lists conditionally _should_ be done with `lib.optional(s)` instead of using `if cond then [ ... ] else null` or `if cond then [ ... ] else [ ]`.
|
||||
|
||||
```nix
|
||||
buildInputs = lib.optional stdenv.isDarwin iconv;
|
||||
```
|
||||
|
||||
instead of
|
||||
|
||||
```nix
|
||||
buildInputs = if stdenv.isDarwin then [ iconv ] else null;
|
||||
```
|
||||
|
||||
As an exception, an explicit conditional expression with null can be used when fixing an important bug without triggering a mass rebuild.
|
||||
If this is done a follow up pull request _should_ be created to change the code to `lib.optional(s)`.
|
||||
|
||||
- Arguments should be listed in the order they are used, with the exception of `lib`, which always goes first.
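  For example, a package expression might take its arguments like this (a minimal sketch; the package name and URL are placeholders, not a real package):

  ```nix
  { lib, stdenv, fetchurl, installShellFiles }:

  stdenv.mkDerivation rec {
    # lib always goes first; the remaining arguments are listed in the order they are used below.
    pname = "example-tool";   # illustrative name
    version = "1.0";
    src = fetchurl {
      url = "https://example.org/${pname}-${version}.tar.gz";  # placeholder URL
      hash = lib.fakeHash;                                     # replace with the real hash
    };
    nativeBuildInputs = [ installShellFiles ];
    meta.license = lib.licenses.mit;
  }
  ```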
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
## Package naming {#sec-package-naming}
|
||||
|
||||
The key words _must_, _must not_, _required_, _shall_, _shall not_, _should_, _should not_, _recommended_, _may_, and _optional_ in this section are to be interpreted as described in [RFC 2119](https://tools.ietf.org/html/rfc2119). Only _emphasized_ words are to be interpreted in this way.
|
||||
|
||||
In Nixpkgs, there are generally three different names associated with a package:
|
||||
|
||||
- The `pname` attribute of the derivation. This is what most users see, in particular when using `nix-env`.
|
||||
|
||||
- The variable name used for the instantiated package in `all-packages.nix`, and when passing it as a dependency to other functions. Typically this is called the _package attribute name_. This is what Nix expression authors see. It can also be used when installing using `nix-env -iA`.
|
||||
|
||||
- The filename for (the directory containing) the Nix expression.
|
||||
|
||||
Most of the time, these are the same. For instance, the package `e2fsprogs` has a `pname` attribute `"e2fsprogs"`, is bound to the variable name `e2fsprogs` in `all-packages.nix`, and the Nix expression is in `pkgs/os-specific/linux/e2fsprogs/default.nix`.
|
||||
|
||||
There are a few naming guidelines:
|
||||
|
||||
- The `pname` attribute _should_ be identical to the upstream package name.
|
||||
|
||||
- The `pname` and the `version` attribute _must not_ contain uppercase letters — e.g., `"mplayer"` instead of `"MPlayer"`.
|
||||
|
||||
- The `version` attribute _must_ start with a digit, e.g. `"0.3.1rc2"`.
|
||||
|
||||
- If a package is a commit from a repository without a version assigned, then the `version` attribute _should_ be the latest upstream version preceding that commit, followed by `-unstable-` and the date of the (fetched) commit. The date _must_ be in `"YYYY-MM-DD"` format.
|
||||
|
||||
Example: Given a project had its latest releases `2.2` in November 2021, and `3.0` in January 2022, a commit authored on March 15, 2022 for an upcoming bugfix release `2.2.1` would have `version = "2.2-unstable-2022-03-15"`.
|
||||
|
||||
- Dashes in the package `pname` _should_ be preserved in new variable names, rather than converted to underscores or camel cased — e.g., `http-parser` instead of `http_parser` or `httpParser`. The hyphenated style is preferred in all three package names.
|
||||
|
||||
- If there are multiple versions of a package, this _should_ be reflected in the variable names in `all-packages.nix`, e.g. `json-c_0_9` and `json-c_0_11`. If there is an obvious “default” version, make an attribute like `json-c = json-c_0_9;`. See also [](#sec-versioning)
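  In `all-packages.nix` this could look like the following sketch (the paths are illustrative and may not match the actual tree layout):

  ```nix
  json-c_0_9  = callPackage ../development/libraries/json-c/0.9.nix { };
  json-c_0_11 = callPackage ../development/libraries/json-c/0.11.nix { };
  json-c = json-c_0_9;  # the "default" version that other packages receive
  ```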
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
||||
## File naming and organisation {#sec-organisation}
|
||||
|
||||
Names of files and directories should be in lowercase, with dashes between words — not in camel case. For instance, it should be `all-packages.nix`, not `allPackages.nix` or `AllPackages.nix`.
|
||||
|
||||
### Hierarchy {#sec-hierarchy}
|
||||
|
||||
Each package should be stored in its own directory somewhere in the `pkgs/` tree, i.e. in `pkgs/category/subcategory/.../pkgname`. Below are some rules for picking the right category for a package. Many packages fall under several categories; what matters is the _primary_ purpose of a package. For example, the `libxml2` package builds both a library and some tools; but it’s a library foremost, so it goes under `pkgs/development/libraries`.
|
||||
|
||||
When in doubt, consider refactoring the `pkgs/` tree, e.g. creating new categories or splitting up an existing category.
|
||||
|
||||
**If it’s used to support _software development_:**
|
||||
|
||||
- **If it’s a _library_ used by other packages:**
|
||||
|
||||
- `development/libraries` (e.g. `libxml2`)
|
||||
|
||||
- **If it’s a _compiler_:**
|
||||
|
||||
- `development/compilers` (e.g. `gcc`)
|
||||
|
||||
- **If it’s an _interpreter_:**
|
||||
|
||||
- `development/interpreters` (e.g. `guile`)
|
||||
|
||||
- **If it’s a (set of) development _tool(s)_:**
|
||||
|
||||
- **If it’s a _parser generator_ (including lexers):**
|
||||
|
||||
- `development/tools/parsing` (e.g. `bison`, `flex`)
|
||||
|
||||
- **If it’s a _build manager_:**
|
||||
|
||||
- `development/tools/build-managers` (e.g. `gnumake`)
|
||||
|
||||
- **If it’s a _language server_:**
|
||||
|
||||
- `development/tools/language-servers` (e.g. `ccls` or `rnix-lsp`)
|
||||
|
||||
- **Else:**
|
||||
|
||||
- `development/tools/misc` (e.g. `binutils`)
|
||||
|
||||
- **Else:**
|
||||
|
||||
- `development/misc`
|
||||
|
||||
**If it’s a (set of) _tool(s)_:**
|
||||
|
||||
(A tool is a relatively small program, especially one intended to be used non-interactively.)
|
||||
|
||||
- **If it’s for _networking_:**
|
||||
|
||||
- `tools/networking` (e.g. `wget`)
|
||||
|
||||
- **If it’s for _text processing_:**
|
||||
|
||||
- `tools/text` (e.g. `diffutils`)
|
||||
|
||||
- **If it’s a _system utility_, i.e., something related or essential to the operation of a system:**
|
||||
|
||||
- `tools/system` (e.g. `cron`)
|
||||
|
||||
- **If it’s an _archiver_ (which may include a compression function):**
|
||||
|
||||
- `tools/archivers` (e.g. `zip`, `tar`)
|
||||
|
||||
- **If it’s a _compression_ program:**
|
||||
|
||||
- `tools/compression` (e.g. `gzip`, `bzip2`)
|
||||
|
||||
- **If it’s a _security_-related program:**
|
||||
|
||||
- `tools/security` (e.g. `nmap`, `gnupg`)
|
||||
|
||||
- **Else:**
|
||||
|
||||
- `tools/misc`
|
||||
|
||||
**If it’s a _shell_:**
|
||||
|
||||
- `shells` (e.g. `bash`)
|
||||
|
||||
**If it’s a _server_:**
|
||||
|
||||
- **If it’s a web server:**
|
||||
|
||||
- `servers/http` (e.g. `apache-httpd`)
|
||||
|
||||
- **If it’s an implementation of the X Windowing System:**
|
||||
|
||||
- `servers/x11` (e.g. `xorg` — this includes the client libraries and programs)
|
||||
|
||||
- **Else:**
|
||||
|
||||
- `servers/misc`
|
||||
|
||||
**If it’s a _desktop environment_:**
|
||||
|
||||
- `desktops` (e.g. `kde`, `gnome`, `enlightenment`)
|
||||
|
||||
**If it’s a _window manager_:**
|
||||
|
||||
- `applications/window-managers` (e.g. `awesome`, `stumpwm`)
|
||||
|
||||
**If it’s an _application_:**
|
||||
|
||||
A (typically large) program with a distinct user interface, primarily used interactively.
|
||||
|
||||
- **If it’s a _version management system_:**
|
||||
|
||||
- `applications/version-management` (e.g. `subversion`)
|
||||
|
||||
- **If it’s a _terminal emulator_:**
|
||||
|
||||
- `applications/terminal-emulators` (e.g. `alacritty` or `rxvt` or `termite`)
|
||||
|
||||
- **If it’s a _file manager_:**
|
||||
|
||||
- `applications/file-managers` (e.g. `mc` or `ranger` or `pcmanfm`)
|
||||
|
||||
- **If it’s for _video playback / editing_:**
|
||||
|
||||
- `applications/video` (e.g. `vlc`)
|
||||
|
||||
- **If it’s for _graphics viewing / editing_:**
|
||||
|
||||
- `applications/graphics` (e.g. `gimp`)
|
||||
|
||||
- **If it’s for _networking_:**
|
||||
|
||||
- **If it’s a _mailreader_:**
|
||||
|
||||
- `applications/networking/mailreaders` (e.g. `thunderbird`)
|
||||
|
||||
- **If it’s a _newsreader_:**
|
||||
|
||||
- `applications/networking/newsreaders` (e.g. `pan`)
|
||||
|
||||
- **If it’s a _web browser_:**
|
||||
|
||||
- `applications/networking/browsers` (e.g. `firefox`)
|
||||
|
||||
- **Else:**
|
||||
|
||||
- `applications/networking/misc`
|
||||
|
||||
- **Else:**
|
||||
|
||||
- `applications/misc`
|
||||
|
||||
**If it’s _data_ (i.e., does not have a straight-forward executable semantics):**
|
||||
|
||||
- **If it’s a _font_:**
|
||||
|
||||
- `data/fonts`
|
||||
|
||||
- **If it’s an _icon theme_:**
|
||||
|
||||
- `data/icons`
|
||||
|
||||
- **If it’s related to _SGML/XML processing_:**
|
||||
|
||||
- **If it’s an _XML DTD_:**
|
||||
|
||||
- `data/sgml+xml/schemas/xml-dtd` (e.g. `docbook`)
|
||||
|
||||
- **If it’s an _XSLT stylesheet_:**
|
||||
|
||||
(Okay, these are executable...)
|
||||
|
||||
- `data/sgml+xml/stylesheets/xslt` (e.g. `docbook-xsl`)
|
||||
|
||||
- **If it’s a _theme_ for a _desktop environment_, a _window manager_ or a _display manager_:**
|
||||
|
||||
- `data/themes`
|
||||
|
||||
**If it’s a _game_:**
|
||||
|
||||
- `games`
|
||||
|
||||
**Else:**
|
||||
|
||||
- `misc`
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
### Versioning {#sec-versioning}
|
||||
|
||||
Because every version of a package in Nixpkgs creates a potential maintenance burden, old versions of a package should not be kept unless there is a good reason to do so. For instance, Nixpkgs contains several versions of GCC because other packages don’t build with the latest version of GCC. Other examples are having both the latest stable and latest pre-release version of a package, or to keep several major releases of an application that differ significantly in functionality.
|
||||
|
||||
If there is only one version of a package, its Nix expression should be named `e2fsprogs/default.nix`. If there are multiple versions, this should be reflected in the filename, e.g. `e2fsprogs/1.41.8.nix` and `e2fsprogs/1.41.9.nix`. The version in the filename should leave out unnecessary detail. For instance, if we keep the latest Firefox 2.0.x and 3.5.x versions in Nixpkgs, they should be named `firefox/2.0.nix` and `firefox/3.5.nix`, respectively (which, at a given point, might contain versions `2.0.0.20` and `3.5.4`). If a version requires many auxiliary files, you can use a subdirectory for each version, e.g. `firefox/2.0/default.nix` and `firefox/3.5/default.nix`.
|
||||
|
||||
All versions of a package _must_ be included in `all-packages.nix` to make sure that they evaluate correctly.
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
||||
## Fetching Sources {#sec-sources}
|
||||
|
||||
There are multiple ways to fetch a package source in nixpkgs. The general guideline is that you should package reproducible sources with a high degree of availability. Right now there is only one fetcher which has mirroring support and that is `fetchurl`. Note that you should also prefer protocols which have a corresponding proxy environment variable.
|
||||
|
||||
You can find many source fetch helpers in `pkgs/build-support/fetch*`.
|
||||
|
||||
In the file `pkgs/top-level/all-packages.nix` you can find fetch helpers whose names have the form `fetchFrom*`. These are intended to provide snapshot fetches while using the same API as the version-controlled fetchers from `pkgs/build-support/`. As an example, going from bad to good:
|
||||
|
||||
- Bad: Uses `git://` which won't be proxied.
|
||||
|
||||
```nix
|
||||
src = fetchgit {
|
||||
url = "git@github.com:NixOS/nix.git"
|
||||
url = "git://github.com/NixOS/nix.git";
|
||||
rev = "1f795f9f44607cc5bec70d1300150bfefcef2aae";
|
||||
hash = "sha256-7D4m+saJjbSFP5hOwpQq2FGR2rr+psQMTcyb1ZvtXsQ=";
|
||||
}
|
||||
```
|
||||
|
||||
- Better: This is ok, but an archive fetch will still be faster.
|
||||
|
||||
```nix
|
||||
src = fetchgit {
|
||||
url = "https://github.com/NixOS/nix.git";
|
||||
rev = "1f795f9f44607cc5bec70d1300150bfefcef2aae";
|
||||
hash = "sha256-7D4m+saJjbSFP5hOwpQq2FGR2rr+psQMTcyb1ZvtXsQ=";
|
||||
}
|
||||
```
|
||||
|
||||
- Best: Fetches a snapshot archive and you get the rev you want.
|
||||
|
||||
```nix
|
||||
src = fetchFromGitHub {
|
||||
owner = "NixOS";
|
||||
repo = "nix";
|
||||
rev = "1f795f9f44607cc5bec70d1300150bfefcef2aae";
|
||||
hash = "sha256-7D4m+saJjbSFP5hOwpQq2FGR2rr+psQMTcyb1ZvtXsQ=";
|
||||
}
|
||||
```
|
||||
|
||||
When fetching from GitHub, commits must always be referenced by their full commit hash. This is because GitHub shares commit hashes among all forks and returns `404 Not Found` when a short commit hash is ambiguous. It already happens for some short, 6-character commit hashes in `nixpkgs`.
|
||||
It is a practical vector for a denial-of-service attack, carried out by pushing large numbers of auto-generated commits into forks, and was already [demonstrated against GitHub Actions Beta](https://blog.teddykatz.com/2019/11/12/github-actions-dos.html).
|
||||
|
||||
Find the value to put as `hash` by running `nix-shell -p nix-prefetch-github --run "nix-prefetch-github --rev 1f795f9f44607cc5bec70d1300150bfefcef2aae NixOS nix"`.
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
||||
## Obtaining source hash {#sec-source-hashes}
|
||||
|
||||
The preferred source hash type is sha256. There are several ways to get it.
|
||||
|
||||
1. Prefetch URL (with `nix-prefetch-XXX URL`, where `XXX` is one of `url`, `git`, `hg`, `cvs`, `bzr`, `svn`). Hash is printed to stdout.
|
||||
|
||||
2. Prefetch by package source (with `nix-prefetch-url '<nixpkgs>' -A PACKAGE.src`, where `PACKAGE` is package attribute name). Hash is printed to stdout.
|
||||
|
||||
This works well when you've upgraded an existing package version and want to find out the new hash, but is useless if the package can't be accessed by attribute or has multiple sources (`.srcs`, architecture-dependent sources, etc).
|
||||
|
||||
3. Upstream provided hash: use it when upstream provides `sha256` or `sha512` (when upstream provides `md5`, don't use it, compute `sha256` instead).
|
||||
|
||||
A small nuance is that `nix-prefetch-*` tools produce a hash encoded in `base32`, while upstreams usually provide hexadecimal (`base16`) encoding. Fetchers understand both formats. Nixpkgs does not standardize on any one format.
|
||||
|
||||
You can convert between formats with `nix-hash`, for example:
|
||||
|
||||
```ShellSession
|
||||
$ nix-hash --type sha256 --to-base32 HASH
|
||||
```
|
||||
|
||||
4. Extracting the hash from a local source tarball can be done with `sha256sum`. Use `nix-prefetch-url file:///path/to/tarball` if you want the base32 hash.
|
||||
|
||||
5. Fake hash: set the hash to one of
|
||||
|
||||
- `""`
|
||||
- `lib.fakeHash`
|
||||
- `lib.fakeSha256`
|
||||
- `lib.fakeSha512`
|
||||
|
||||
in the package expression, attempt build and extract correct hash from error messages.
|
||||
|
||||
::: {.warning}
|
||||
You must use one of these four fake hashes and not some arbitrarily-chosen hash.
|
||||
|
||||
See [](#sec-source-hashes-security).
|
||||
:::
|
||||
|
||||
This is a last-resort method, for when reconstructing the source URL is non-trivial and `nix-prefetch-url -A` isn’t applicable (for example, [one of `kodi`'s dependencies](https://github.com/NixOS/nixpkgs/blob/d2ab091dd308b99e4912b805a5eb088dd536adb9/pkgs/applications/video/kodi/default.nix#L73)). The easiest way then is to replace the hash with a fake one and rebuild. The Nix build will fail, and the error message will contain the desired hash.
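For example (a sketch; the URL is a placeholder), you would temporarily write:

```nix
src = fetchurl {
  url = "https://example.org/libfoo-1.0.tar.gz";  # placeholder URL
  hash = lib.fakeHash;  # build once, then copy the correct hash from the error message
};
```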
|
||||
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
||||
### Obtaining hashes securely {#sec-source-hashes-security}
|
||||
|
||||
Let's say a man-in-the-middle (MITM) attacker sits close to your network. Then instead of fetching the source you can fetch malware, and instead of the source hash you get the hash of malware. Here are security considerations for this scenario:
|
||||
|
||||
- `http://` URLs are not secure to prefetch hash from;
|
||||
|
||||
- hashes from upstream (in method 3) should be obtained via secure protocol;
|
||||
|
||||
- `https://` URLs are secure in methods 1, 2, 3;
|
||||
|
||||
- `https://` URLs are secure in method 5 *only if* you use one of the listed fake hashes. If you use any other hash, `fetchurl` will pass `--insecure` to `curl` and may then degrade to HTTP in case of TLS certificate expiration.
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
||||
## Patches {#sec-patches}
|
||||
|
||||
Patches available online should be retrieved using `fetchpatch`.
|
||||
|
||||
```nix
|
||||
patches = [
|
||||
(fetchpatch {
|
||||
name = "fix-check-for-using-shared-freetype-lib.patch";
|
||||
url = "http://git.ghostscript.com/?p=ghostpdl.git;a=patch;h=8f5d285";
|
||||
hash = "sha256-uRcxaCjd+WAuGrXOmGfFeu79cUILwkRdBu48mwcBE7g=";
|
||||
})
|
||||
];
|
||||
```
|
||||
|
||||
Otherwise, you can add a `.patch` file to the `nixpkgs` repository. In the interest of keeping our maintenance burden to a minimum, only patches that are unique to `nixpkgs` should be added in this way.
|
||||
|
||||
If a patch is available online but does not cleanly apply, it can be modified in some fixed ways by using additional optional arguments for `fetchpatch`. Check [](#fetchpatch) for details.
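For instance, a patch could be restricted to certain files and re-rooted using `fetchpatch`'s optional arguments (a sketch; the name, URL, and paths are illustrative):

```nix
(fetchpatch {
  name = "fix-build-with-newer-zlib.patch";         # illustrative name
  url = "https://example.org/commit/abc123.patch";  # placeholder URL
  stripLen = 1;                                     # drop one leading path component
  includes = [ "src/*" ];                           # keep only hunks that touch src/
  hash = lib.fakeHash;                              # replace with the real hash
})
```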
|
||||
|
||||
```nix
|
||||
patches = [ ./0001-changes.patch ];
|
||||
```
|
||||
|
||||
If you do need to create this sort of patch file, one way to do so is with git:
|
||||
|
||||
1. Move to the root directory of the source code you're patching.
|
||||
|
||||
```ShellSession
|
||||
$ cd the/program/source
|
||||
```
|
||||
|
||||
2. If a git repository is not already present, create one and stage all of the source files.
|
||||
|
||||
```ShellSession
|
||||
$ git init
|
||||
$ git add .
|
||||
```
|
||||
|
||||
3. Edit some files to make whatever changes need to be included in the patch.
|
||||
|
||||
4. Use git to create a diff, and pipe the output to a patch file:
|
||||
|
||||
```ShellSession
|
||||
$ git diff -a > nixpkgs/pkgs/the/package/0001-changes.patch
|
||||
```
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
||||
## Package tests {#sec-package-tests}
|
||||
|
||||
Tests are important to ensure quality and make reviews and automatic updates easy.
|
||||
|
||||
The following types of tests exist:
|
||||
|
||||
* [NixOS **module tests**](https://nixos.org/manual/nixos/stable/#sec-nixos-tests), which spawn one or more NixOS VMs. They exercise both NixOS modules and the packaged programs used within them. For example, a NixOS module test can start a web server VM running the `nginx` module, and a client VM running `curl` or a graphical `firefox`, and test that they can talk to each other and display the correct content.
|
||||
* Nix **package tests** are a lightweight alternative to NixOS module tests. They should be used to create simple integration tests for packages, but cannot test NixOS services, and some programs with graphical user interfaces may also be difficult to test with them.
|
||||
* The **`checkPhase` of a package**, which should execute the unit tests that are included in the source code of a package.
|
||||
|
||||
Here in the nixpkgs manual we describe mostly _package tests_; for _module tests_ head over to the corresponding [section in the NixOS manual](https://nixos.org/manual/nixos/stable/#sec-nixos-tests).
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
||||
### Writing inline package tests {#ssec-inline-package-tests-writing}
|
||||
|
||||
Very simple tests can be written inline:
|
||||
|
||||
```nix
|
||||
{ …, yq-go }:
|
||||
|
||||
buildGoModule rec {
|
||||
…
|
||||
|
||||
passthru.tests = {
|
||||
simple = runCommand "${pname}-test" {} ''
|
||||
echo "test: 1" | ${yq-go}/bin/yq eval -j > $out
|
||||
[ "$(cat $out | tr -d $'\n ')" = '{"test":1}' ]
|
||||
'';
|
||||
};
|
||||
}
|
||||
```
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
||||
### Writing larger package tests {#ssec-package-tests-writing}
|
||||
|
||||
This is an example using the `phoronix-test-suite` package with the current best practices.
|
||||
|
||||
Add the tests in `passthru.tests` to the package definition like this:
|
||||
|
||||
```nix
|
||||
{ stdenv, lib, fetchurl, callPackage }:
|
||||
|
||||
stdenv.mkDerivation {
|
||||
…
|
||||
|
||||
passthru.tests = {
|
||||
simple-execution = callPackage ./tests.nix { };
|
||||
};
|
||||
|
||||
meta = { … };
|
||||
}
|
||||
```
|
||||
|
||||
Create `tests.nix` in the package directory:
|
||||
|
||||
```nix
|
||||
{ runCommand, phoronix-test-suite }:
|
||||
|
||||
let
|
||||
inherit (phoronix-test-suite) pname version;
|
||||
in
|
||||
|
||||
runCommand "${pname}-tests" { meta.timeout = 60; }
|
||||
''
|
||||
# automatic initial setup to prevent interactive questions
|
||||
${phoronix-test-suite}/bin/phoronix-test-suite enterprise-setup >/dev/null
|
||||
# get version of installed program and compare with package version
|
||||
if [[ `${phoronix-test-suite}/bin/phoronix-test-suite version` != *"${version}"* ]]; then
|
||||
echo "Error: program version does not match package version"
|
||||
exit 1
|
||||
fi
|
||||
# run dummy command
|
||||
${phoronix-test-suite}/bin/phoronix-test-suite dummy_module.dummy-command >/dev/null
|
||||
# needed for Nix to register the command as successful
|
||||
touch $out
|
||||
''
|
||||
```
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
||||
### Running package tests {#ssec-package-tests-running}
|
||||
|
||||
You can run these tests with:
|
||||
|
||||
```ShellSession
|
||||
$ cd path/to/nixpkgs
|
||||
$ nix-build -A phoronix-test-suite.tests
|
||||
```
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
||||
### Examples of package tests {#ssec-package-tests-examples}
|
||||
|
||||
Here are examples of package tests:
|
||||
|
||||
- [Jasmin compile test](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/compilers/jasmin/test-assemble-hello-world/default.nix)
|
||||
- [Lobster compile test](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/compilers/lobster/test-can-run-hello-world.nix)
|
||||
- [Spacy annotation test](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/python-modules/spacy/annotation-test/default.nix)
|
||||
- [Libtorch test](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/libraries/science/math/libtorch/test/default.nix)
|
||||
- [Multiple tests for nanopb](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/libraries/nanopb/default.nix)
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
||||
### Linking NixOS module tests to a package {#ssec-nixos-tests-linking}
|
||||
|
||||
Like [package tests](#ssec-package-tests-writing) as shown above, [NixOS module tests](https://nixos.org/manual/nixos/stable/#sec-nixos-tests) can also be linked to a package, so that the tests can be easily run when changing the related package.
|
||||
|
||||
For example, assuming we're packaging `nginx`, we can link its module test via `passthru.tests`:
|
||||
|
||||
```nix
|
||||
{ stdenv, lib, nixosTests }:
|
||||
|
||||
stdenv.mkDerivation {
|
||||
...
|
||||
|
||||
passthru.tests = {
|
||||
nginx = nixosTests.nginx;
|
||||
};
|
||||
|
||||
...
|
||||
}
|
||||
```
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
||||
### Import From Derivation {#ssec-import-from-derivation}
|
||||
|
||||
Import From Derivation (IFD) is disallowed in Nixpkgs for performance reasons:
|
||||
[Hydra] evaluates the entire package set, and sequential builds during evaluation would increase evaluation times to become impractical.
|
||||
|
||||
[Hydra]: https://github.com/NixOS/hydra
|
||||
|
||||
Import From Derivation can be worked around in some cases by committing generated intermediate files to version control and reading those instead.
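A sketch of the difference (the attribute names and the generator command are illustrative, not an existing API):

```nix
# Import From Derivation: evaluating this attribute forces a build at evaluation time (disallowed).
generatedViaIFD = import (pkgs.runCommand "generated.nix" { } ''
  my-generator > $out   # "my-generator" is an illustrative command
'');

# Workaround: run the generator once, commit generated.nix, and read the checked-in file instead.
generatedFromRepo = import ./generated.nix;
```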
|
||||
|
||||
<!-- TODO: remove the following and link to Nix manual once https://github.com/NixOS/nix/pull/7332 is merged -->
|
||||
|
||||
See also [NixOS Wiki: Import From Derivation].
|
||||
|
||||
[NixOS Wiki: Import From Derivation]: https://nixos.wiki/wiki/Import_From_Derivation
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
|
|
@ -1,112 +1,11 @@
|
|||
# Contributing to this documentation {#chap-contributing}
|
||||
# Contributing to Nixpkgs documentation {#chap-contributing}
|
||||
|
||||
The sources of the Nixpkgs manual are in the [doc](https://github.com/NixOS/nixpkgs/tree/master/doc) subdirectory of the Nixpkgs repository.
|
||||
|
||||
You can quickly check your edits with `nix-build`:
|
||||
|
||||
```ShellSession
|
||||
$ cd /path/to/nixpkgs
|
||||
$ nix-build doc
|
||||
```
|
||||
|
||||
If the build succeeds, the manual will be in `./result/share/doc/nixpkgs/manual.html`.
|
||||
This section has been moved to [doc/README.md](https://github.com/NixOS/nixpkgs/blob/master/doc/README.md).
|
||||
|
||||
## devmode {#sec-contributing-devmode}
|
||||
|
||||
The shell in the manual source directory makes available a command, `devmode`.
|
||||
It is a daemon that:
|
||||
1. watches the manual's source for changes and rebuilds it when they occur
|
||||
2. serves the manual over HTTP, injecting a script that triggers a reload on changes
|
||||
3. opens the manual in the default browser
|
||||
This section has been moved to [doc/README.md](https://github.com/NixOS/nixpkgs/blob/master/doc/README.md).
|
||||
|
||||
## Syntax {#sec-contributing-markup}
|
||||
|
||||
As per [RFC 0072](https://github.com/NixOS/rfcs/pull/72), all new documentation content should be written in [CommonMark](https://commonmark.org/) Markdown dialect.
|
||||
|
||||
Additional syntax extensions are available, all of which can be used in NixOS option documentation. The following extensions are currently used:
|
||||
|
||||
- []{#ssec-contributing-markup-anchors}
|
||||
Explicitly defined **anchors** on headings, to allow linking to sections. These should be always used, to ensure the anchors can be linked even when the heading text changes, and to prevent conflicts between [automatically assigned identifiers](https://github.com/jgm/commonmark-hs/blob/master/commonmark-extensions/test/auto_identifiers.md).
|
||||
|
||||
It uses the widely compatible [header attributes](https://github.com/jgm/commonmark-hs/blob/master/commonmark-extensions/test/attributes.md) syntax:
|
||||
|
||||
```markdown
|
||||
## Syntax {#sec-contributing-markup}
|
||||
```
|
||||
|
||||
::: {.note}
|
||||
NixOS option documentation does not support headings in general.
|
||||
:::
|
||||
|
||||
- []{#ssec-contributing-markup-anchors-inline}
|
||||
  **Inline anchors**, which allow linking to arbitrary places in the text (e.g. individual list items, sentences…).
|
||||
|
||||
They are defined using a hybrid of the link syntax with the attributes syntax known from headings, called [bracketed spans](https://github.com/jgm/commonmark-hs/blob/master/commonmark-extensions/test/bracketed_spans.md):
|
||||
|
||||
```markdown
|
||||
- []{#ssec-gnome-hooks-glib} `glib` setup hook will populate `GSETTINGS_SCHEMAS_PATH` and then `wrapGAppsHook` will prepend it to `XDG_DATA_DIRS`.
|
||||
```
|
||||
|
||||
- []{#ssec-contributing-markup-automatic-links}
|
||||
If you **omit a link text** for a link pointing to a section, the text will be substituted automatically. For example, `[](#chap-contributing)` will result in [](#chap-contributing).
|
||||
|
||||
This syntax is taken from [MyST](https://myst-parser.readthedocs.io/en/latest/using/syntax.html#targets-and-cross-referencing).
|
||||
|
||||
- []{#ssec-contributing-markup-inline-roles}
|
||||
If you want to link to a man page, you can use `` {manpage}`nix.conf(5)` ``, which will turn into {manpage}`nix.conf(5)`. The references will turn into links when a mapping exists in {file}`doc/manpage-urls.json`.
|
||||
|
||||
A few markups for other kinds of literals are also available:
|
||||
|
||||
- `` {command}`rm -rfi` `` turns into {command}`rm -rfi`
|
||||
- `` {env}`XDG_DATA_DIRS` `` turns into {env}`XDG_DATA_DIRS`
|
||||
- `` {file}`/etc/passwd` `` turns into {file}`/etc/passwd`
|
||||
- `` {option}`networking.useDHCP` `` turns into {option}`networking.useDHCP`
|
||||
- `` {var}`/etc/passwd` `` turns into {var}`/etc/passwd`
|
||||
|
||||
These literal kinds are used mostly in NixOS option documentation.
|
||||
|
||||
  This syntax is taken from [MyST](https://myst-parser.readthedocs.io/en/latest/syntax/syntax.html#roles-an-in-line-extension-point). The feature itself originates from [reStructuredText](https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html#role-manpage), though with slightly different syntax.
|
||||
|
||||
- []{#ssec-contributing-markup-admonitions}
|
||||
**Admonitions**, set off from the text to bring attention to something.
|
||||
|
||||
It uses pandoc’s [fenced `div`s syntax](https://github.com/jgm/commonmark-hs/blob/master/commonmark-extensions/test/fenced_divs.md):
|
||||
|
||||
```markdown
|
||||
::: {.warning}
|
||||
This is a warning
|
||||
:::
|
||||
```
|
||||
|
||||
which renders as
|
||||
|
||||
> ::: {.warning}
|
||||
> This is a warning.
|
||||
> :::
|
||||
|
||||
The following are supported:
|
||||
|
||||
- [`caution`](https://tdg.docbook.org/tdg/5.0/caution.html)
|
||||
- [`important`](https://tdg.docbook.org/tdg/5.0/important.html)
|
||||
- [`note`](https://tdg.docbook.org/tdg/5.0/note.html)
|
||||
- [`tip`](https://tdg.docbook.org/tdg/5.0/tip.html)
|
||||
- [`warning`](https://tdg.docbook.org/tdg/5.0/warning.html)
|
||||
|
||||
- []{#ssec-contributing-markup-definition-lists}
|
||||
[**Definition lists**](https://github.com/jgm/commonmark-hs/blob/master/commonmark-extensions/test/definition_lists.md), for defining a group of terms:
|
||||
|
||||
```markdown
|
||||
pear
|
||||
: green or yellow bulbous fruit
|
||||
|
||||
watermelon
|
||||
: green fruit with red flesh
|
||||
```
|
||||
|
||||
which renders as
|
||||
|
||||
> pear
|
||||
> : green or yellow bulbous fruit
|
||||
>
|
||||
> watermelon
|
||||
> : green fruit with red flesh
|
||||
This section has been moved to [doc/README.md](https://github.com/NixOS/nixpkgs/blob/master/doc/README.md).
|
||||
|
|
|
@ -1,77 +1,3 @@
|
|||
# Quick Start to Adding a Package {#chap-quick-start}
|
||||
|
||||
To add a package to Nixpkgs:
|
||||
|
||||
1. Check out the Nixpkgs source tree:
|
||||
|
||||
```ShellSession
|
||||
$ git clone https://github.com/NixOS/nixpkgs
|
||||
$ cd nixpkgs
|
||||
```
|
||||
|
||||
2. Find a good place in the Nixpkgs tree to add the Nix expression for your package. For instance, a library package typically goes into `pkgs/development/libraries/pkgname`, while a web browser goes into `pkgs/applications/networking/browsers/pkgname`. See [](#sec-organisation) for some hints on the tree organisation. Create a directory for your package, e.g.
|
||||
|
||||
```ShellSession
|
||||
$ mkdir pkgs/development/libraries/libfoo
|
||||
```
|
||||
|
||||
3. In the package directory, create a Nix expression — a piece of code that describes how to build the package. In this case, it should be a _function_ that is called with the package dependencies as arguments, and returns a build of the package in the Nix store. The expression should usually be called `default.nix`. A minimal example is sketched at the end of this list.
|
||||
|
||||
```ShellSession
|
||||
$ emacs pkgs/development/libraries/libfoo/default.nix
|
||||
$ git add pkgs/development/libraries/libfoo/default.nix
|
||||
```
|
||||
|
||||
You can have a look at the existing Nix expressions under `pkgs/` to see how it’s done. Here are some good ones:
|
||||
|
||||
- GNU Hello: [`pkgs/applications/misc/hello/default.nix`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/misc/hello/default.nix). Trivial package, which specifies some `meta` attributes which is good practice.
|
||||
|
||||
- GNU cpio: [`pkgs/tools/archivers/cpio/default.nix`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/archivers/cpio/default.nix). Also a simple package. The generic builder in `stdenv` does everything for you. It has no dependencies beyond `stdenv`.
|
||||
|
||||
- GNU Multiple Precision arithmetic library (GMP): [`pkgs/development/libraries/gmp/5.1.x.nix`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/libraries/gmp/5.1.x.nix). Also done by the generic builder, but has a dependency on `m4`.
|
||||
|
||||
- Pan, a GTK-based newsreader: [`pkgs/applications/networking/newsreaders/pan/default.nix`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/networking/newsreaders/pan/default.nix). Has an optional dependency on `gtkspell`, which is only built if `spellCheck` is `true`.
|
||||
|
||||
- Apache HTTPD: [`pkgs/servers/http/apache-httpd/2.4.nix`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/servers/http/apache-httpd/2.4.nix). A bunch of optional features, variable substitutions in the configure flags, a post-install hook, and miscellaneous hackery.
|
||||
|
||||
   - buildMozillaMach: [`pkgs/applications/networking/browsers/firefox/common.nix`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/networking/browsers/firefox/common.nix). A reusable build function for Firefox, Thunderbird and Librewolf.
|
||||
|
||||
- JDiskReport, a Java utility: [`pkgs/tools/misc/jdiskreport/default.nix`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/misc/jdiskreport/default.nix). Nixpkgs doesn’t have a decent `stdenv` for Java yet so this is pretty ad-hoc.
|
||||
|
||||
- XML::Simple, a Perl module: [`pkgs/top-level/perl-packages.nix`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/perl-packages.nix) (search for the `XMLSimple` attribute). Most Perl modules are so simple to build that they are defined directly in `perl-packages.nix`; no need to make a separate file for them.
|
||||
|
||||
- Adobe Reader: [`pkgs/applications/misc/adobe-reader/default.nix`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/misc/adobe-reader/default.nix). Shows how binary-only packages can be supported. In particular the [builder](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/misc/adobe-reader/builder.sh) uses `patchelf` to set the RUNPATH and ELF interpreter of the executables so that the right libraries are found at runtime.
|
||||
|
||||
Some notes:
|
||||
|
||||
- All [`meta`](#chap-meta) attributes are optional, but it’s still a good idea to provide at least the `description`, `homepage` and [`license`](#sec-meta-license).
|
||||
|
||||
   - You can use `nix-prefetch-url url` to get the SHA-256 hash of source distributions. Similar commands, such as `nix-prefetch-git` and `nix-prefetch-hg`, are available in the `nix-prefetch-scripts` package.
|
||||
|
||||
- A list of schemes for `mirror://` URLs can be found in [`pkgs/build-support/fetchurl/mirrors.nix`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/build-support/fetchurl/mirrors.nix).
|
||||
|
||||
   The exact syntax and semantics of the Nix expression language, including the built-in functions, are described in the Nix manual in the [chapter on writing Nix expressions](https://hydra.nixos.org/job/nix/trunk/tarball/latest/download-by-type/doc/manual/#chap-writing-nix-expressions).
|
||||
|
||||
4. Add a call to the function defined in the previous step to [`pkgs/top-level/all-packages.nix`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/top-level/all-packages.nix) with some descriptive name for the variable, e.g. `libfoo`.
|
||||
|
||||
```ShellSession
|
||||
$ emacs pkgs/top-level/all-packages.nix
|
||||
```
|
||||
|
||||
The attributes in that file are sorted by category (like “Development / Libraries”) that more-or-less correspond to the directory structure of Nixpkgs, and then by attribute name.
|
||||
|
||||
5. To test whether the package builds, run the following command from the root of the nixpkgs source tree:
|
||||
|
||||
```ShellSession
|
||||
$ nix-build -A libfoo
|
||||
```
|
||||
|
||||
where `libfoo` should be the variable name defined in the previous step. You may want to add the flag `-K` to keep the temporary build directory in case something fails. If the build succeeds, a symlink `./result` to the package in the Nix store is created.
|
||||
|
||||
6. If you want to install the package into your profile (optional), do
|
||||
|
||||
```ShellSession
|
||||
$ nix-env -f . -iA libfoo
|
||||
```
|
||||
|
||||
7. Optionally commit the new package and open a pull request [to nixpkgs](https://github.com/NixOS/nixpkgs/pulls), or use [the Patches category](https://discourse.nixos.org/t/about-the-patches-category/477) on Discourse for sending a patch without a GitHub account.
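For reference, a minimal `default.nix` for the hypothetical `libfoo` used in the steps above could look like the following sketch (the URL is a placeholder and the hash must be filled in with the real value):

```nix
{ lib, stdenv, fetchurl }:

stdenv.mkDerivation rec {
  pname = "libfoo";
  version = "1.0";

  src = fetchurl {
    url = "https://example.org/libfoo-${version}.tar.gz";  # placeholder URL
    hash = lib.fakeHash;  # replace with the real hash once known
  };

  meta = with lib; {
    description = "An example library";
    homepage = "https://example.org";
    license = licenses.mit;
  };
}
```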
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
|
|
@ -1,322 +1,35 @@
|
|||
# Reviewing contributions {#chap-reviewing-contributions}
|
||||
|
||||
::: {.warning}
|
||||
The following section is a draft, and the policy for reviewing is still being discussed in issues such as [#11166](https://github.com/NixOS/nixpkgs/issues/11166) and [#20836](https://github.com/NixOS/nixpkgs/issues/20836).
|
||||
:::
|
||||
|
||||
The Nixpkgs project receives a fairly high number of contributions via GitHub pull requests. Reviewing and approving these is an important task and a way to contribute to the project.
|
||||
|
||||
The high change rate of Nixpkgs makes any pull request that remains open for too long subject to conflicts that will require extra work from the submitter or the merger. Reviewing pull requests in a timely manner and being responsive to the comments is the key to avoid this issue. GitHub provides sort filters that can be used to see the [most recently](https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc) and the [least recently](https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-asc) updated pull requests. We highly encourage looking at [this list of ready to merge, unreviewed pull requests](https://github.com/NixOS/nixpkgs/pulls?q=is%3Apr+is%3Aopen+review%3Anone+status%3Asuccess+-label%3A%222.status%3A+work-in-progress%22+no%3Aproject+no%3Aassignee+no%3Amilestone).
|
||||
|
||||
When reviewing a pull request, please always be nice and polite. Controversial changes can lead to controversial opinions, but it is important to respect every community member and their work.
|
||||
|
||||
GitHub provides reactions as a simple and quick way to provide feedback to pull requests or any comments. The thumb-down reaction should be used with care and if possible accompanied with some explanation so the submitter has directions to improve their contribution.
|
||||
|
||||
Pull request reviews should include a list of what has been reviewed in a comment, so other reviewers and mergers can know the state of the review.
|
||||
|
||||
All the review template samples provided in this section are generic and meant as examples. Their usage is optional and the reviewer is free to adapt them to their liking.
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
## Package updates {#reviewing-contributions-package-updates}
|
||||
|
||||
A package update is the most trivial and common type of pull request. These pull requests mainly consist of updating the version part of the package name and the source hash.
|
||||
|
||||
It can happen that non-trivial updates include patches or more complex changes.
|
||||
|
||||
Reviewing process:
|
||||
|
||||
- Ensure that the package versioning fits the guidelines.
|
||||
- Ensure that the commit text fits the guidelines.
|
||||
- Ensure that the package maintainers are notified.
|
||||
- [CODEOWNERS](https://help.github.com/articles/about-codeowners) will make GitHub notify users based on the submitted changes, but it can happen that it misses some of the package maintainers.
|
||||
- Ensure that the meta field information is correct.
|
||||
- License can change with version updates, so it should be checked to match the upstream license.
|
||||
- If the package has no maintainer, a maintainer must be set. This can be the update submitter or a community member that accepts to take maintainership of the package.
|
||||
- Ensure that the code contains no typos.
|
||||
- Building the package locally.
|
||||
  - Pull requests are often targeted to the master or staging branch, and building the pull request locally when it is submitted can trigger many source builds.
|
||||
- It is possible to rebase the changes on nixos-unstable or nixpkgs-unstable for easier review by running the following commands from a nixpkgs clone.
|
||||
|
||||
```ShellSession
|
||||
$ git fetch origin nixos-unstable
|
||||
$ git fetch origin pull/PRNUMBER/head
|
||||
$ git rebase --onto nixos-unstable BASEBRANCH FETCH_HEAD
|
||||
```
|
||||
|
||||
- The first command fetches the nixos-unstable branch.
|
||||
- The second command fetches the pull request changes, `PRNUMBER` is the number at the end of the pull request title and `BASEBRANCH` the base branch of the pull request.
|
||||
- The third command rebases the pull request changes to the nixos-unstable branch.
|
||||
  - The [nixpkgs-review](https://github.com/Mic92/nixpkgs-review) tool can be used to review a pull request's content in a single command. `PRNUMBER` should be replaced by the number at the end of the pull request title. You can also provide the full GitHub pull request URL.
|
||||
|
||||
```ShellSession
|
||||
$ nix-shell -p nixpkgs-review --run "nixpkgs-review pr PRNUMBER"
|
||||
```
|
||||
- Running every binary.
|
||||
|
||||
Sample template for a package update review is provided below.
|
||||
|
||||
```markdown
|
||||
##### Reviewed points
|
||||
|
||||
- [ ] package name fits guidelines
|
||||
- [ ] package version fits guidelines
|
||||
- [ ] package build on ARCHITECTURE
|
||||
- [ ] executables tested on ARCHITECTURE
|
||||
- [ ] all depending packages build
|
||||
- [ ] patches have a comment describing either the upstream URL or a reason why the patch wasn't upstreamed
|
||||
- [ ] patches that are remotely available are fetched rather than vendored
|
||||
|
||||
##### Possible improvements
|
||||
|
||||
##### Comments
|
||||
```
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
||||
## New packages {#reviewing-contributions-new-packages}
|
||||
|
||||
New packages are a common type of pull request. These pull requests consist of adding a new Nix expression for a package.
|
||||
|
||||
Review process:
|
||||
|
||||
- Ensure that the package versioning fits the guidelines.
|
||||
- Ensure that the commit name fits the guidelines.
|
||||
- Ensure that the meta fields contain correct information.
|
||||
- License must match the upstream license.
|
||||
- Platforms should be set (or the package will not get binary substitutes).
|
||||
- Maintainers must be set. This can be the package submitter or a community member that accepts taking up maintainership of the package.
|
||||
- Report detected typos.
|
||||
- Ensure the package source:
|
||||
- Uses mirror URLs when available.
|
||||
- Uses the most appropriate functions (e.g. packages from GitHub should use `fetchFromGitHub`).
|
||||
- Building the package locally.
|
||||
- Running every binary.
|
||||
|
||||
Sample template for a new package review is provided below.
|
||||
|
||||
```markdown
|
||||
##### Reviewed points
|
||||
|
||||
- [ ] package path fits guidelines
|
||||
- [ ] package name fits guidelines
|
||||
- [ ] package version fits guidelines
|
||||
- [ ] package build on ARCHITECTURE
|
||||
- [ ] executables tested on ARCHITECTURE
|
||||
- [ ] `meta.description` is set and fits guidelines
|
||||
- [ ] `meta.license` fits upstream license
|
||||
- [ ] `meta.platforms` is set
|
||||
- [ ] `meta.maintainers` is set
|
||||
- [ ] build time only dependencies are declared in `nativeBuildInputs`
|
||||
- [ ] source is fetched using the appropriate function
|
||||
- [ ] the list of `phases` is not overridden
|
||||
- [ ] when a phase (like `installPhase`) is overridden it starts with `runHook preInstall` and ends with `runHook postInstall`.
|
||||
- [ ] patches have a comment describing either the upstream URL or a reason why the patch wasn't upstreamed
|
||||
- [ ] patches that are remotely available are fetched rather than vendored
|
||||
|
||||
##### Possible improvements
|
||||
|
||||
##### Comments
|
||||
```
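One of the checklist items above concerns overridden phases. A compliant `installPhase`, for example, keeps the surrounding hooks (a sketch; the installed file name is illustrative):

```nix
installPhase = ''
  runHook preInstall

  install -Dm755 build/foo $out/bin/foo   # "foo" is an illustrative binary name

  runHook postInstall
'';
```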
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
||||
## Module updates {#reviewing-contributions-module-updates}
|
||||
|
||||
Module updates are submissions changing modules in some way. These often contain changes to existing options or introduce new ones.
|
||||
|
||||
Reviewing process:
|
||||
|
||||
- Ensure that the module maintainers are notified.
|
||||
- [CODEOWNERS](https://help.github.com/articles/about-codeowners/) will make GitHub notify users based on the submitted changes, but it can happen that it misses some of the package maintainers.
|
||||
- Ensure that the module tests, if any, are succeeding.
|
||||
- Ensure that the introduced options are correct.
|
||||
  - Type should be appropriate (string-related types differ in their merging capabilities; the `loaOf` and `string` types are deprecated).
|
||||
- Description, default and example should be provided.
|
||||
- Ensure that option changes are backward compatible.
|
||||
  - `mkRenamedOptionModuleWith` provides a way to make option changes backward compatible (see the sketch after this list).
|
||||
- Ensure that removed options are declared with `mkRemovedOptionModule`
|
||||
- Ensure that changes that are not backward compatible are mentioned in release notes.
|
||||
- Ensure that documentation affected by the change is updated.
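A sketch of the backward-compatibility helpers mentioned above (the option paths and release number are illustrative):

```nix
{ lib, ... }:
{
  imports = [
    # Keep the old option name working while pointing users at the new one.
    (lib.mkRenamedOptionModuleWith {
      sinceRelease = 2311;
      from = [ "services" "foo" "useLegacySocket" ];
      to = [ "services" "foo" "enableSocket" ];
    })
    # Give a helpful error message for an option that was removed outright.
    (lib.mkRemovedOptionModule [ "services" "foo" "extraPlugins" ]
      "Plugins are now configured through services.foo.settings.")
  ];
}
```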
|
||||
|
||||
Sample template for a module update review is provided below.
|
||||
|
||||
```markdown
|
||||
##### Reviewed points
|
||||
|
||||
- [ ] changes are backward compatible
|
||||
- [ ] removed options are declared with `mkRemovedOptionModule`
|
||||
- [ ] changes that are not backward compatible are documented in release notes
|
||||
- [ ] module tests succeed on ARCHITECTURE
|
||||
- [ ] options types are appropriate
|
||||
- [ ] options description is set
|
||||
- [ ] options example is provided
|
||||
- [ ] documentation affected by the changes is updated
|
||||
|
||||
##### Possible improvements
|
||||
|
||||
##### Comments
|
||||
```
|
||||
This section has been moved to [nixos/README.md](https://github.com/NixOS/nixpkgs/blob/master/nixos/README.md).
|
||||
|
||||
## New modules {#reviewing-contributions-new-modules}
|
||||
|
||||
New module submissions introduce a new module to NixOS.
|
||||
|
||||
Reviewing process:
|
||||
|
||||
- Ensure that the module tests, if any, are succeeding.
|
||||
- Ensure that the introduced options are correct.
|
||||
  - Type should be appropriate (string-related types differ in their merging capabilities; the `loaOf` and `string` types are deprecated).
|
||||
- Description, default and example should be provided.
|
||||
- Ensure that module `meta` field is present
|
||||
- Maintainers should be declared in `meta.maintainers`.
|
||||
- Module documentation should be declared with `meta.doc`.
|
||||
- Ensure that the module respects other modules' functionality.
|
||||
- For example, enabling a module should not open firewall ports by default.
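For instance, a new module would typically declare its own opt-in firewall option and a `meta` section, along these lines (a sketch; the service name, port, and documentation file are illustrative):

```nix
{ config, lib, ... }:
let
  cfg = config.services.foo;   # "foo" is an illustrative service name
in
{
  options.services.foo.openFirewall = lib.mkOption {
    type = lib.types.bool;
    default = false;           # do not open ports unless the user opts in
    description = "Whether to open the service's port in the firewall.";
  };

  config = lib.mkIf cfg.openFirewall {
    networking.firewall.allowedTCPPorts = [ 8080 ];  # illustrative port
  };

  meta = {
    maintainers = with lib.maintainers; [ ];  # add the module maintainers here
    doc = ./foo.md;                           # module documentation, rendered into the manual
  };
}
```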
|
||||
|
||||
Sample template for a new module review is provided below.
|
||||
|
||||
```markdown
|
||||
##### Reviewed points
|
||||
|
||||
- [ ] module path fits the guidelines
|
||||
- [ ] module tests succeed on ARCHITECTURE
|
||||
- [ ] options have appropriate types
|
||||
- [ ] options have default
|
||||
- [ ] options have example
|
||||
- [ ] options have descriptions
|
||||
- [ ] No unneeded package is added to environment.systemPackages
|
||||
- [ ] meta.maintainers is set
|
||||
- [ ] module documentation is declared in meta.doc
|
||||
|
||||
##### Possible improvements
|
||||
|
||||
##### Comments
|
||||
```
|
||||
This section has been moved to [nixos/README.md](https://github.com/NixOS/nixpkgs/blob/master/nixos/README.md).
|
||||
|
||||
## Individual maintainer list {#reviewing-contributions-individual-maintainer-list}
|
||||
|
||||
When adding users to `maintainers/maintainer-list.nix`, the following
|
||||
checks should be performed:
|
||||
|
||||
- If the user has specified a GPG key, verify that the commit is
|
||||
signed by their key.
|
||||
|
||||
First, validate that the commit adding the maintainer is signed by
|
||||
the key the maintainer listed. Check out the pull request and
|
||||
compare its signing key with the listed key in the commit.
|
||||
|
||||
If the commit is not signed or it is signed by a different user, ask
|
||||
them to either recommit using that key or to remove their key
|
||||
information.
|
||||
|
||||
Given a maintainer entry like this:
|
||||
|
||||
``` nix
|
||||
{
|
||||
example = {
|
||||
email = "user@example.com";
|
||||
name = "Example User";
|
||||
keys = [{
|
||||
fingerprint = "0000 0000 2A70 6423 0AED 3C11 F04F 7A19 AAA6 3AFE";
|
||||
}];
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
First receive their key from a keyserver:
|
||||
|
||||
$ gpg --recv-keys 0xF04F7A19AAA63AFE
|
||||
gpg: key 0xF04F7A19AAA63AFE: public key "Example <user@example.com>" imported
|
||||
gpg: Total number processed: 1
|
||||
gpg: imported: 1
|
||||
|
||||
Then check the commit is signed by that key:
|
||||
|
||||
$ git log --show-signature
|
||||
commit b87862a4f7d32319b1de428adb6cdbdd3a960153
|
||||
gpg: Signature made Wed Mar 12 13:32:24 2003 +0000
|
||||
gpg: using RSA key 000000002A7064230AED3C11F04F7A19AAA63AFE
|
||||
gpg: Good signature from "Example User <user@example.com>
|
||||
Author: Example User <user@example.com>
|
||||
Date: Wed Mar 12 13:32:24 2003 +0000
|
||||
|
||||
maintainers: adding example
|
||||
|
||||
and validate that there is a `Good signature` and the printed key
|
||||
matches the user's submitted key.
|
||||
|
||||
Note: GitHub's "Verified" label does not display the user's full key
|
||||
fingerprint, and should not be used for validating the key matches.
|
||||
|
||||
- If the user has specified a `github` account name, ensure they have
|
||||
also specified a `githubId` and verify the two match.
|
||||
|
||||
Maintainer entries that include a `github` field must also include
|
||||
their `githubId`. People can and do change their GitHub name
|
||||
frequently, and the ID is used as the official and stable identity
|
||||
of the maintainer.
|
||||
|
||||
Given a maintainer entry like this:
|
||||
|
||||
``` nix
|
||||
{
|
||||
example = {
|
||||
email = "user@example.com";
|
||||
name = "Example User";
|
||||
github = "ghost";
|
||||
githubId = 10137;
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
First, make sure that the listed GitHub handle matches the author of
|
||||
the commit.
|
||||
|
||||
Then, visit the URL `https://api.github.com/users/ghost` and
|
||||
validate that the `id` field matches the provided `githubId`.
|
||||
This section has been moved to [maintainers/README.md](https://github.com/NixOS/nixpkgs/blob/master/maintainers/README.md).
|
||||
|
||||
## Maintainer teams {#reviewing-contributions-maintainer-teams}
|
||||
|
||||
Feel free to create a new maintainer team in `maintainers/team-list.nix`
|
||||
when a group is collectively responsible for a collection of packages.
|
||||
Use taste and personal judgement when deciding if a team is warranted.
|
||||
|
||||
Teams are allowed to define their own rules about membership.
|
||||
|
||||
For example, some teams will represent a business or other group which
|
||||
wants to carefully track its members. Other teams may be very open about
|
||||
who can join, and allow anybody to participate.
|
||||
|
||||
When reviewing changes to a team, read the team's scope and the context
|
||||
around the member list for indications about the team's membership
|
||||
policy.
|
||||
|
||||
In any case, request reviews from the existing team members. If the team
|
||||
lists no specific membership policy, feel free to merge changes to the
|
||||
team after giving the existing members a few days to respond.
|
||||
|
||||
*Important:* If a team says it is a closed group, do not merge additions
|
||||
to the team without an approval by at least one existing member.
|
||||
This section has been moved to [maintainers/README.md](https://github.com/NixOS/nixpkgs/blob/master/maintainers/README.md).
|
||||
|
||||
## Other submissions {#reviewing-contributions-other-submissions}
|
||||
|
||||
Other types of submissions require different reviewing steps.
|
||||
|
||||
If you consider yourself to have enough knowledge and experience in a topic and would like to become a long-term reviewer for related submissions, please contact the current reviewers for that topic. They will give you information about the reviewing process. The main reviewers for a topic can be hard to find, as there is no list, but checking past pull requests to see who reviewed them, or git-blaming the code to see who committed to that topic, can give some hints.
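For example, the git history of the files related to a topic can hint at who has been reviewing and committing there (the path below is only an illustration; substitute the files your topic actually touches):

```ShellSession
$ git shortlog --summary --numbered -- nixos/modules/virtualisation/
$ git log --oneline --no-merges -- nixos/modules/virtualisation/ | head
```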
|
||||
|
||||
Container system, boot system, and library changes are some examples of pull requests fitting this category.
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
## Merging pull requests {#reviewing-contributions--merging-pull-requests}
|
||||
|
||||
It is possible for community members that have enough knowledge and experience on a particular topic to contribute by merging pull requests.
|
||||
|
||||
In case the PR is stuck waiting for the original author to apply a trivial
|
||||
change (a typo, capitalisation change, etc.) and the author allowed the members
|
||||
to modify the PR, consider applying it yourself (or committing the existing review
|
||||
suggestion). You should pay extra attention to make sure the addition doesn't go
|
||||
against the idea of the original PR and would not be opposed by the author.
|
||||
|
||||
<!--
|
||||
The following paragraphs about how to deal with inactive contributors are just a proposal and should be modified to whatever the community agrees is the right policy.
|
||||
|
||||
Please note that contributors with commit rights who have been inactive for more than three months will have their commit rights revoked.
|
||||
-->
|
||||
|
||||
Please see the discussion in [GitHub nixpkgs issue #50105](https://github.com/NixOS/nixpkgs/issues/50105) for information on how to proceed to be granted this level of access.
|
||||
|
||||
In case a contributor definitively leaves the Nix community, they should create an issue or post on [Discourse](https://discourse.nixos.org) with references to the packages and modules they maintain so that maintainership can be taken over by other contributors.
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
|
|
@ -1,16 +0,0 @@
|
|||
digraph {
|
||||
"small changes" [shape=none]
|
||||
"mass-rebuilds and other large changes" [shape=none]
|
||||
"critical security fixes" [shape=none]
|
||||
"broken staging-next fixes" [shape=none]
|
||||
|
||||
"small changes" -> master
|
||||
"mass-rebuilds and other large changes" -> staging
|
||||
"critical security fixes" -> master
|
||||
"broken staging-next fixes" -> "staging-next"
|
||||
|
||||
"staging-next" -> master [color="#E85EB0"] [label="stabilization ends"] [fontcolor="#E85EB0"]
|
||||
"staging" -> "staging-next" [color="#E85EB0"] [label="stabilization starts"] [fontcolor="#E85EB0"]
|
||||
|
||||
master -> "staging-next" -> staging [color="#5F5EE8"] [label="every six hours (GitHub Action)"] [fontcolor="#5F5EE8"]
|
||||
}
|
|
@ -1,102 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
|
||||
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
|
||||
<!-- Generated by graphviz version 7.1.0 (0)
|
||||
-->
|
||||
<!-- Pages: 1 -->
|
||||
<svg width="743pt" height="291pt"
|
||||
viewBox="0.00 0.00 743.00 291.00" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
|
||||
<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 287)">
|
||||
<polygon fill="white" stroke="none" points="-4,4 -4,-287 739,-287 739,4 -4,4"/>
|
||||
<!-- small changes -->
|
||||
<g id="node1" class="node">
|
||||
<title>small changes</title>
|
||||
<text text-anchor="middle" x="59" y="-261.3" font-family="Times,serif" font-size="14.00">small changes</text>
|
||||
</g>
|
||||
<!-- master -->
|
||||
<g id="node5" class="node">
|
||||
<title>master</title>
|
||||
<ellipse fill="none" stroke="black" cx="139" cy="-192" rx="43.59" ry="18"/>
|
||||
<text text-anchor="middle" x="139" y="-188.3" font-family="Times,serif" font-size="14.00">master</text>
|
||||
</g>
|
||||
<!-- small changes->master -->
|
||||
<g id="edge1" class="edge">
|
||||
<title>small changes->master</title>
|
||||
<path fill="none" stroke="black" d="M77.96,-247.17C88.42,-237.89 101.55,-226.23 112.96,-216.11"/>
|
||||
<polygon fill="black" stroke="black" points="114.99,-218.99 120.14,-209.74 110.34,-213.76 114.99,-218.99"/>
|
||||
</g>
|
||||
<!-- mass-rebuilds and other large changes -->
|
||||
<g id="node2" class="node">
|
||||
<title>mass-rebuilds and other large changes</title>
|
||||
<text text-anchor="middle" x="588" y="-101.3" font-family="Times,serif" font-size="14.00">mass-rebuilds and other large changes</text>
|
||||
</g>
|
||||
<!-- staging -->
|
||||
<g id="node6" class="node">
|
||||
<title>staging</title>
|
||||
<ellipse fill="none" stroke="black" cx="438" cy="-18" rx="45.49" ry="18"/>
|
||||
<text text-anchor="middle" x="438" y="-14.3" font-family="Times,serif" font-size="14.00">staging</text>
|
||||
</g>
|
||||
<!-- mass-rebuilds and other large changes->staging -->
|
||||
<g id="edge2" class="edge">
|
||||
<title>mass-rebuilds and other large changes->staging</title>
|
||||
<path fill="none" stroke="black" d="M587.48,-87.47C586.26,-76.55 582.89,-62.7 574,-54 553.19,-33.63 522.2,-24.65 495.05,-20.86"/>
|
||||
<polygon fill="black" stroke="black" points="495.53,-17.39 485.2,-19.71 494.72,-24.35 495.53,-17.39"/>
|
||||
</g>
|
||||
<!-- critical security fixes -->
|
||||
<g id="node3" class="node">
|
||||
<title>critical security fixes</title>
|
||||
<text text-anchor="middle" x="219" y="-261.3" font-family="Times,serif" font-size="14.00">critical security fixes</text>
|
||||
</g>
|
||||
<!-- critical security fixes->master -->
|
||||
<g id="edge3" class="edge">
|
||||
<title>critical security fixes->master</title>
|
||||
<path fill="none" stroke="black" d="M200.04,-247.17C189.58,-237.89 176.45,-226.23 165.04,-216.11"/>
|
||||
<polygon fill="black" stroke="black" points="167.66,-213.76 157.86,-209.74 163.01,-218.99 167.66,-213.76"/>
|
||||
</g>
|
||||
<!-- broken staging-next fixes -->
|
||||
<g id="node4" class="node">
|
||||
<title>broken staging-next fixes</title>
|
||||
<text text-anchor="middle" x="414" y="-188.3" font-family="Times,serif" font-size="14.00">broken staging-next fixes</text>
|
||||
</g>
|
||||
<!-- staging-next -->
|
||||
<g id="node7" class="node">
|
||||
<title>staging-next</title>
|
||||
<ellipse fill="none" stroke="black" cx="272" cy="-105" rx="68.79" ry="18"/>
|
||||
<text text-anchor="middle" x="272" y="-101.3" font-family="Times,serif" font-size="14.00">staging-next</text>
|
||||
</g>
|
||||
<!-- broken staging-next fixes->staging-next -->
|
||||
<g id="edge4" class="edge">
|
||||
<title>broken staging-next fixes->staging-next</title>
|
||||
<path fill="none" stroke="black" d="M410.2,-174.42C406.88,-163.48 400.98,-149.62 391,-141 377.77,-129.56 360.96,-121.86 344.17,-116.67"/>
|
||||
<polygon fill="black" stroke="black" points="345.21,-113.33 334.63,-114.02 343.33,-120.07 345.21,-113.33"/>
|
||||
</g>
|
||||
<!-- master->staging-next -->
|
||||
<g id="edge7" class="edge">
|
||||
<title>master->staging-next</title>
|
||||
<path fill="none" stroke="#5f5ee8" d="M96.55,-187.26C53.21,-181.83 -4.5,-169.14 20,-141 41.99,-115.74 126.36,-108.13 191.48,-106.11"/>
|
||||
<polygon fill="#5f5ee8" stroke="#5f5ee8" points="191.57,-109.61 201.47,-105.85 191.38,-102.62 191.57,-109.61"/>
|
||||
<text text-anchor="middle" x="133" y="-144.8" font-family="Times,serif" font-size="14.00" fill="#5f5ee8">every six hours (GitHub Action)</text>
|
||||
</g>
|
||||
<!-- staging->staging-next -->
|
||||
<g id="edge6" class="edge">
|
||||
<title>staging->staging-next</title>
|
||||
<path fill="none" stroke="#e85eb0" d="M434.55,-36.2C431.48,-47.12 425.89,-60.72 416,-69 397.61,-84.41 373.51,-93.23 350.31,-98.23"/>
|
||||
<polygon fill="#e85eb0" stroke="#e85eb0" points="349.67,-94.79 340.5,-100.1 350.98,-101.66 349.67,-94.79"/>
|
||||
<text text-anchor="middle" x="493.5" y="-57.8" font-family="Times,serif" font-size="14.00" fill="#e85eb0">stabilization starts</text>
|
||||
</g>
|
||||
<!-- staging-next->master -->
|
||||
<g id="edge5" class="edge">
|
||||
<title>staging-next->master</title>
|
||||
<path fill="none" stroke="#e85eb0" d="M268.22,-123.46C265.05,-134.22 259.46,-147.52 250,-156 233.94,-170.4 211.98,-178.87 191.83,-183.86"/>
|
||||
<polygon fill="#e85eb0" stroke="#e85eb0" points="191.35,-180.38 182.34,-185.96 192.86,-187.22 191.35,-180.38"/>
|
||||
<text text-anchor="middle" x="323.5" y="-144.8" font-family="Times,serif" font-size="14.00" fill="#e85eb0">stabilization ends</text>
|
||||
</g>
|
||||
<!-- staging-next->staging -->
|
||||
<g id="edge8" class="edge">
|
||||
<title>staging-next->staging</title>
|
||||
<path fill="none" stroke="#5f5ee8" d="M221.07,-92.46C194.72,-84.14 170.92,-71.32 186,-54 210.78,-25.54 314.74,-19.48 381.15,-18.6"/>
|
||||
<polygon fill="#5f5ee8" stroke="#5f5ee8" points="380.79,-22.1 390.76,-18.51 380.73,-15.1 380.79,-22.1"/>
|
||||
<text text-anchor="middle" x="299" y="-57.8" font-family="Times,serif" font-size="14.00" fill="#5f5ee8">every six hours (GitHub Action)</text>
|
||||
</g>
|
||||
</g>
|
||||
</svg>
|
Before Width: | Height: | Size: 5.6 KiB |
|
@ -1,344 +1,88 @@
|
|||
# Submitting changes {#chap-submitting-changes}
|
||||
|
||||
## Making patches {#submitting-changes-making-patches}
|
||||
|
||||
- Read [Manual (How to write packages for Nix)](https://nixos.org/nixpkgs/manual/).
|
||||
|
||||
- Fork [the Nixpkgs repository](https://github.com/nixos/nixpkgs/) on GitHub.
|
||||
|
||||
- Create a branch for your future fix.
|
||||
|
||||
- You can create a branch from a commit of your local `nixos-version`. That will help you avoid additional local compilation, because you will receive packages from the binary cache. For example:
|
||||
|
||||
```ShellSession
|
||||
$ nixos-version --hash
|
||||
0998212
|
||||
$ git checkout 0998212
|
||||
$ git checkout -b 'fix/pkg-name-update'
|
||||
```
|
||||
|
||||
- Please avoid working directly on the `master` branch.
|
||||
|
||||
- Make commits of logical units.
|
||||
|
||||
- If you removed packages or made some major NixOS changes, write about it in the release notes for the next stable release, for example in `nixos/doc/manual/release-notes/rl-2003.xml`.
|
||||
|
||||
- Check for unnecessary whitespace with `git diff --check` before committing.
|
||||
|
||||
- Format the commit message in the following way:
|
||||
|
||||
```
|
||||
(pkg-name | nixos/<module>): (from -> to | init at version | refactor | etc)
|
||||
Additional information.
|
||||
```
|
||||
|
||||
- Examples:
|
||||
- `nginx: init at 2.0.1`
|
||||
- `firefox: 54.0.1 -> 55.0`
|
||||
- `nixos/hydra: add bazBaz option`
|
||||
- `nixos/nginx: refactor config generation`
|
||||
|
||||
- Test your changes. If you work with
|
||||
|
||||
- nixpkgs:
|
||||
|
||||
- update pkg
|
||||
- `nix-env -iA pkg-attribute-name -f <path to your local nixpkgs folder>`
|
||||
- add pkg
|
||||
- Make sure it’s in `pkgs/top-level/all-packages.nix`
|
||||
- `nix-env -iA pkg-attribute-name -f <path to your local nixpkgs folder>`
|
||||
- _If you don’t want to install the pkg in your profile_:
|
||||
- `nix-build -A pkg-attribute-name <path to your local nixpkgs folder>` and check results in the folder `result`. It will appear in the same directory where you did `nix-build`.
|
||||
- If you installed your package with `nix-env`, you can run `nix-env -e pkg-name` where `pkg-name` is as reported by `nix-env -q` to uninstall it from your system.
|
||||
|
||||
- NixOS and its modules:
|
||||
- You can add a new module to your NixOS configuration file (usually `/etc/nixos/configuration.nix`) and run `sudo nixos-rebuild test -I nixpkgs=<path to your local nixpkgs folder> --fast`.
|
||||
|
||||
- If you have commits like `pkg-name: oh, forgot to insert whitespace`, squash them with `git rebase -i`.
|
||||
|
||||
- [Rebase](https://git-scm.com/book/en/v2/Git-Branching-Rebasing) your branch against current `master`.
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
## Submitting changes {#submitting-changes-submitting-changes}
|
||||
|
||||
- Push your changes to your fork of nixpkgs.
|
||||
- Create the pull request
|
||||
- Follow [the contribution guidelines](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md#submitting-changes).
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
## Submitting security fixes {#submitting-changes-submitting-security-fixes}
|
||||
|
||||
Security fixes are submitted in the same way as other changes and thus the same guidelines apply.
|
||||
|
||||
- If a new version fixing the vulnerability has been released, update the package;
|
||||
- If the security fix comes in the form of a patch and a CVE is available, then add the patch to the Nixpkgs tree, and apply it to the package.
|
||||
The name of the patch should be the CVE identifier, e.g. `CVE-2019-13636.patch`. If a patch is fetched, the name needs to be set as well, e.g.:
|
||||
|
||||
```nix
|
||||
(fetchpatch {
|
||||
name = "CVE-2019-11068.patch";
|
||||
url = "https://gitlab.gnome.org/GNOME/libxslt/commit/e03553605b45c88f0b4b2980adfbbb8f6fca2fd6.patch";
|
||||
hash = "sha256-SEKe/8HcW0UBHCfPTTOnpRlzmV2nQPPeL6HOMxBZd14=";
|
||||
})
|
||||
```
|
||||
|
||||
If a security fix applies to both master and a stable release then, similar to regular changes, they are preferably delivered via master first and cherry-picked to the release branch.
|
||||
|
||||
Critical security fixes may bypass the staging branches and be delivered directly to release branches such as `master` and `release-*`.
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
||||
## Deprecating/removing packages {#submitting-changes-deprecating-packages}
|
||||
|
||||
There is currently no policy on when to remove a package.
|
||||
|
||||
Before removing a package, one should try to find a new maintainer or fix smaller issues first.
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
||||
### Steps to remove a package from Nixpkgs {#steps-to-remove-a-package-from-nixpkgs}
|
||||
|
||||
We use jbidwatcher as an example for a discontinued project here.
|
||||
|
||||
1. Have Nixpkgs checked out locally and up to date.
|
||||
1. Create a new branch for your change, e.g. `git checkout -b jbidwatcher`
|
||||
1. Remove the actual package including its directory, e.g. `git rm -rf pkgs/applications/misc/jbidwatcher`
|
||||
1. Remove the package from the list of all packages (`pkgs/top-level/all-packages.nix`).
|
||||
1. Add an alias for the package name in `pkgs/top-level/aliases.nix` (There is also `pkgs/applications/editors/vim/plugins/aliases.nix`. Package sets typically do not have aliases, so we can't add them there.)
|
||||
|
||||
For example in this case:
|
||||
|
||||
```
|
||||
jbidwatcher = throw "jbidwatcher was discontinued in march 2021"; # added 2021-03-15
|
||||
```
|
||||
|
||||
The throw message should briefly explain why the package was removed, for users that still have it installed.
|
||||
|
||||
1. Test if the changes introduced any issues by running `nix-env -qaP -f . --show-trace`. It should show the list of packages without errors.
|
||||
1. Commit the changes. Explain again why the package was removed. If it was declared discontinued upstream, add a link to the source.
|
||||
|
||||
```ShellSession
|
||||
$ git add pkgs/applications/misc/jbidwatcher/default.nix pkgs/top-level/all-packages.nix pkgs/top-level/aliases.nix
|
||||
$ git commit
|
||||
```
|
||||
|
||||
Example commit message:
|
||||
|
||||
```
|
||||
jbidwatcher: remove
|
||||
|
||||
project was discontinued in march 2021. the program does not work anymore because ebay changed the login.
|
||||
|
||||
https://web.archive.org/web/20210315205723/http://www.jbidwatcher.com/
|
||||
```
|
||||
|
||||
1. Push changes to your GitHub fork with `git push`
|
||||
1. Create a pull request against Nixpkgs. Mention the package maintainer.
|
||||
|
||||
This is what the pull request looks like in this case: [https://github.com/NixOS/nixpkgs/pull/116470](https://github.com/NixOS/nixpkgs/pull/116470)
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
||||
## Pull Request Template {#submitting-changes-pull-request-template}
|
||||
|
||||
The pull request template helps determine what steps have been made for a contribution so far, and will help guide maintainers on the status of a change. The motivation section of the PR should include any extra details the title does not address and link any existing issues related to the pull request.
|
||||
|
||||
When a PR is created, it will be pre-populated with some checkboxes detailed below:
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
### Tested using sandboxing {#submitting-changes-tested-with-sandbox}
|
||||
|
||||
When sandbox builds are enabled, Nix will set up an isolated environment for each build process. This is used to remove further hidden dependencies set by the build environment, which improves reproducibility. It includes blocking access to the network during the build outside of `fetch*` functions and to files outside the Nix store. Depending on the operating system, access to other resources is blocked as well (e.g. inter-process communication is isolated on Linux); see [sandbox](https://nixos.org/nix/manual/#conf-sandbox) in the Nix manual for details.
|
||||
|
||||
Sandboxing is not enabled by default in Nix due to a small performance hit on each build. In pull requests for [nixpkgs](https://github.com/NixOS/nixpkgs/) people are asked to test builds with sandboxing enabled (see `Tested using sandboxing` in the pull request template), because sandboxing is also used on <https://nixos.org/hydra/>.
|
||||
|
||||
Depending on whether you use NixOS or another platform, you can use one of the following methods to enable sandboxing **before** building the package:
|
||||
|
||||
- **Globally enable sandboxing on NixOS**: add the following to `configuration.nix`
|
||||
|
||||
```nix
|
||||
nix.useSandbox = true;
|
||||
```
|
||||
|
||||
- **Globally enable sandboxing on non-NixOS platforms**: add the following to: `/etc/nix/nix.conf`
|
||||
|
||||
```ini
|
||||
sandbox = true
|
||||
```
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
### Built on platform(s) {#submitting-changes-platform-diversity}
|
||||
|
||||
Many Nix packages are designed to run on multiple platforms. As such, it’s important to let the maintainer know which platforms your changes have been tested on. It’s not always practical to test a change on all platforms, and doing so is not required for a pull request to be merged. Only check the systems you tested the build on in this section.
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
### Tested via one or more NixOS test(s) if existing and applicable for the change (look inside nixos/tests) {#submitting-changes-nixos-tests}
|
||||
|
||||
Packages with automated tests are much more likely to be merged in a timely fashion because it doesn’t require as much manual testing by the maintainer to verify the functionality of the package. If there are existing tests for the package, they should be run to verify your changes do not break the tests. Tests can only be run on Linux. For more details on writing and running tests, see the [section in the NixOS manual](https://nixos.org/nixos/manual/index.html#sec-nixos-tests).
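For example, if your change affects nginx and a corresponding NixOS test exists, it can be run from the root of your Nixpkgs checkout roughly as follows (the `nginx` attribute is only an illustration; substitute the test relevant to your change):

```ShellSession
$ nix-build -A nixosTests.nginx
```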
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
### Tested compilation of all pkgs that depend on this change using `nixpkgs-review` {#submitting-changes-tested-compilation}
|
||||
|
||||
If you are updating a package’s version, you can use `nixpkgs-review` to make sure all packages that depend on the updated package still compile correctly. The `nixpkgs-review` utility can look for and build all dependencies either based on uncommitted changes with the `wip` option or by specifying a GitHub pull request number.
|
||||
|
||||
Review changes from pull request number 12345:
|
||||
|
||||
```ShellSession
|
||||
nix-shell -p nixpkgs-review --run "nixpkgs-review pr 12345"
|
||||
```
|
||||
|
||||
Alternatively, with flakes (and analogously for the other commands below):
|
||||
|
||||
```ShellSession
|
||||
nix run nixpkgs#nixpkgs-review -- pr 12345
|
||||
```
|
||||
|
||||
Review uncommitted changes:
|
||||
|
||||
```ShellSession
|
||||
nix-shell -p nixpkgs-review --run "nixpkgs-review wip"
|
||||
```
|
||||
|
||||
Review changes from last commit:
|
||||
|
||||
```ShellSession
|
||||
nix-shell -p nixpkgs-review --run "nixpkgs-review rev HEAD"
|
||||
```
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
### Tested execution of all binary files (usually in `./result/bin/`) {#submitting-changes-tested-execution}
|
||||
|
||||
It’s important to test any executables generated by a build when you change or create a package in nixpkgs. This can be done by looking in `./result/bin` and running any files in there, or at a minimum, the main executable for the package. For example, if you make a change to texlive, you probably would only check the binaries associated with the change you made rather than testing all of them.
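A minimal spot check after building might look like the following sketch (the program name and the `--version` flag are placeholders; run whatever the package actually installs):

```ShellSession
$ ls ./result/bin
$ ./result/bin/<main program> --version
```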
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
### Meets Nixpkgs contribution standards {#submitting-changes-contribution-standards}
|
||||
|
||||
The last checkbox is about meeting the standards in [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md). The contributing document has detailed information on the standards the Nix community has for commit messages, reviews, licensing of contributions you make to the project, and so on. Everyone should read and understand these standards before submitting a pull request.
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
## Hotfixing pull requests {#submitting-changes-hotfixing-pull-requests}
|
||||
|
||||
- Make the appropriate changes in your branch.
|
||||
- Don’t create additional commits, do
|
||||
- `git rebase -i`
|
||||
- `git push --force` to your branch.
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
## Commit policy {#submitting-changes-commit-policy}
|
||||
|
||||
- Commits must be sufficiently tested before being merged, both for the master and staging branches.
|
||||
- Hydra builds for master and staging should not be used as a testing platform; it’s a build farm for changes that have already been tested.
|
||||
- When changing the bootloader installation process, extra care must be taken. Grub installations cannot be rolled back, hence changes may break people’s installations forever. For any non-trivial change to the bootloader please file a PR asking for review, especially from \@edolstra.
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
### Branches {#submitting-changes-branches}
|
||||
|
||||
The `nixpkgs` repository has three major branches:
|
||||
- `master`
|
||||
- `staging`
|
||||
- `staging-next`
|
||||
|
||||
The most important distinction between them is that `staging`
|
||||
(colored red in the diagram below) can receive commits which cause
|
||||
a mass-rebuild (for example, anything that changes the `drvPath` of
|
||||
`stdenv`). The other two branches `staging-next` and `master`
|
||||
(colored green in the diagram below) can *not* receive commits which
|
||||
cause a mass-rebuild.
|
||||
|
||||
Arcs between the branches show possible merges into these branches,
|
||||
either from other branches or from independently submitted PRs. The
|
||||
colors of these edges likewise show whether or not they could
|
||||
trigger a mass rebuild (red) or must not trigger a mass rebuild
|
||||
(green).
|
||||
|
||||
Hydra runs automatic builds for the green branches.
|
||||
|
||||
Notice that the automatic merges are all green arrows. This is by
|
||||
design. Any merge which might cause a mass rebuild on a branch
|
||||
which has automatic builds (`staging-next`, `master`) will be a
|
||||
manual merge to make sure it is a good use of compute power.
|
||||
|
||||
Nixpkgs has two main development branches so that there is one branch (`staging`)
|
||||
which accepts mass-rebuilding commits, and one fast-rebuilding
|
||||
branch which accepts independent PRs (`master`). The `staging-next`
|
||||
branch allows the Hydra operators to batch groups of commits to
|
||||
`staging` to be built. By keeping the `staging-next` branch
|
||||
separate from `staging`, this batching does not block
|
||||
developers from merging changes into `staging`.
|
||||
|
||||
```{.graphviz caption="Staging workflow"}
|
||||
digraph {
|
||||
master [color="green" fontcolor=green]
|
||||
"staging-next" [color="green" fontcolor=green]
|
||||
staging [color="red" fontcolor=red]
|
||||
|
||||
"small changes" [fontcolor=green shape=none]
|
||||
"small changes" -> master [color=green]
|
||||
|
||||
"mass-rebuilds and other large changes" [fontcolor=red shape=none]
|
||||
"mass-rebuilds and other large changes" -> staging [color=red]
|
||||
|
||||
"critical security fixes" [fontcolor=green shape=none]
|
||||
"critical security fixes" -> master [color=green]
|
||||
|
||||
"staging fixes which do not cause staging to mass-rebuild" [fontcolor=green shape=none]
|
||||
"staging fixes which do not cause staging to mass-rebuild" -> "staging-next" [color=green]
|
||||
|
||||
"staging-next" -> master [color="red"] [label="manual merge"] [fontcolor="red"]
|
||||
"staging" -> "staging-next" [color="red"] [label="manual merge"] [fontcolor="red"]
|
||||
|
||||
master -> "staging-next" [color="green"] [label="automatic merge (GitHub Action)"] [fontcolor="green"]
|
||||
"staging-next" -> staging [color="green"] [label="automatic merge (GitHub Action)"] [fontcolor="green"]
|
||||
}
|
||||
```
|
||||
|
||||
[This GitHub Action](https://github.com/NixOS/nixpkgs/blob/master/.github/workflows/periodic-merge-6h.yml) brings changes from `master` to `staging-next` and from `staging-next` to `staging` every 6 hours; these are the green arrows in the diagram above. The red arrows in the diagram above are done manually and much less frequently. You can get an idea of how often these merges occur by looking at the git history.
|
||||
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
#### Master branch {#submitting-changes-master-branch}
|
||||
|
||||
The `master` branch is the main development branch. It should only see non-breaking commits that do not cause mass rebuilds.
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
#### Staging branch {#submitting-changes-staging-branch}
|
||||
|
||||
The `staging` branch is a development branch where mass-rebuilds go. Mass rebuilds are commits that cause rebuilds of many packages, typically more than 500 (or perhaps around 1000 if the affected packages are 'light'). It should only see non-breaking mass-rebuild commits. That means it is not to be used for testing, and changes must have been well tested already. If the branch is already in a broken state, please refrain from adding extra new breakages.
|
||||
|
||||
During the process of releasing a new NixOS version, this branch or the release-critical packages can be restricted to non-breaking changes.
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
#### Staging-next branch {#submitting-changes-staging-next-branch}
|
||||
|
||||
The `staging-next` branch is for stabilizing mass-rebuilds submitted to the `staging` branch prior to merging them into `master`. Mass-rebuilds must go via the `staging` branch. It must only see non-breaking commits that are fixing issues blocking it from being merged into the `master` branch.
|
||||
|
||||
If the branch is already in a broken state, please refrain from adding extra new breakages. Stabilize it for a few days and then merge into master.
|
||||
|
||||
During the process of releasing a new NixOS version, this branch or the release-critical packages can be restricted to non-breaking changes.
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
#### Stable release branches {#submitting-changes-stable-release-branches}
|
||||
|
||||
The same staging workflow applies to stable release branches, but the main branch is called `release-*` instead of `master`.
|
||||
|
||||
Example branch names: `release-21.11`, `staging-21.11`, `staging-next-21.11`.
|
||||
|
||||
Most changes added to the stable release branches are cherry-picked (“backported”) from the `master` and staging branches.
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
#### Automatically backporting a Pull Request {#submitting-changes-stable-release-branches-automatic-backports}
|
||||
|
||||
Assign label `backport <branch>` (e.g. `backport release-21.11`) to the PR and a backport PR is automatically created after the PR is merged.
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
#### Manually backporting changes {#submitting-changes-stable-release-branches-manual-backports}
|
||||
|
||||
Cherry-pick changes via `git cherry-pick -x <original commit>` so that the original commit id is included in the commit message.
|
||||
|
||||
Add a reason for the backport when it is not obvious from the original commit message. You can do this by cherry-picking with `git cherry-pick -xe <original commit>`, which allows editing the commit message. This is not needed for minor version updates that include security and bug fixes but don't add new features, or when the commit fixes an otherwise broken package.
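A sketch of the manual backport flow, assuming the fix is already merged into `master`, your remote for the NixOS/nixpkgs repository is named `upstream`, and `release-21.11` is the target branch:

```ShellSession
$ git fetch upstream
$ git checkout -b backport-release-21.11 upstream/release-21.11
$ git cherry-pick -xe <original commit>
```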
|
||||
|
||||
Here is an example of a cherry-picked commit message with good reason description:
|
||||
|
||||
```
|
||||
zfs: Keep trying root import until it works
|
||||
|
||||
Works around #11003.
|
||||
|
||||
(cherry picked from commit 98b213a11041af39b39473906b595290e2a4e2f9)
|
||||
|
||||
Reason: several people cannot boot with ZFS on NVMe
|
||||
```
|
||||
|
||||
Other examples of reasons are:
|
||||
|
||||
- Previously the build would fail due to, e.g., `getaddrinfo` not being defined
|
||||
- The previous download links were all broken
|
||||
- Crash when starting on some X11 systems
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
#### Acceptable backport criteria {#acceptable-backport-criteria}
|
||||
|
||||
Some changes cannot be backported to the stable branches, most notably breaking changes. The desire is to keep stable users uninterrupted when updating packages.
|
||||
This section has been moved to [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
However, many changes can be backported, including:
|
||||
- New Packages / Modules
|
||||
- Security / Patch updates
|
||||
- Version updates which include new functionality (but no breaking changes)
|
||||
- Services which require a client to be up-to-date regardless. (E.g. `spotify`, `steam`, or `discord`)
|
||||
- Security critical applications (E.g. `firefox`)
|
||||
|
|
|
@ -1,45 +1,11 @@
|
|||
# Vulnerability Roundup {#chap-vulnerability-roundup}
|
||||
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
||||
## Issues {#vulnerability-roundup-issues}
|
||||
|
||||
Vulnerable packages in Nixpkgs are managed using issues.
|
||||
Currently opened ones can be found using the following:
|
||||
|
||||
[github.com/NixOS/nixpkgs/issues?q=is:issue+is:open+"Vulnerability+roundup"](https://github.com/NixOS/nixpkgs/issues?q=is%3Aissue+is%3Aopen+%22Vulnerability+roundup%22)
|
||||
|
||||
Each issue corresponds to a vulnerable version of a package. As a consequence:
|
||||
|
||||
- One issue can contain several CVEs;
|
||||
- One CVE can be shared across several issues;
|
||||
- A single package can be concerned by several issues.
|
||||
|
||||
|
||||
A "Vulnerability roundup" issue usually respects the following format:
|
||||
|
||||
```txt
|
||||
<link to relevant package search on search.nix.gsc.io>, <link to relevant files in Nixpkgs on GitHub>
|
||||
|
||||
<list of related CVEs, their CVSS score, and the impacted NixOS version>
|
||||
|
||||
<list of the scanned Nixpkgs versions>
|
||||
|
||||
<list of relevant contributors>
|
||||
```
|
||||
|
||||
Note that there can be an extra comment containing links to previously reported (and still open) issues for the same package.
|
||||
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
||||
## Triaging and Fixing {#vulnerability-roundup-triaging-and-fixing}
|
||||
|
||||
**Note**: An issue can be a "false positive" (i.e. automatically opened, but without the package it refers to being actually vulnerable).
|
||||
If you find such a "false positive", comment on the issue with an explanation of why it falls into this category, linking as much information as necessary to help maintainers double-check.
|
||||
|
||||
If you are investigating a "true positive":
|
||||
|
||||
- Find the earliest patched version or a code patch in the CVE details;
|
||||
- Is the issue already patched (version up-to-date or patch applied manually) in Nixpkgs's `master` branch?
|
||||
- **No**:
|
||||
- [Submit a security fix](#submitting-changes-submitting-security-fixes);
|
||||
- Once the fix is merged into `master`, [submit the change to the vulnerable release branch(es)](https://nixos.org/manual/nixpkgs/stable/#submitting-changes-stable-release-branches);
|
||||
- **Yes**: [Backport the change to the vulnerable release branch(es)](https://nixos.org/manual/nixpkgs/stable/#submitting-changes-stable-release-branches).
|
||||
- When the patch has made it into all the relevant branches (`master`, and the vulnerable releases), close the relevant issue(s).
|
||||
This section has been moved to [pkgs/README.md](https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md).
|
||||
|
|
10
third_party/nixpkgs/doc/development.md
vendored
Normal file
10
third_party/nixpkgs/doc/development.md
vendored
Normal file
|
@ -0,0 +1,10 @@
|
|||
# Development of Nixpkgs {#part-development}
|
||||
|
||||
This section shows you how Nixpkgs is being developed and how you can interact with the contributors and the latest updates.
|
||||
If you are interested in contributing yourself, see [CONTRIBUTING.md](https://github.com/NixOS/nixpkgs/blob/master/CONTRIBUTING.md).
|
||||
|
||||
<!-- In the future this section should also include: How to test pull requests, how to know if pull requests are available in channels, etc. -->
|
||||
|
||||
```{=include=} chapters
|
||||
development/opening-issues.chapter.md
|
||||
```
|
7
third_party/nixpkgs/doc/development/opening-issues.chapter.md
vendored
Normal file
7
third_party/nixpkgs/doc/development/opening-issues.chapter.md
vendored
Normal file
|
@ -0,0 +1,7 @@
|
|||
# Opening issues {#sec-opening-issues}
|
||||
|
||||
* Make sure you have a [GitHub account](https://github.com/signup/free)
|
||||
* Make sure there is no open issue on the topic
|
||||
* [Submit a new issue](https://github.com/NixOS/nixpkgs/issues/new/choose) by choosing the kind of topic and filling out the template
|
||||
|
||||
<!-- In the future this section could also include more detailed information on the issue templates -->
|
|
@ -30,7 +30,7 @@ package set to make it the default. This guarantees you get a consistent package
|
|||
set.
|
||||
```nix
|
||||
mypkg = let
|
||||
cudaPackages = cudaPackages_11_5.overrideScope' (final: prev: {
|
||||
cudaPackages = cudaPackages_11_5.overrideScope (final: prev: {
|
||||
cudnn = prev.cudnn_8_3;
|
||||
}});
|
||||
in callPackage { inherit cudaPackages; };
|
||||
|
|
|
@ -210,3 +210,5 @@ buildDotnetGlobalTool {
|
|||
};
|
||||
}
|
||||
```
|
||||
|
||||
When packaging a new .NET application in nixpkgs, you can tag the [`@NixOS/dotnet`](https://github.com/orgs/nixos/teams/dotnet) team for help and code review.
|
||||
|
|
File diff suppressed because it is too large
Load diff
1
third_party/nixpkgs/doc/manual.md.in
vendored
1
third_party/nixpkgs/doc/manual.md.in
vendored
|
@ -10,5 +10,6 @@ using-nixpkgs.md
|
|||
lib.md
|
||||
stdenv.md
|
||||
builders.md
|
||||
development.md
|
||||
contributing.md
|
||||
```
|
||||
|
|
10
third_party/nixpkgs/doc/stdenv/stdenv.chapter.md
vendored
10
third_party/nixpkgs/doc/stdenv/stdenv.chapter.md
vendored
|
@ -425,6 +425,16 @@ A script to be run by `maintainers/scripts/update.nix` when the package is match
|
|||
};
|
||||
```
|
||||
|
||||
::: {.tip}
|
||||
A common pattern is to use the [`nix-update-script`](https://github.com/NixOS/nixpkgs/blob/master/pkgs/common-updater/nix-update.nix) attribute provided in Nixpkgs, which runs [`nix-update`](https://github.com/Mic92/nix-update):
|
||||
|
||||
```nix
|
||||
passthru.updateScript = nix-update-script { };
|
||||
```
|
||||
|
||||
For simple packages, this is often enough, and will ensure that the package is updated automatically by [`nixpkgs-update`](https://ryantm.github.io/nixpkgs-update) when a new version is released. The [update bot](https://nix-community.org/update-bot) runs periodically to attempt to automatically update packages, and will run `passthru.updateScript` if set. While not strictly necessary if the project is listed on [Repology](https://repology.org), using `nix-update-script` allows the package to update via many more sources (e.g. GitHub releases).
|
||||
:::
|
||||
|
||||
##### How are update scripts executed? {#var-passthru-updateScript-execution}
|
||||
|
||||
Update scripts are invoked by the `maintainers/scripts/update.nix` script. You can run `nix-shell maintainers/scripts/update.nix` in the root of the Nixpkgs repository for information on how to use it. `update.nix` offers several modes for selecting packages to update (e.g. select by attribute path, traverse Nixpkgs and filter by maintainer, etc.), and it will execute update scripts for all matched packages that have an `updateScript` attribute.
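For instance, the update script of a single package can be run from the root of the Nixpkgs checkout roughly like this (the attribute path is a placeholder, and the exact selection arguments are best taken from the usage information printed by `update.nix` itself):

```ShellSession
$ nix-shell maintainers/scripts/update.nix --argstr package <attribute path>
```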
|
||||
|
|
40
third_party/nixpkgs/lib/customisation.nix
vendored
40
third_party/nixpkgs/lib/customisation.nix
vendored
|
@ -269,17 +269,33 @@ rec {
|
|||
let self = f self // {
|
||||
newScope = scope: newScope (self // scope);
|
||||
callPackage = self.newScope {};
|
||||
overrideScope = g: lib.warn
|
||||
"`overrideScope` (from `lib.makeScope`) is deprecated. Do `overrideScope' (self: super: { … })` instead of `overrideScope (super: self: { … })`. All other overrides have the parameters in that order, including other definitions of `overrideScope`. This was the only definition violating the pattern."
|
||||
(makeScope newScope (lib.fixedPoints.extends (lib.flip g) f));
|
||||
overrideScope' = g: makeScope newScope (lib.fixedPoints.extends g f);
|
||||
overrideScope = g: makeScope newScope (lib.fixedPoints.extends g f);
|
||||
# Remove after 24.11 is released.
|
||||
overrideScope' = g: lib.warnIf (lib.isInOldestRelease 2311)
|
||||
"`overrideScope'` (from `lib.makeScope`) has been renamed to `overrideScope`."
|
||||
(makeScope newScope (lib.fixedPoints.extends g f));
|
||||
packages = f;
|
||||
};
|
||||
in self;
|
||||
|
||||
/* Like the above, but aims to support cross compilation. It's still ugly, but
|
||||
/* backward compatibility with old uncurried form; deprecated */
|
||||
makeScopeWithSplicing =
|
||||
splicePackages: newScope: otherSplices: keep: extra: f:
|
||||
makeScopeWithSplicing'
|
||||
{ inherit splicePackages newScope; }
|
||||
{ inherit otherSplices keep extra f; };
|
||||
|
||||
/* Like makeScope, but aims to support cross compilation. It's still ugly, but
|
||||
hopefully it helps a little bit. */
|
||||
makeScopeWithSplicing = splicePackages: newScope: otherSplices: keep: extra: f:
|
||||
makeScopeWithSplicing' =
|
||||
{ splicePackages
|
||||
, newScope
|
||||
}:
|
||||
{ otherSplices
|
||||
, keep ? (_self: {})
|
||||
, extra ? (_spliced0: {})
|
||||
, f
|
||||
}:
|
||||
let
|
||||
spliced0 = splicePackages {
|
||||
pkgsBuildBuild = otherSplices.selfBuildBuild;
|
||||
|
@ -295,13 +311,11 @@ rec {
|
|||
callPackage = newScope spliced; # == self.newScope {};
|
||||
# N.B. the other stages of the package set spliced in are *not*
|
||||
# overridden.
|
||||
overrideScope = g: makeScopeWithSplicing
|
||||
splicePackages
|
||||
newScope
|
||||
otherSplices
|
||||
keep
|
||||
extra
|
||||
(lib.fixedPoints.extends g f);
|
||||
overrideScope = g: (makeScopeWithSplicing'
|
||||
{ inherit splicePackages newScope; }
|
||||
{ inherit otherSplices keep extra;
|
||||
f = lib.fixedPoints.extends g f;
|
||||
});
|
||||
packages = f;
|
||||
};
|
||||
in self;
|
||||
|
|
4
third_party/nixpkgs/lib/default.nix
vendored
4
third_party/nixpkgs/lib/default.nix
vendored
|
@ -112,11 +112,11 @@ let
|
|||
noDepEntry fullDepEntry packEntry stringAfter;
|
||||
inherit (self.customisation) overrideDerivation makeOverridable
|
||||
callPackageWith callPackagesWith extendDerivation hydraJob
|
||||
makeScope makeScopeWithSplicing;
|
||||
makeScope makeScopeWithSplicing makeScopeWithSplicing';
|
||||
inherit (self.derivations) lazyDerivation;
|
||||
inherit (self.meta) addMetaAttrs dontDistribute setName updateName
|
||||
appendToName mapDerivationAttrset setPrio lowPrio lowPrioSet hiPrio
|
||||
hiPrioSet getLicenseFromSpdxId getExe;
|
||||
hiPrioSet getLicenseFromSpdxId getExe getExe';
|
||||
inherit (self.filesystem) pathType pathIsDirectory pathIsRegularFile;
|
||||
inherit (self.sources) cleanSourceFilter
|
||||
cleanSource sourceByRegex sourceFilesBySuffices
|
||||
|
|
34
third_party/nixpkgs/lib/lists.nix
vendored
34
third_party/nixpkgs/lib/lists.nix
vendored
|
@ -638,6 +638,40 @@ rec {
|
|||
# Input list
|
||||
list: sublist count (length list) list;
|
||||
|
||||
/* Whether the first list is a prefix of the second list.
|
||||
|
||||
Type: hasPrefix :: [a] -> [a] -> bool
|
||||
|
||||
Example:
|
||||
hasPrefix [ 1 2 ] [ 1 2 3 4 ]
|
||||
=> true
|
||||
hasPrefix [ 0 1 ] [ 1 2 3 4 ]
|
||||
=> false
|
||||
*/
|
||||
hasPrefix =
|
||||
list1:
|
||||
list2:
|
||||
take (length list1) list2 == list1;
|
||||
|
||||
/* Remove the first list as a prefix from the second list.
|
||||
Error if the first list isn't a prefix of the second list.
|
||||
|
||||
Type: removePrefix :: [a] -> [a] -> [a]
|
||||
|
||||
Example:
|
||||
removePrefix [ 1 2 ] [ 1 2 3 4 ]
|
||||
=> [ 3 4 ]
|
||||
removePrefix [ 0 1 ] [ 1 2 3 4 ]
|
||||
=> <error>
|
||||
*/
|
||||
removePrefix =
|
||||
list1:
|
||||
list2:
|
||||
if hasPrefix list1 list2 then
|
||||
drop (length list1) list2
|
||||
else
|
||||
throw "lib.lists.removePrefix: First argument is not a list prefix of the second argument";
|
||||
|
||||
/* Return a list consisting of at most `count` elements of `list`,
|
||||
starting at index `start`.
|
||||
|
||||
|
|
25
third_party/nixpkgs/lib/meta.nix
vendored
25
third_party/nixpkgs/lib/meta.nix
vendored
|
@ -143,9 +143,24 @@ rec {
|
|||
=> "/nix/store/am9ml4f4ywvivxnkiaqwr0hyxka1xjsf-mustache-go-1.3.0/bin/mustache"
|
||||
*/
|
||||
getExe = x:
|
||||
"${lib.getBin x}/bin/${x.meta.mainProgram or (
|
||||
# This could be turned into an error when 23.05 is at end of life
|
||||
lib.warn "getExe: Package ${lib.strings.escapeNixIdentifier x.meta.name or x.pname or x.name} does not have the meta.mainProgram attribute. We'll assume that the main program has the same name for now, but this behavior is deprecated, because it leads to surprising errors when the assumption does not hold. If the package has a main program, please set `meta.mainProgram` in its definition to make this warning go away. Otherwise, if the package does not have a main program, or if you don't control its definition, specify the full path to the program, such as \"\${lib.getBin foo}/bin/bar\"."
|
||||
lib.getName x
|
||||
)}";
|
||||
let
|
||||
y = x.meta.mainProgram or (
|
||||
# This could be turned into an error when 23.05 is at end of life
|
||||
lib.warn "getExe: Package ${lib.strings.escapeNixIdentifier x.meta.name or x.pname or x.name} does not have the meta.mainProgram attribute. We'll assume that the main program has the same name for now, but this behavior is deprecated, because it leads to surprising errors when the assumption does not hold. If the package has a main program, please set `meta.mainProgram` in its definition to make this warning go away. Otherwise, if the package does not have a main program, or if you don't control its definition, use getExe' to specify the name to the program, such as lib.getExe' foo \"bar\"."
|
||||
lib.getName
|
||||
x
|
||||
);
|
||||
in
|
||||
getExe' x y;
|
||||
|
||||
/* Get the path of a program of a derivation.
|
||||
|
||||
Type: getExe' :: derivation -> string -> string
|
||||
Example:
|
||||
getExe' pkgs.hello "hello"
|
||||
=> "/nix/store/g124820p9hlv4lj8qplzxw1c44dxaw1k-hello-2.12/bin/hello"
|
||||
getExe' pkgs.imagemagick "convert"
|
||||
=> "/nix/store/5rs48jamq7k6sal98ymj9l4k2bnwq515-imagemagick-7.1.1-15/bin/convert"
|
||||
*/
|
||||
getExe' = x: y: "${lib.getBin x}/bin/${y}";
|
||||
}
|
||||
|
|
34
third_party/nixpkgs/lib/modules.nix
vendored
34
third_party/nixpkgs/lib/modules.nix
vendored
|
@ -630,7 +630,13 @@ let
|
|||
loc = prefix ++ [name];
|
||||
defns = pushedDownDefinitionsByName.${name} or [];
|
||||
defns' = rawDefinitionsByName.${name} or [];
|
||||
optionDecls = filter (m: isOption m.options) decls;
|
||||
optionDecls = filter
|
||||
(m: m.options?_type
|
||||
&& (m.options._type == "option"
|
||||
|| throwDeclarationTypeError loc m.options._type m._file
|
||||
)
|
||||
)
|
||||
decls;
|
||||
in
|
||||
if length optionDecls == length decls then
|
||||
let opt = fixupOptionType loc (mergeOptionDecls loc decls);
|
||||
|
@ -692,6 +698,32 @@ let
|
|||
) unmatchedDefnsByName);
|
||||
};
|
||||
|
||||
throwDeclarationTypeError = loc: actualTag: file:
|
||||
let
|
||||
name = lib.strings.escapeNixIdentifier (lib.lists.last loc);
|
||||
path = showOption loc;
|
||||
depth = length loc;
|
||||
|
||||
paragraphs = [
|
||||
"In module ${file}: expected an option declaration at option path `${path}` but got an attribute set with type ${actualTag}"
|
||||
] ++ optional (actualTag == "option-type") ''
|
||||
When declaring an option, you must wrap the type in a `mkOption` call. It should look somewhat like:
|
||||
${comment}
|
||||
${name} = lib.mkOption {
|
||||
description = ...;
|
||||
type = <the type you wrote for ${name}>;
|
||||
...
|
||||
};
|
||||
'';
|
||||
|
||||
# Ideally we'd know the exact syntax they used, but short of that,
|
||||
# we can only reliably repeat the last. However, we repeat the
|
||||
# full path in a non-misleading way here, in case they overlook
|
||||
# the start of the message. Examples attract attention.
|
||||
comment = optionalString (depth > 1) "\n # ${showOption loc}";
|
||||
in
|
||||
throw (concatStringsSep "\n\n" paragraphs);
|
||||
|
||||
/* Merge multiple option declarations into a single declaration. In
|
||||
general, there should be only one declaration of each option.
|
||||
The exception is the ‘options’ attribute, which specifies
|
||||
|
|
320
third_party/nixpkgs/lib/path/default.nix
vendored
320
third_party/nixpkgs/lib/path/default.nix
vendored
|
@ -121,17 +121,18 @@ let
|
|||
|
||||
in /* No rec! Add dependencies on this file at the top. */ {
|
||||
|
||||
/* Append a subpath string to a path.
|
||||
/*
|
||||
Append a subpath string to a path.
|
||||
|
||||
Like `path + ("/" + string)` but safer, because it errors instead of returning potentially surprising results.
|
||||
More specifically, it checks that the first argument is a [path value type](https://nixos.org/manual/nix/stable/language/values.html#type-path"),
|
||||
and that the second argument is a valid subpath string (see `lib.path.subpath.isValid`).
|
||||
and that the second argument is a [valid subpath string](#function-library-lib.path.subpath.isValid).
|
||||
|
||||
Laws:
|
||||
|
||||
- Not influenced by subpath normalisation
|
||||
- Not influenced by subpath [normalisation](#function-library-lib.path.subpath.normalise):
|
||||
|
||||
append p s == append p (subpath.normalise s)
|
||||
append p s == append p (subpath.normalise s)
|
||||
|
||||
Type:
|
||||
append :: Path -> String -> Path
|
||||
|
@ -175,26 +176,26 @@ in /* No rec! Add dependencies on this file at the top. */ {
|
|||
path + ("/" + subpath);
|
||||
|
||||
/*
|
||||
Whether the first path is a component-wise prefix of the second path.
|
||||
Whether the first path is a component-wise prefix of the second path.
|
||||
|
||||
Laws:
|
||||
Laws:
|
||||
|
||||
- `hasPrefix p q` is only true if `q == append p s` for some subpath `s`.
|
||||
- `hasPrefix p q` is only true if [`q == append p s`](#function-library-lib.path.append) for some [subpath](#function-library-lib.path.subpath.isValid) `s`.
|
||||
|
||||
- `hasPrefix` is a [non-strict partial order](https://en.wikipedia.org/wiki/Partially_ordered_set#Non-strict_partial_order) over the set of all path values
|
||||
- `hasPrefix` is a [non-strict partial order](https://en.wikipedia.org/wiki/Partially_ordered_set#Non-strict_partial_order) over the set of all path values.
|
||||
|
||||
Type:
|
||||
hasPrefix :: Path -> Path -> Bool
|
||||
Type:
|
||||
hasPrefix :: Path -> Path -> Bool
|
||||
|
||||
Example:
|
||||
hasPrefix /foo /foo/bar
|
||||
=> true
|
||||
hasPrefix /foo /foo
|
||||
=> true
|
||||
hasPrefix /foo/bar /foo
|
||||
=> false
|
||||
hasPrefix /. /foo
|
||||
=> true
|
||||
Example:
|
||||
hasPrefix /foo /foo/bar
|
||||
=> true
|
||||
hasPrefix /foo /foo
|
||||
=> true
|
||||
hasPrefix /foo/bar /foo
|
||||
=> false
|
||||
hasPrefix /. /foo
|
||||
=> true
|
||||
*/
|
||||
hasPrefix =
|
||||
path1:
|
||||
|
@ -219,27 +220,27 @@ in /* No rec! Add dependencies on this file at the top. */ {
|
|||
take (length path1Deconstructed.components) path2Deconstructed.components == path1Deconstructed.components;
|
||||
|
||||
/*
|
||||
Remove the first path as a component-wise prefix from the second path.
|
||||
The result is a normalised subpath string, see `lib.path.subpath.normalise`.
|
||||
Remove the first path as a component-wise prefix from the second path.
|
||||
The result is a [normalised subpath string](#function-library-lib.path.subpath.normalise).
|
||||
|
||||
Laws:
|
||||
Laws:
|
||||
|
||||
- Inverts `append` for normalised subpaths:
|
||||
- Inverts [`append`](#function-library-lib.path.append) for [normalised subpath string](#function-library-lib.path.subpath.normalise):
|
||||
|
||||
removePrefix p (append p s) == subpath.normalise s
|
||||
removePrefix p (append p s) == subpath.normalise s
|
||||
|
||||
Type:
|
||||
removePrefix :: Path -> Path -> String
|
||||
Type:
|
||||
removePrefix :: Path -> Path -> String
|
||||
|
||||
Example:
|
||||
removePrefix /foo /foo/bar/baz
|
||||
=> "./bar/baz"
|
||||
removePrefix /foo /foo
|
||||
=> "./."
|
||||
removePrefix /foo/bar /foo
|
||||
=> <error>
|
||||
removePrefix /. /foo
|
||||
=> "./foo"
|
||||
Example:
|
||||
removePrefix /foo /foo/bar/baz
|
||||
=> "./bar/baz"
|
||||
removePrefix /foo /foo
|
||||
=> "./."
|
||||
removePrefix /foo/bar /foo
|
||||
=> <error>
|
||||
removePrefix /. /foo
|
||||
=> "./foo"
|
||||
*/
|
||||
removePrefix =
|
||||
path1:
|
||||
|
@@ -272,41 +273,43 @@ in /* No rec! Add dependencies on this file at the top. */ {
      joinRelPath components;

  /*
    Split the filesystem root from a [path](https://nixos.org/manual/nix/stable/language/values.html#type-path).
    The result is an attribute set with these attributes:
    - `root`: The filesystem root of the path, meaning that this directory has no parent directory.
    - `subpath`: The [normalised subpath string](#function-library-lib.path.subpath.normalise) that when [appended](#function-library-lib.path.append) to `root` returns the original path.

    Laws:
    - [Appending](#function-library-lib.path.append) the `root` and `subpath` gives the original path:

          p ==
            append
              (splitRoot p).root
              (splitRoot p).subpath

    - Trying to get the parent directory of `root` using [`readDir`](https://nixos.org/manual/nix/stable/language/builtins.html#builtins-readDir) returns `root` itself:

          dirOf (splitRoot p).root == (splitRoot p).root

    Type:
      splitRoot :: Path -> { root :: Path, subpath :: String }

    Example:
      splitRoot /foo/bar
      => { root = /.; subpath = "./foo/bar"; }

      splitRoot /.
      => { root = /.; subpath = "./."; }

      # Nix neutralises `..` path components for all path values automatically
      splitRoot /foo/../bar
      => { root = /.; subpath = "./bar"; }

      splitRoot "/foo/bar"
      => <error>
  */
  splitRoot =
    # The path to split the root off of
    path:
    assert assertMsg
      (isPath path)
      "lib.path.splitRoot: Argument is of type ${typeOf path}, but a path was expected";
@@ -317,46 +320,47 @@ in /* No rec! Add dependencies on this file at the top. */ {
      subpath = joinRelPath deconstructed.components;
    };

  /*
    Whether a value is a valid subpath string.

    A subpath string points to a specific file or directory within an absolute base directory.
    It is a stricter form of a relative path that excludes `..` components, since those could escape the base directory.

    - The value is a string.

    - The string is not empty.

    - The string doesn't start with a `/`.

    - The string doesn't contain any `..` path components.

    Type:
      subpath.isValid :: String -> Bool

    Example:
      # Not a string
      subpath.isValid null
      => false

      # Empty string
      subpath.isValid ""
      => false

      # Absolute path
      subpath.isValid "/foo"
      => false

      # Contains a `..` path component
      subpath.isValid "../foo"
      => false

      # Valid subpath
      subpath.isValid "foo/bar"
      => true

      # Doesn't need to be normalised
      subpath.isValid "./foo//bar/"
      => true
  */
  subpath.isValid =
    # The value to check
@@ -364,15 +368,16 @@ in /* No rec! Add dependencies on this file at the top. */ {
      subpathInvalidReason value == null;


  /*
    Join subpath strings together using `/`, returning a normalised subpath string.

    Like `concatStringsSep "/"` but safer, specifically:

    - All elements must be [valid subpath strings](#function-library-lib.path.subpath.isValid).

    - The result gets [normalised](#function-library-lib.path.subpath.normalise).

    - The edge case of an empty list gets properly handled by returning the neutral subpath `"./."`.

    Laws:

@@ -386,12 +391,12 @@ in /* No rec! Add dependencies on this file at the top. */ {
          subpath.join [ (subpath.normalise p) "./." ] == subpath.normalise p
          subpath.join [ "./." (subpath.normalise p) ] == subpath.normalise p

    - Normalisation - the result is [normalised](#function-library-lib.path.subpath.normalise):

          subpath.join ps == subpath.normalise (subpath.join ps)

    - For non-empty lists, the implementation is equivalent to [normalising](#function-library-lib.path.subpath.normalise) the result of `concatStringsSep "/"`.
      Note that the above laws can be derived from this one:

          ps != [] -> subpath.join ps == subpath.normalise (concatStringsSep "/" ps)
|
|||
) 0 subpaths;
|
||||
|
||||
/*
|
||||
Split [a subpath](#function-library-lib.path.subpath.isValid) into its path component strings.
|
||||
Throw an error if the subpath isn't valid.
|
||||
Note that the returned path components are also valid subpath strings, though they are intentionally not [normalised](#function-library-lib.path.subpath.normalise).
|
||||
Split [a subpath](#function-library-lib.path.subpath.isValid) into its path component strings.
|
||||
Throw an error if the subpath isn't valid.
|
||||
Note that the returned path components are also [valid subpath strings](#function-library-lib.path.subpath.isValid), though they are intentionally not [normalised](#function-library-lib.path.subpath.normalise).
|
||||
|
||||
Laws:
|
||||
Laws:
|
||||
|
||||
- Splitting a subpath into components and [joining](#function-library-lib.path.subpath.join) the components gives the same subpath but [normalised](#function-library-lib.path.subpath.normalise):
|
||||
- Splitting a subpath into components and [joining](#function-library-lib.path.subpath.join) the components gives the same subpath but [normalised](#function-library-lib.path.subpath.normalise):
|
||||
|
||||
subpath.join (subpath.components s) == subpath.normalise s
|
||||
subpath.join (subpath.components s) == subpath.normalise s
|
||||
|
||||
Type:
|
||||
subpath.components :: String -> [ String ]
|
||||
Type:
|
||||
subpath.components :: String -> [ String ]
|
||||
|
||||
Example:
|
||||
subpath.components "."
|
||||
=> [ ]
|
||||
Example:
|
||||
subpath.components "."
|
||||
=> [ ]
|
||||
|
||||
subpath.components "./foo//bar/./baz/"
|
||||
=> [ "foo" "bar" "baz" ]
|
||||
subpath.components "./foo//bar/./baz/"
|
||||
=> [ "foo" "bar" "baz" ]
|
||||
|
||||
subpath.components "/foo"
|
||||
=> <error>
|
||||
subpath.components "/foo"
|
||||
=> <error>
|
||||
*/
|
||||
subpath.components =
|
||||
# The subpath string to split into components
|
||||
subpath:
|
||||
assert assertMsg (isValid subpath) ''
|
||||
lib.path.subpath.components: Argument is not a valid subpath string:
|
||||
${subpathInvalidReason subpath}'';
|
||||
splitRelPath subpath;
|
||||
|
||||
/* Normalise a subpath. Throw an error if the subpath isn't valid, see
|
||||
`lib.path.subpath.isValid`
|
||||
/*
|
||||
Normalise a subpath. Throw an error if the subpath isn't [valid](#function-library-lib.path.subpath.isValid).
|
||||
|
||||
- Limit repeating `/` to a single one
|
||||
- Limit repeating `/` to a single one.
|
||||
|
||||
- Remove redundant `.` components
|
||||
- Remove redundant `.` components.
|
||||
|
||||
- Remove trailing `/` and `/.`
|
||||
- Remove trailing `/` and `/.`.
|
||||
|
||||
- Add leading `./`
|
||||
- Add leading `./`.
|
||||
|
||||
Laws:
|
||||
Laws:
|
||||
|
||||
- Idempotency - normalising multiple times gives the same result:
|
||||
- Idempotency - normalising multiple times gives the same result:
|
||||
|
||||
subpath.normalise (subpath.normalise p) == subpath.normalise p
|
||||
subpath.normalise (subpath.normalise p) == subpath.normalise p
|
||||
|
||||
- Uniqueness - there's only a single normalisation for the paths that lead to the same file system node:
|
||||
- Uniqueness - there's only a single normalisation for the paths that lead to the same file system node:
|
||||
|
||||
subpath.normalise p != subpath.normalise q -> $(realpath ${p}) != $(realpath ${q})
|
||||
subpath.normalise p != subpath.normalise q -> $(realpath ${p}) != $(realpath ${q})
|
||||
|
||||
- Don't change the result when appended to a Nix path value:
|
||||
- Don't change the result when [appended](#function-library-lib.path.append) to a Nix path value:
|
||||
|
||||
base + ("/" + p) == base + ("/" + subpath.normalise p)
|
||||
append base p == append base (subpath.normalise p)
|
||||
|
||||
- Don't change the path according to `realpath`:
|
||||
- Don't change the path according to `realpath`:
|
||||
|
||||
$(realpath ${p}) == $(realpath ${subpath.normalise p})
|
||||
$(realpath ${p}) == $(realpath ${subpath.normalise p})
|
||||
|
||||
- Only error on invalid subpaths:
|
||||
- Only error on [invalid subpaths](#function-library-lib.path.subpath.isValid):
|
||||
|
||||
builtins.tryEval (subpath.normalise p)).success == subpath.isValid p
|
||||
builtins.tryEval (subpath.normalise p)).success == subpath.isValid p
|
||||
|
||||
Type:
|
||||
subpath.normalise :: String -> String
|
||||
Type:
|
||||
subpath.normalise :: String -> String
|
||||
|
||||
Example:
|
||||
# limit repeating `/` to a single one
|
||||
subpath.normalise "foo//bar"
|
||||
=> "./foo/bar"
|
||||
Example:
|
||||
# limit repeating `/` to a single one
|
||||
subpath.normalise "foo//bar"
|
||||
=> "./foo/bar"
|
||||
|
||||
# remove redundant `.` components
|
||||
subpath.normalise "foo/./bar"
|
||||
=> "./foo/bar"
|
||||
# remove redundant `.` components
|
||||
subpath.normalise "foo/./bar"
|
||||
=> "./foo/bar"
|
||||
|
||||
# add leading `./`
|
||||
subpath.normalise "foo/bar"
|
||||
=> "./foo/bar"
|
||||
# add leading `./`
|
||||
subpath.normalise "foo/bar"
|
||||
=> "./foo/bar"
|
||||
|
||||
# remove trailing `/`
|
||||
subpath.normalise "foo/bar/"
|
||||
=> "./foo/bar"
|
||||
# remove trailing `/`
|
||||
subpath.normalise "foo/bar/"
|
||||
=> "./foo/bar"
|
||||
|
||||
# remove trailing `/.`
|
||||
subpath.normalise "foo/bar/."
|
||||
=> "./foo/bar"
|
||||
# remove trailing `/.`
|
||||
subpath.normalise "foo/bar/."
|
||||
=> "./foo/bar"
|
||||
|
||||
# Return the current directory as `./.`
|
||||
subpath.normalise "."
|
||||
=> "./."
|
||||
# Return the current directory as `./.`
|
||||
subpath.normalise "."
|
||||
=> "./."
|
||||
|
||||
# error on `..` path components
|
||||
subpath.normalise "foo/../bar"
|
||||
=> <error>
|
||||
# error on `..` path components
|
||||
subpath.normalise "foo/../bar"
|
||||
=> <error>
|
||||
|
||||
# error on empty string
|
||||
subpath.normalise ""
|
||||
=> <error>
|
||||
# error on empty string
|
||||
subpath.normalise ""
|
||||
=> <error>
|
||||
|
||||
# error on absolute path
|
||||
subpath.normalise "/foo"
|
||||
=> <error>
|
||||
# error on absolute path
|
||||
subpath.normalise "/foo"
|
||||
=> <error>
|
||||
*/
|
||||
subpath.normalise =
|
||||
# The subpath string to normalise
|
||||
|
|
|
@@ -18,7 +18,14 @@ pkgs.runCommand "lib-path-tests" {
  ];
} ''
  # Needed to make Nix evaluation work
  export TEST_ROOT=$(pwd)/test-tmp
  export NIX_BUILD_HOOK=
  export NIX_CONF_DIR=$TEST_ROOT/etc
  export NIX_LOCALSTATE_DIR=$TEST_ROOT/var
  export NIX_LOG_DIR=$TEST_ROOT/var/log/nix
  export NIX_STATE_DIR=$TEST_ROOT/var/nix
  export NIX_STORE_DIR=$TEST_ROOT/store
  export PAGER=cat

  cp -r ${libpath} lib
  export TEST_LIB=$PWD/lib
third_party/nixpkgs/lib/strings.nix (vendored, 4 changes)

@@ -629,10 +629,10 @@ rec {
      This behavior is deprecated and will throw an error in the future.''
    (let
      preLen = stringLength prefix;
    in
      if substring 0 preLen str == prefix then
        # -1 will take the string until the end
        substring preLen (-1) str
      else
        str);
third_party/nixpkgs/lib/systems/parse.nix (vendored, 2 changes)

@@ -221,6 +221,8 @@ rec {
  vendors = setTypes types.openVendor {
    apple = {};
    pc = {};
    knuth = {};

    # Actually matters, unlocking some MinGW-w64-specific options in GCC. See
    # bottom of https://sourceforge.net/p/mingw-w64/wiki2/Unicode%20apps/
    w64 = {};
third_party/nixpkgs/lib/tests/misc.nix (vendored, 59 changes)

@@ -349,6 +349,27 @@ runTests {
    expected = true;
  };

  testRemovePrefixExample1 = {
    expr = removePrefix "foo." "foo.bar.baz";
    expected = "bar.baz";
  };
  testRemovePrefixExample2 = {
    expr = removePrefix "xxx" "foo.bar.baz";
    expected = "foo.bar.baz";
  };
  testRemovePrefixEmptyPrefix = {
    expr = removePrefix "" "foo";
    expected = "foo";
  };
  testRemovePrefixEmptyString = {
    expr = removePrefix "foo" "";
    expected = "";
  };
  testRemovePrefixEmptyBoth = {
    expr = removePrefix "" "";
    expected = "";
  };

  testNormalizePath = {
    expr = strings.normalizePath "//a/b//c////d/";
    expected = "/a/b/c/d/";

@@ -492,6 +513,44 @@ runTests {
    ([ 1 2 3 ] == (take 4 [ 1 2 3 ]))
  ];

  testListHasPrefixExample1 = {
    expr = lists.hasPrefix [ 1 2 ] [ 1 2 3 4 ];
    expected = true;
  };
  testListHasPrefixExample2 = {
    expr = lists.hasPrefix [ 0 1 ] [ 1 2 3 4 ];
    expected = false;
  };
  testListHasPrefixLazy = {
    expr = lists.hasPrefix [ 1 ] [ 1 (abort "lib.lists.hasPrefix is not lazy") ];
    expected = true;
  };
  testListHasPrefixEmptyPrefix = {
    expr = lists.hasPrefix [ ] [ 1 2 ];
    expected = true;
  };
  testListHasPrefixEmptyList = {
    expr = lists.hasPrefix [ 1 2 ] [ ];
    expected = false;
  };

  testListRemovePrefixExample1 = {
    expr = lists.removePrefix [ 1 2 ] [ 1 2 3 4 ];
    expected = [ 3 4 ];
  };
  testListRemovePrefixExample2 = {
    expr = (builtins.tryEval (lists.removePrefix [ 0 1 ] [ 1 2 3 4 ])).success;
    expected = false;
  };
  testListRemovePrefixEmptyPrefix = {
    expr = lists.removePrefix [ ] [ 1 2 ];
    expected = [ 1 2 ];
  };
  testListRemovePrefixEmptyList = {
    expr = (builtins.tryEval (lists.removePrefix [ 1 2 ] [ ])).success;
    expected = false;
  };

  testFoldAttrs = {
    expr = foldAttrs (n: a: [n] ++ a) [] [
      { a = 2; b = 7; }
third_party/nixpkgs/lib/tests/modules.sh (vendored, 5 changes)

@@ -393,6 +393,11 @@ checkConfigError \
    config.set \
    ./declare-set.nix ./declare-enable-nested.nix

# Options: accidental use of an option-type instead of option (or other tagged type; unlikely)
checkConfigError 'In module .*/options-type-error-typical.nix: expected an option declaration at option path .result. but got an attribute set with type option-type' config.result ./options-type-error-typical.nix
checkConfigError 'In module .*/options-type-error-typical-nested.nix: expected an option declaration at option path .result.here. but got an attribute set with type option-type' config.result.here ./options-type-error-typical-nested.nix
checkConfigError 'In module .*/options-type-error-configuration.nix: expected an option declaration at option path .result. but got an attribute set with type configuration' config.result ./options-type-error-configuration.nix

# Check that merging of option collisions doesn't depend on type being set
checkConfigError 'The option .group..*would be a parent of the following options, but its type .<no description>. does not support nested options.\n\s*- option.s. with prefix .group.enable..*' config.group.enable ./merge-typeless-option.nix
third_party/nixpkgs/lib/tests/modules/options-type-error-configuration.nix (vendored, new file, 6 lines)

@@ -0,0 +1,6 @@
{ lib, ... }: {
  options = {
    # unlikely mistake, but we can catch any attrset with _type
    result = lib.evalModules { modules = []; };
  };
}

third_party/nixpkgs/lib/tests/modules/options-type-error-typical-nested.nix (vendored, new file, 5 lines)

@@ -0,0 +1,5 @@
{ lib, ... }: {
  options = {
    result.here = lib.types.str;
  };
}

third_party/nixpkgs/lib/tests/modules/options-type-error-typical.nix (vendored, new file, 5 lines)

@@ -0,0 +1,5 @@
{ lib, ... }: {
  options = {
    result = lib.types.str;
  };
}
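For contrast with the mistaken modules above, the corrected form wraps the type in an actual option declaration. This sketch is illustrative only and not part of the test suite:

```nix
{ lib, ... }: {
  options = {
    # `lib.types.str` by itself is a type, not an option declaration;
    # wrapping it with lib.mkOption is what the new error message asks for.
    result = lib.mkOption {
      type = lib.types.str;
    };
  };
}
```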
third_party/nixpkgs/lib/trivial.nix (vendored, 4 changes)

@@ -307,14 +307,14 @@ rec {

  /* Reads a JSON file.

     Type: importJSON :: path -> any
  */
  importJSON = path:
    builtins.fromJSON (builtins.readFile path);

  /* Reads a TOML file.

     Type: importTOML :: path -> any
  */
  importTOML = path:
    builtins.fromTOML (builtins.readFile path);
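As a usage sketch (the file names below are hypothetical), both helpers read and parse a file in one step:

```nix
{ lib, ... }:
let
  packageMeta = lib.importJSON ./package.json;  # hypothetical JSON file
  cargoMeta   = lib.importTOML ./Cargo.toml;    # hypothetical TOML file
in {
  version   = packageMeta.version;
  crateName = cargoMeta.package.name;
}
```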
third_party/nixpkgs/lib/types.nix (vendored, 7 changes)

@@ -436,7 +436,12 @@ rec {

  # Deprecated; should not be used because it quietly concatenates
  # strings, which is usually not what you want.
  # We use a lib.warn because `deprecationMessage` doesn't trigger in nested types such as `attrsOf string`
  string = lib.warn
    "The type `types.string` is deprecated. See https://github.com/NixOS/nixpkgs/pull/66346 for better alternative types."
    (separatedString "" // {
      name = "string";
    });

  passwdEntry = entryType: addCheck entryType (str: !(hasInfix ":" str || hasInfix "\n" str)) // {
    name = "passwdEntry ${entryType.name}";
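For context, a sketch of what the warning steers module authors towards; the option names are invented for illustration:

```nix
{ lib, ... }: {
  options = {
    # Deprecated: `types.string` silently concatenates multiple definitions.
    motd = lib.mkOption { type = lib.types.string; };
    # Usually intended instead: refuse to merge conflicting definitions...
    hostName = lib.mkOption { type = lib.types.str; };
    # ...or make the concatenation separator explicit.
    extraConfig = lib.mkOption { type = lib.types.lines; };
  };
}
```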
third_party/nixpkgs/maintainers/README.md (vendored, new file, 114 lines)

@@ -0,0 +1,114 @@
# Nixpkgs Maintainers

The *Nixpkgs maintainers* are people who have assigned themselves to
maintain specific individual packages. We encourage people who care
about a package to assign themselves as a maintainer. When a pull
request is made against a package, OfBorg will notify the appropriate
maintainer(s).

## Reviewing contributions

### Individual maintainer list

When adding users to [`maintainer-list.nix`](./maintainer-list.nix), the following
checks should be performed:

- If the user has specified a GPG key, verify that the commit is
  signed by their key.

  First, validate that the commit adding the maintainer is signed by
  the key the maintainer listed. Check out the pull request and
  compare its signing key with the listed key in the commit.

  If the commit is not signed or it is signed by a different user, ask
  them to either recommit using that key or to remove their key
  information.

  Given a maintainer entry like this:

  ``` nix
  {
    example = {
      email = "user@example.com";
      name = "Example User";
      keys = [{
        fingerprint = "0000 0000 2A70 6423 0AED 3C11 F04F 7A19 AAA6 3AFE";
      }];
    };
  }
  ```

  First receive their key from a keyserver:

      $ gpg --recv-keys 0xF04F7A19AAA63AFE
      gpg: key 0xF04F7A19AAA63AFE: public key "Example <user@example.com>" imported
      gpg: Total number processed: 1
      gpg:               imported: 1

  Then check the commit is signed by that key:

      $ git log --show-signature
      commit b87862a4f7d32319b1de428adb6cdbdd3a960153
      gpg: Signature made Wed Mar 12 13:32:24 2003 +0000
      gpg:                using RSA key 000000002A7064230AED3C11F04F7A19AAA63AFE
      gpg: Good signature from "Example User <user@example.com>"
      Author: Example User <user@example.com>
      Date:   Wed Mar 12 13:32:24 2003 +0000

          maintainers: adding example

  and validate that there is a `Good signature` and the printed key
  matches the user's submitted key.

  Note: GitHub's "Verified" label does not display the user's full key
  fingerprint, and should not be used for validating the key matches.

- If the user has specified a `github` account name, ensure they have
  also specified a `githubId` and verify the two match.

  Maintainer entries that include a `github` field must also include
  their `githubId`. People can and do change their GitHub name
  frequently, and the ID is used as the official and stable identity
  of the maintainer.

  Given a maintainer entry like this:

  ``` nix
  {
    example = {
      email = "user@example.com";
      name = "Example User";
      github = "ghost";
      githubId = 10137;
    };
  }
  ```

  First, make sure that the listed GitHub handle matches the author of
  the commit.

  Then, visit the URL `https://api.github.com/users/ghost` and
  validate that the `id` field matches the provided `githubId`.

### Maintainer teams

Feel free to create a new maintainer team in [`team-list.nix`](./team-list.nix)
when a group is collectively responsible for a collection of packages.
Use taste and personal judgement when deciding if a team is warranted.
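For illustration only (this is not a real team, and the member names are made up), an entry in `team-list.nix` typically looks like:

```nix
{
  example-team = {
    # members are attribute names from maintainer-list.nix
    members = [ alice bob ];
    scope = "Maintain the example suite of packages.";
    shortName = "Example";
  };
}
```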
Teams are allowed to define their own rules about membership.

For example, some teams will represent a business or other group which
wants to carefully track its members. Other teams may be very open about
who can join, and allow anybody to participate.

When reviewing changes to a team, read the team's scope and the context
around the member list for indications about the team's membership
policy.

In any case, request reviews from the existing team members. If the team
lists no specific membership policy, feel free to merge changes to the
team after giving the existing members a few days to respond.

*Important:* If a team says it is a closed group, do not merge additions
to the team without an approval by at least one existing member.
third_party/nixpkgs/maintainers/maintainer-list.nix (vendored, 221 changes)

New maintainer entries added (shown here in compact form):

  albertchae = { github = "albertchae"; githubId = 217050; name = "Albert Chae"; };
  antonmosich = { email = "anton@mosich.at"; github = "antonmosich"; githubId = 27223336; name = "Anton Mosich"; };
  arcayr = { email = "nix@arcayr.online"; github = "arcayr"; githubId = 11192354; name = "Elliot Speck"; };
  asciimoth = { name = "Andrew"; email = "ascii@moth.contact"; github = "asciimoth"; githubId = 91414737; keys = [{ fingerprint = "C5C8 4658 CCFD 7E8E 71DE E933 AF3A E54F C3A3 5C9F"; }]; };
  aynish = { github = "Chickensoupwithrice"; githubId = 22575913; name = "Anish Lakhwara"; };
  codifryed = { email = "gb@guyboldon.com"; name = "Guy Boldon"; github = "codifryed"; githubId = 27779510; keys = [{ fingerprint = "FDF5 EF67 8CC1 FE22 1845 6A22 CF7B BB5B C756 1BD3"; }]; };
  danielrolls = { email = "daniel.rolls.27@googlemail.com"; github = "danielrolls"; githubId = 50051176; name = "Daniel Rolls"; };
  DMills27 = { github = "DMills27"; githubId = 5251658; name = "Dominic Mills"; };
  eclairevoyant = { github = "eclairevoyant"; githubId = 848000; name = "éclairevoyant"; };
  errnoh = { github = "errnoh"; githubId = 373946; name = "Erno Hopearuoho"; };
  fernsehmuell = { email = "fernsehmuel@googlemail.com"; matrix = "@fernsehmuell:matrix.org"; github = "fernsehmuell"; githubId = 5198058; name = "Udo Sauer"; };
  GirardR1006 = { email = "julien.girard2@cea.fr"; github = "GirardR1006"; githubId = 19275558; name = "Julien Girard-Satabin"; };
  iynaix = { email = "iynaix@gmail.com"; github = "iynaix"; githubId = 94313; name = "Xianyi Lin"; };
  jacbart = { name = "Jack Bartlett"; email = "jacbart@gmail.com"; github = "jacbart"; githubId = 7909687; };
  jaduff = { email = "jdduffpublic@proton.me"; github = "jaduff"; githubId = 10690970; name = "James Duff"; };
  jgarcia = { github = "chewblacka"; githubId = 18430320; name = "John Garcia"; };
  jmillerpdt = { email = "jcmiller@pdtpartners.com"; github = "jmillerpdt"; githubId = 54179289; name = "Jason Miller"; };
  kashw2 = { email = "supra4keanu@hotmail.com"; github = "kashw2"; githubId = 15855440; name = "Keanu Ashwell"; };
  katexochen = { github = "katexochen"; githubId = 49727155; name = "Paul Meyer"; };
  lu15w1r7h = { email = "lwirth2000@gmail.com"; github = "LU15W1R7H"; githubId = 37505890; name = "Luis Wirth"; };
  michaeldonovan = { email = "michael@mdonovan.dev"; name = "Michael Donovan"; github = "michaeldonovan"; githubId = 14077230; };
  quadradical = { email = "nixos@henryhiles.com"; github = "Henry-Hiles"; githubId = 71790868; name = "Henry Hiles"; };
  rotaerk = { name = "Matthew Stewart"; email = "m.scott.stewart@gmail.com"; github = "rotaerk"; githubId = 17690823; };
  scm2342 = { name = "Sven Mattsen"; email = "nix@sven.cc"; matrix = "@scm:matrix.sven.cc"; github = "scm2342"; githubId = 154108; };
  sfr = { email = "sol@solfisher.com"; matrix = "@sfr:enby.space"; github = "solfisher"; githubId = 57151943; name = "Sol Fisher Romanoff"; };
  spikespaz = { name = "Jacob Birkett"; email = "support@birkett.dev"; github = "spikespaz"; githubId = 12502988; };
  swesterfeld = { email = "stefan@space.twc.de"; github = "swesterfeld"; githubId = 14840066; name = "Stefan Westerfeld"; };
  tbidne = { email = "tbidne@protonmail.com"; github = "tbidne"; githubId = 2856188; name = "Thomas Bidne"; };
  tiredofit = { email = "dave@tiredofit.ca"; github = "tiredofit"; githubId = 23528985; name = "Dave Conroy"; matrix = "@dave:tiredofit.ca"; };
  totalchaos = { email = "basil.keeler@outlook.com"; github = "totalchaos05"; githubId = 70387628; name = "Basil Keeler"; };
  viluon = { email = "nix@viluon.me"; github = "viluon"; githubId = 7235381; name = "Ondřej Kvapil"; };
  yavko = { name = "Yavor Kolev"; email = "yavornkolev@gmail.com"; matrix = "@yavor:nikolay.ems.host"; github = "yavko"; githubId = 15178513; keys = [ { fingerprint = "DC05 7015 ECD7 E68A 6426 EFD8 F07D 19A3 2407 F857"; } { fingerprint = "2874 581F F832 C9E9 AEC6 8D84 E57B F27C 8BB0 80B0"; } ]; };
  YorikSar = { name = "Yuriy Taraday"; email = "yorik.sar@gmail.com"; matrix = "@yorik.sar:matrix.org"; github = "YorikSar"; githubId = 428074; };

Removed maintainer entries:

  alexeyre = { email = "A.Eyre@sms.ed.ac.uk"; github = "alexeyre"; githubId = 38869148; name = "Alex Eyre"; };
  kototama = { email = "kototama@posteo.jp"; github = "kototama"; githubId = 128620; name = "Kototama"; };

Updated maintainer entries:

  rs0vere: email changed from "rs0vere@outlook.com" to "rs0vere@proton.me".

@@ -66,6 +66,7 @@ sed -r \
  -e '/ hie-bios /d' \
  -e '/ ShellCheck /d' \
  -e '/ Agda /d' \
  -e '/ stack /d' \
  < "${tmpfile_new}" >> $stackage_config
# Explanations:
# cabal2nix, distribution-nixpkgs, jailbreak-cabal, language-nix: These are our packages and we know what we are doing.
|
|
@ -4,20 +4,20 @@
|
|||
# - maintainers/scripts/update-luarocks-packages
|
||||
|
||||
# format:
|
||||
# $ nix run nixpkgs.python3Packages.black -c black update.py
|
||||
# $ nix run nixpkgs#black maintainers/scripts/pluginupdate.py
|
||||
# type-check:
|
||||
# $ nix run nixpkgs.python3Packages.mypy -c mypy update.py
|
||||
# $ nix run nixpkgs#python3.pkgs.mypy maintainers/scripts/pluginupdate.py
|
||||
# linted:
|
||||
# $ nix run nixpkgs.python3Packages.flake8 -c flake8 --ignore E501,E265 update.py
|
||||
# $ nix run nixpkgs#python3.pkgs.flake8 -- --ignore E501,E265 maintainers/scripts/pluginupdate.py
|
||||
|
||||
import argparse
|
||||
import csv
|
||||
import functools
|
||||
import http
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
import logging
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
|
@ -25,14 +25,14 @@ import urllib.error
|
|||
import urllib.parse
|
||||
import urllib.request
|
||||
import xml.etree.ElementTree as ET
|
||||
from dataclasses import asdict, dataclass
|
||||
from datetime import datetime
|
||||
from functools import wraps
|
||||
from multiprocessing.dummy import Pool
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Tuple, Union, Any, Callable
|
||||
from urllib.parse import urljoin, urlparse
|
||||
from tempfile import NamedTemporaryFile
|
||||
from dataclasses import dataclass, asdict
|
||||
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
||||
from urllib.parse import urljoin, urlparse
|
||||
|
||||
import git
|
||||
|
||||
|
@ -41,12 +41,13 @@ ATOM_LINK = "{http://www.w3.org/2005/Atom}link" # "
|
|||
ATOM_UPDATED = "{http://www.w3.org/2005/Atom}updated" # "
|
||||
|
||||
LOG_LEVELS = {
|
||||
logging.getLevelName(level): level for level in [
|
||||
logging.DEBUG, logging.INFO, logging.WARN, logging.ERROR ]
|
||||
logging.getLevelName(level): level
|
||||
for level in [logging.DEBUG, logging.INFO, logging.WARN, logging.ERROR]
|
||||
}
|
||||
|
||||
log = logging.getLogger()
|
||||
|
||||
|
||||
def retry(ExceptionToCheck: Any, tries: int = 4, delay: float = 3, backoff: float = 2):
|
||||
"""Retry calling the decorated function using an exponential backoff.
|
||||
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
|
||||
|
@ -77,6 +78,7 @@ def retry(ExceptionToCheck: Any, tries: int = 4, delay: float = 3, backoff: floa
|
|||
|
||||
return deco_retry
|
||||
|
||||
|
||||
@dataclass
|
||||
class FetchConfig:
|
||||
proc: int
|
||||
|
@ -91,22 +93,21 @@ def make_request(url: str, token=None) -> urllib.request.Request:
|
|||
|
||||
|
||||
# a dictionary of plugins and their new repositories
|
||||
Redirects = Dict['PluginDesc', 'Repo']
|
||||
Redirects = Dict["PluginDesc", "Repo"]
|
||||
|
||||
|
||||
class Repo:
|
||||
def __init__(
|
||||
self, uri: str, branch: str
|
||||
) -> None:
|
||||
def __init__(self, uri: str, branch: str) -> None:
|
||||
self.uri = uri
|
||||
'''Url to the repo'''
|
||||
"""Url to the repo"""
|
||||
self._branch = branch
|
||||
# Redirect is the new Repo to use
|
||||
self.redirect: Optional['Repo'] = None
|
||||
self.redirect: Optional["Repo"] = None
|
||||
self.token = "dummy_token"
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
return self.uri.split('/')[-1]
|
||||
return self.uri.split("/")[-1]
|
||||
|
||||
@property
|
||||
def branch(self):
|
||||
|
@ -114,6 +115,7 @@ class Repo:
|
|||
|
||||
def __str__(self) -> str:
|
||||
return f"{self.uri}"
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"Repo({self.name}, {self.uri})"
|
||||
|
||||
|
@ -125,9 +127,9 @@ class Repo:
|
|||
def latest_commit(self) -> Tuple[str, datetime]:
|
||||
log.debug("Latest commit")
|
||||
loaded = self._prefetch(None)
|
||||
updated = datetime.strptime(loaded['date'], "%Y-%m-%dT%H:%M:%S%z")
|
||||
updated = datetime.strptime(loaded["date"], "%Y-%m-%dT%H:%M:%S%z")
|
||||
|
||||
return loaded['rev'], updated
|
||||
return loaded["rev"], updated
|
||||
|
||||
def _prefetch(self, ref: Optional[str]):
|
||||
cmd = ["nix-prefetch-git", "--quiet", "--fetch-submodules", self.uri]
|
||||
|
@ -144,23 +146,23 @@ class Repo:
|
|||
return loaded["sha256"]
|
||||
|
||||
def as_nix(self, plugin: "Plugin") -> str:
|
||||
return f'''fetchgit {{
|
||||
return f"""fetchgit {{
|
||||
url = "{self.uri}";
|
||||
rev = "{plugin.commit}";
|
||||
sha256 = "{plugin.sha256}";
|
||||
}}'''
|
||||
}}"""
|
||||
|
||||
|
||||
class RepoGitHub(Repo):
|
||||
def __init__(
|
||||
self, owner: str, repo: str, branch: str
|
||||
) -> None:
|
||||
def __init__(self, owner: str, repo: str, branch: str) -> None:
|
||||
self.owner = owner
|
||||
self.repo = repo
|
||||
self.token = None
|
||||
'''Url to the repo'''
|
||||
"""Url to the repo"""
|
||||
super().__init__(self.url(""), branch)
|
||||
log.debug("Instantiating github repo owner=%s and repo=%s", self.owner, self.repo)
|
||||
log.debug(
|
||||
"Instantiating github repo owner=%s and repo=%s", self.owner, self.repo
|
||||
)
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
|
@ -213,7 +215,6 @@ class RepoGitHub(Repo):
|
|||
new_repo = RepoGitHub(owner=new_owner, repo=new_name, branch=self.branch)
|
||||
self.redirect = new_repo
|
||||
|
||||
|
||||
def prefetch(self, commit: str) -> str:
|
||||
if self.has_submodules():
|
||||
sha256 = super().prefetch(commit)
|
||||
|
@ -233,12 +234,12 @@ class RepoGitHub(Repo):
|
|||
else:
|
||||
submodule_attr = ""
|
||||
|
||||
return f'''fetchFromGitHub {{
|
||||
return f"""fetchFromGitHub {{
|
||||
owner = "{self.owner}";
|
||||
repo = "{self.repo}";
|
||||
rev = "{plugin.commit}";
|
||||
sha256 = "{plugin.sha256}";{submodule_attr}
|
||||
}}'''
|
||||
}}"""
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
|
@ -258,15 +259,14 @@ class PluginDesc:
|
|||
return self.repo.name < other.repo.name
|
||||
|
||||
@staticmethod
|
||||
def load_from_csv(config: FetchConfig, row: Dict[str, str]) -> 'PluginDesc':
|
||||
def load_from_csv(config: FetchConfig, row: Dict[str, str]) -> "PluginDesc":
|
||||
branch = row["branch"]
|
||||
repo = make_repo(row['repo'], branch.strip())
|
||||
repo = make_repo(row["repo"], branch.strip())
|
||||
repo.token = config.github_token
|
||||
return PluginDesc(repo, branch.strip(), row["alias"])
|
||||
|
||||
|
||||
@staticmethod
|
||||
def load_from_string(config: FetchConfig, line: str) -> 'PluginDesc':
|
||||
def load_from_string(config: FetchConfig, line: str) -> "PluginDesc":
|
||||
branch = "HEAD"
|
||||
alias = None
|
||||
uri = line
|
||||
|
@ -279,6 +279,7 @@ class PluginDesc:
|
|||
repo.token = config.github_token
|
||||
return PluginDesc(repo, branch.strip(), alias)
|
||||
|
||||
|
||||
@dataclass
|
||||
class Plugin:
|
||||
name: str
|
||||
|
@ -302,22 +303,38 @@ class Plugin:
|
|||
return copy
|
||||
|
||||
|
||||
def load_plugins_from_csv(config: FetchConfig, input_file: Path,) -> List[PluginDesc]:
|
||||
def load_plugins_from_csv(
|
||||
config: FetchConfig,
|
||||
input_file: Path,
|
||||
) -> List[PluginDesc]:
|
||||
log.debug("Load plugins from csv %s", input_file)
|
||||
plugins = []
|
||||
with open(input_file, newline='') as csvfile:
|
||||
with open(input_file, newline="") as csvfile:
|
||||
log.debug("Writing into %s", input_file)
|
||||
reader = csv.DictReader(csvfile,)
|
||||
reader = csv.DictReader(
|
||||
csvfile,
|
||||
)
|
||||
for line in reader:
|
||||
plugin = PluginDesc.load_from_csv(config, line)
|
||||
plugins.append(plugin)
|
||||
|
||||
return plugins
|
||||
|
||||
|
||||
def run_nix_expr(expr):
|
||||
with CleanEnvironment():
|
||||
cmd = ["nix", "eval", "--extra-experimental-features",
|
||||
"nix-command", "--impure", "--json", "--expr", expr]
|
||||
with CleanEnvironment() as nix_path:
|
||||
cmd = [
|
||||
"nix",
|
||||
"eval",
|
||||
"--extra-experimental-features",
|
||||
"nix-command",
|
||||
"--impure",
|
||||
"--json",
|
||||
"--expr",
|
||||
expr,
|
||||
"--nix-path",
|
||||
nix_path,
|
||||
]
|
||||
log.debug("Running command %s", " ".join(cmd))
|
||||
out = subprocess.check_output(cmd)
|
||||
data = json.loads(out)
|
||||
|
@ -348,7 +365,7 @@ class Editor:
|
|||
self.nixpkgs_repo = None
|
||||
|
||||
def add(self, args):
|
||||
'''CSV spec'''
|
||||
"""CSV spec"""
|
||||
log.debug("called the 'add' command")
|
||||
fetch_config = FetchConfig(args.proc, args.github_token)
|
||||
editor = self
|
||||
|
@ -356,23 +373,27 @@ class Editor:
|
|||
log.debug("using plugin_line", plugin_line)
|
||||
pdesc = PluginDesc.load_from_string(fetch_config, plugin_line)
|
||||
log.debug("loaded as pdesc", pdesc)
|
||||
append = [ pdesc ]
|
||||
editor.rewrite_input(fetch_config, args.input_file, editor.deprecated, append=append)
|
||||
plugin, _ = prefetch_plugin(pdesc, )
|
||||
append = [pdesc]
|
||||
editor.rewrite_input(
|
||||
fetch_config, args.input_file, editor.deprecated, append=append
|
||||
)
|
||||
plugin, _ = prefetch_plugin(
|
||||
pdesc,
|
||||
)
|
||||
autocommit = not args.no_commit
|
||||
if autocommit:
|
||||
commit(
|
||||
editor.nixpkgs_repo,
|
||||
"{drv_name}: init at {version}".format(
|
||||
drv_name=editor.get_drv_name(plugin.normalized_name),
|
||||
version=plugin.version
|
||||
version=plugin.version,
|
||||
),
|
||||
[args.outfile, args.input_file],
|
||||
)
|
||||
|
||||
# Expects arguments generated by 'update' subparser
|
||||
def update(self, args ):
|
||||
'''CSV spec'''
|
||||
def update(self, args):
|
||||
"""CSV spec"""
|
||||
print("the update member function should be overriden in subclasses")
|
||||
|
||||
def get_current_plugins(self) -> List[Plugin]:
|
||||
|
@ -385,11 +406,11 @@ class Editor:
|
|||
return plugins
|
||||
|
||||
def load_plugin_spec(self, config: FetchConfig, plugin_file) -> List[PluginDesc]:
|
||||
'''CSV spec'''
|
||||
"""CSV spec"""
|
||||
return load_plugins_from_csv(config, plugin_file)
|
||||
|
||||
def generate_nix(self, _plugins, _outfile: str):
|
||||
'''Returns nothing for now, writes directly to outfile'''
|
||||
"""Returns nothing for now, writes directly to outfile"""
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_update(self, input_file: str, outfile: str, config: FetchConfig):
|
||||
|
@ -413,7 +434,6 @@ class Editor:
|
|||
|
||||
return update
|
||||
|
||||
|
||||
@property
|
||||
def attr_path(self):
|
||||
return self.name + "Plugins"
|
||||
|
@ -427,10 +447,11 @@ class Editor:
|
|||
def create_parser(self):
|
||||
common = argparse.ArgumentParser(
|
||||
add_help=False,
|
||||
description=(f"""
|
||||
description=(
|
||||
f"""
|
||||
Updates nix derivations for {self.name} plugins.\n
|
||||
By default from {self.default_in} to {self.default_out}"""
|
||||
)
|
||||
),
|
||||
)
|
||||
common.add_argument(
|
||||
"--input-names",
|
||||
|
@ -463,26 +484,33 @@ class Editor:
|
|||
Uses GITHUB_API_TOKEN environment variables as the default value.""",
|
||||
)
|
||||
common.add_argument(
|
||||
"--no-commit", "-n", action="store_true", default=False,
|
||||
help="Whether to autocommit changes"
|
||||
"--no-commit",
|
||||
"-n",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Whether to autocommit changes",
|
||||
)
|
||||
common.add_argument(
|
||||
"--debug", "-d", choices=LOG_LEVELS.keys(),
|
||||
"--debug",
|
||||
"-d",
|
||||
choices=LOG_LEVELS.keys(),
|
||||
default=logging.getLevelName(logging.WARN),
|
||||
help="Adjust log level"
|
||||
help="Adjust log level",
|
||||
)
|
||||
|
||||
main = argparse.ArgumentParser(
|
||||
parents=[common],
|
||||
description=(f"""
|
||||
description=(
|
||||
f"""
|
||||
Updates nix derivations for {self.name} plugins.\n
|
||||
By default from {self.default_in} to {self.default_out}"""
|
||||
)
|
||||
),
|
||||
)
|
||||
|
||||
subparsers = main.add_subparsers(dest="command", required=False)
|
||||
padd = subparsers.add_parser(
|
||||
"add", parents=[],
|
||||
"add",
|
||||
parents=[],
|
||||
description="Add new plugin",
|
||||
add_help=False,
|
||||
)
|
||||
|
@ -502,10 +530,12 @@ class Editor:
|
|||
pupdate.set_defaults(func=self.update)
|
||||
return main
|
||||
|
||||
def run(self,):
|
||||
'''
|
||||
def run(
|
||||
self,
|
||||
):
|
||||
"""
|
||||
Convenience function
|
||||
'''
|
||||
"""
|
||||
parser = self.create_parser()
|
||||
args = parser.parse_args()
|
||||
command = args.command or "update"
|
||||
|
@ -518,17 +548,15 @@ class Editor:
|
|||
getattr(self, command)(args)
|
||||
|
||||
|
||||
|
||||
|
||||
class CleanEnvironment(object):
|
||||
def __enter__(self) -> None:
|
||||
def __enter__(self) -> str:
|
||||
self.old_environ = os.environ.copy()
|
||||
local_pkgs = str(Path(__file__).parent.parent.parent)
|
||||
os.environ["NIX_PATH"] = f"localpkgs={local_pkgs}"
|
||||
self.empty_config = NamedTemporaryFile()
|
||||
self.empty_config.write(b"{}")
|
||||
self.empty_config.flush()
|
||||
os.environ["NIXPKGS_CONFIG"] = self.empty_config.name
|
||||
return f"localpkgs={local_pkgs}"
|
||||
|
||||
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
|
||||
os.environ.update(self.old_environ)
|
||||
|
@ -570,14 +598,15 @@ def print_download_error(plugin: PluginDesc, ex: Exception):
|
|||
]
|
||||
print("\n".join(tb_lines))
|
||||
|
||||
|
||||
def check_results(
|
||||
results: List[Tuple[PluginDesc, Union[Exception, Plugin], Optional[Repo]]]
|
||||
) -> Tuple[List[Tuple[PluginDesc, Plugin]], Redirects]:
|
||||
''' '''
|
||||
""" """
|
||||
failures: List[Tuple[PluginDesc, Exception]] = []
|
||||
plugins = []
|
||||
redirects: Redirects = {}
|
||||
for (pdesc, result, redirect) in results:
|
||||
for pdesc, result, redirect in results:
|
||||
if isinstance(result, Exception):
|
||||
failures.append((pdesc, result))
|
||||
else:
|
||||
|
@ -594,17 +623,18 @@ def check_results(
|
|||
else:
|
||||
print(f", {len(failures)} plugin(s) could not be downloaded:\n")
|
||||
|
||||
for (plugin, exception) in failures:
|
||||
for plugin, exception in failures:
|
||||
print_download_error(plugin, exception)
|
||||
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def make_repo(uri: str, branch) -> Repo:
|
||||
'''Instantiate a Repo with the correct specialization depending on server (gitub spec)'''
|
||||
"""Instantiate a Repo with the correct specialization depending on server (gitub spec)"""
|
||||
# dumb check to see if it's of the form owner/repo (=> github) or https://...
|
||||
res = urlparse(uri)
|
||||
if res.netloc in [ "github.com", ""]:
|
||||
res = res.path.strip('/').split('/')
|
||||
if res.netloc in ["github.com", ""]:
|
||||
res = res.path.strip("/").split("/")
|
||||
repo = RepoGitHub(res[0], res[1], branch)
|
||||
else:
|
||||
repo = Repo(uri.strip(), branch)
|
||||
|
@ -675,7 +705,6 @@ def prefetch(
|
|||
return (pluginDesc, e, None)
|
||||
|
||||
|
||||
|
||||
def rewrite_input(
|
||||
config: FetchConfig,
|
||||
input_file: Path,
|
||||
|
@ -684,12 +713,14 @@ def rewrite_input(
|
|||
redirects: Redirects = {},
|
||||
append: List[PluginDesc] = [],
|
||||
):
|
||||
plugins = load_plugins_from_csv(config, input_file,)
|
||||
plugins = load_plugins_from_csv(
|
||||
config,
|
||||
input_file,
|
||||
)
|
||||
|
||||
plugins.extend(append)
|
||||
|
||||
if redirects:
|
||||
|
||||
cur_date_iso = datetime.now().strftime("%Y-%m-%d")
|
||||
with open(deprecated, "r") as f:
|
||||
deprecations = json.load(f)
|
||||
|
@ -709,8 +740,8 @@ def rewrite_input(
|
|||
with open(input_file, "w") as f:
|
||||
log.debug("Writing into %s", input_file)
|
||||
# fields = dataclasses.fields(PluginDesc)
|
||||
fieldnames = ['repo', 'branch', 'alias']
|
||||
writer = csv.DictWriter(f, fieldnames, dialect='unix', quoting=csv.QUOTE_NONE)
|
||||
fieldnames = ["repo", "branch", "alias"]
|
||||
writer = csv.DictWriter(f, fieldnames, dialect="unix", quoting=csv.QUOTE_NONE)
|
||||
writer.writeheader()
|
||||
for plugin in sorted(plugins):
|
||||
writer.writerow(asdict(plugin))
|
||||
|
@ -726,7 +757,6 @@ def commit(repo: git.Repo, message: str, files: List[Path]) -> None:
|
|||
print("no changes in working tree to commit")
|
||||
|
||||
|
||||
|
||||
def update_plugins(editor: Editor, args):
|
||||
"""The main entry function of this module. All input arguments are grouped in the `Editor`."""
|
||||
|
||||
|
@ -751,4 +781,3 @@ def update_plugins(editor: Editor, args):
|
|||
f"{editor.attr_path}: resolve github repository redirects",
|
||||
[args.outfile, args.input_file, editor.deprecated],
|
||||
)
|
||||
|
||||
|
|
third_party/nixpkgs/maintainers/team-list.nix (vendored, 25 changes)

@@ -181,6 +181,19 @@ with lib.maintainers; {
    shortName = "Cosmopolitan";
  };

  dotnet = {
    members = [
      ivar
      mdarocha
      corngood
      raphaelr
      jamiemagee
      anpin
    ];
    scope = "Maintainers of the .NET build tools and packages";
    shortName = "dotnet";
  };

  deepin = {
    members = [
      rewine

@@ -413,6 +426,7 @@ with lib.maintainers; {

  jupyter = {
    members = [
      GaetanLepage
      natsukium
    ];
    scope = "Maintain Jupyter and related packages.";

@@ -732,6 +746,7 @@ with lib.maintainers; {
      fridh
      hexa
      jonringer
      tjni
    ];
    scope = "Maintain the Python interpreter and related packages.";
    shortName = "Python";

@@ -891,4 +906,14 @@ with lib.maintainers; {
    shortName = "Xfce";
    enableFeatureFreezePing = true;
  };

  zig = {
    members = [
      AndersonTorres
      figsoda
    ];
    scope = "Maintain the Zig compiler toolchain and nixpkgs integration.";
    shortName = "Zig";
    enableFeatureFreezePing = true;
  };
}
5
third_party/nixpkgs/nixos/README
vendored
5
third_party/nixpkgs/nixos/README
vendored
|
@ -1,5 +0,0 @@
|
|||
*** NixOS ***
|
||||
|
||||
NixOS is a Linux distribution based on the purely functional package
|
||||
management system Nix. More information can be found at
|
||||
https://nixos.org/nixos and in the manual in doc/manual.
|
86
third_party/nixpkgs/nixos/README.md
vendored
Normal file
86
third_party/nixpkgs/nixos/README.md
vendored
Normal file
|
@ -0,0 +1,86 @@
|
|||
# NixOS
|
||||
|
||||
NixOS is a Linux distribution based on the purely functional package
|
||||
management system Nix. More information can be found at
|
||||
https://nixos.org/nixos and in the manual in doc/manual.
|
||||
|
||||
## Testing changes
|
||||
|
||||
You can add new module to your NixOS configuration file (usually it’s `/etc/nixos/configuration.nix`). And do `sudo nixos-rebuild test -I nixpkgs=<path to your local nixpkgs folder> --fast`.
|
||||
|
||||
## Reviewing contributions
|
||||
|
||||
When changing the bootloader installation process, extra care must be taken. Grub installations cannot be rolled back, hence changes may break people’s installations forever. For any non-trivial change to the bootloader please file a PR asking for review, especially from \@edolstra.
|
||||
|
||||
### Module updates
|
||||
|
||||
Module updates are submissions changing modules in some ways. These often contains changes to the options or introduce new options.
|
||||
|
||||
Reviewing process:
|
||||
|
||||
- Ensure that the module maintainers are notified.
|
||||
- [CODEOWNERS](https://help.github.com/articles/about-codeowners/) will make GitHub notify users based on the submitted changes, but it can happen that it misses some of the package maintainers.
|
||||
- Ensure that the module tests, if any, are succeeding.
|
||||
- Ensure that the introduced options are correct.
|
||||
- Type should be appropriate (string related types differs in their merging capabilities, `loaOf` and `string` types are deprecated).
|
||||
- Description, default and example should be provided.
|
||||
- Ensure that option changes are backward compatible.
|
||||
- `mkRenamedOptionModuleWith` provides a way to make option changes backward compatible.
|
||||
- Ensure that removed options are declared with `mkRemovedOptionModule`
|
||||
- Ensure that changes that are not backward compatible are mentioned in release notes.
|
||||
- Ensure that documentations affected by the change is updated.
|
||||
|
||||
Sample template for a module update review is provided below.
|
||||
|
||||
```markdown
|
||||
##### Reviewed points
|
||||
|
||||
- [ ] changes are backward compatible
|
||||
- [ ] removed options are declared with `mkRemovedOptionModule`
|
||||
- [ ] changes that are not backward compatible are documented in release notes
|
||||
- [ ] module tests succeed on ARCHITECTURE
|
||||
- [ ] options types are appropriate
|
||||
- [ ] options description is set
|
||||
- [ ] options example is provided
|
||||
- [ ] documentation affected by the changes is updated
|
||||
|
||||
##### Possible improvements
|
||||
|
||||
##### Comments
|
||||
```
|
||||
|
||||
### New modules
|
||||
|
||||
New modules submissions introduce a new module to NixOS.
|
||||
|
||||
Reviewing process:
|
||||
|
||||
- Ensure that the module tests, if any, are succeeding.
|
||||
- Ensure that the introduced options are correct.
|
||||
- Type should be appropriate (string-related types differ in their merging capabilities; the `loaOf` and `string` types are deprecated).
|
||||
- Description, default and example should be provided.
|
||||
- Ensure that the module `meta` field is present (see the sketch after this list).
|
||||
- Maintainers should be declared in `meta.maintainers`.
|
||||
- Module documentation should be declared with `meta.doc`.
|
||||
- Ensure that the module respects other modules' functionality.
|
||||
- For example, enabling a module should not open firewall ports by default.
|
||||
|
||||
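
A minimal sketch of the `meta` section of a new module; the maintainer handle and documentation file are placeholders:

```nix
{ lib, ... }:

{
  meta = {
    # Placeholder handle from the global maintainer list
    maintainers = with lib.maintainers; [ someMaintainer ];
    # Markdown chapter describing the module in the NixOS manual
    doc = ./example.md;
  };
}
```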
Sample template for a new module review is provided below.
|
||||
|
||||
```markdown
|
||||
##### Reviewed points
|
||||
|
||||
- [ ] module path fits the guidelines
|
||||
- [ ] module tests succeed on ARCHITECTURE
|
||||
- [ ] options have appropriate types
|
||||
- [ ] options have default
|
||||
- [ ] options have example
|
||||
- [ ] options have descriptions
|
||||
- [ ] no unneeded package is added to `environment.systemPackages`
|
||||
- [ ] meta.maintainers is set
|
||||
- [ ] module documentation is declared in meta.doc
|
||||
|
||||
##### Possible improvements
|
||||
|
||||
##### Comments
|
||||
```
|
|
@ -118,3 +118,33 @@ the symlink, and this path is in `/nix/store/.../lib/systemd/user/`.
|
|||
Hence [garbage collection](#sec-nix-gc) will remove that file and you
|
||||
will wind up with a broken symlink in your systemd configuration, which
|
||||
in turn will not make the service / timer start on login.
|
||||
|
||||
## Template units {#sect-nixos-systemd-template-units}
|
||||
|
||||
systemd supports templated units where a base unit can be started multiple
|
||||
times with a different parameter. The syntax to accomplish this is
|
||||
`service-name@instance-name.service`. Units get the instance name passed to
|
||||
them (see `systemd.unit(5)`). NixOS has support for these kinds of units and
|
||||
for template-specific overrides. A service needs to be defined twice, once
|
||||
for the base unit and once for the instance. All instances must include
|
||||
`overrideStrategy = "asDropin"` for the change detection to work. This
|
||||
example illustrates this:
|
||||
```nix
|
||||
{
|
||||
systemd.services = {
|
||||
"base-unit@".serviceConfig = {
|
||||
ExecStart = "...";
|
||||
User = "...";
|
||||
};
|
||||
"base-unit@instance-a" = {
|
||||
overrideStrategy = "asDropin"; # needed for templates to work
|
||||
wantedBy = [ "multi-user.target" ]; # causes NixOS to manage the instance
|
||||
};
|
||||
"base-unit@instance-b" = {
|
||||
overrideStrategy = "asDropin"; # needed for templates to work
|
||||
wantedBy = [ "multi-user.target" ]; # causes NixOS to manage the instance
|
||||
serviceConfig.User = "root"; # also override something for this specific instance
|
||||
};
|
||||
};
|
||||
}
|
||||
```
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
# Contributing to this manual {#chap-contributing}
|
||||
|
||||
The [DocBook] and CommonMark sources of the NixOS manual are in the [nixos/doc/manual](https://github.com/NixOS/nixpkgs/tree/master/nixos/doc/manual) subdirectory of the [Nixpkgs](https://github.com/NixOS/nixpkgs) repository.
|
||||
This manual uses the [Nixpkgs manual syntax](https://nixos.org/manual/nixpkgs/unstable/#sec-contributing-markup).
|
||||
|
||||
You can quickly check your edits with the following:
|
||||
|
||||
```ShellSession
|
||||
$ cd /path/to/nixpkgs
|
||||
$ $EDITOR doc/nixos/manual/... # edit the manual
|
||||
$ nix-build nixos/release.nix -A manual.x86_64-linux
|
||||
```
|
||||
|
||||
|
@ -13,24 +15,96 @@ If the build succeeds, the manual will be in `./result/share/doc/nixos/index.htm
|
|||
|
||||
There's also [a convenient development daemon](https://nixos.org/manual/nixpkgs/unstable/#sec-contributing-devmode).
|
||||
|
||||
**Contributing to the man pages**
|
||||
The above instructions don't deal with the appendix of available `configuration.nix` options or with the manual pages related to NixOS. These are built and written in a different location and in a different format, as explained in the next sections.
|
||||
|
||||
The man pages are written in [DocBook] which is XML.
|
||||
## Contributing to the `configuration.nix` options documentation {#sec-contributing-options}
|
||||
|
||||
To see what your edits look like:
|
||||
The documentation for all the different `configuration.nix` options is automatically generated by reading the `description`s of all the NixOS options defined in `nixos/modules/`. If you want to improve such a `description`, find it in the `nixos/modules/` directory, edit it, and open a pull request.
|
||||
|
||||
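
For reference, a `description` as it typically appears in a module; the option itself is made up:

```nix
{ lib, ... }:

{
  options.services.example.greeting = lib.mkOption {
    type = lib.types.str;
    default = "hello";
    example = "goodbye";
    # This text ends up in the configuration.nix(5) appendix and in options.html
    description = lib.mdDoc ''
      The greeting printed by the example service on startup.
    '';
  };
}
```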
To see how your changes render on the web, run again:
|
||||
|
||||
```ShellSession
|
||||
$ nix-build nixos/release.nix -A manual.x86_64-linux
|
||||
```
|
||||
|
||||
And you'll see the changes to the appendix in the path `result/share/doc/nixos/options.html`.
|
||||
|
||||
You can also build only the `configuration.nix(5)` manual page, via:
|
||||
|
||||
```ShellSession
|
||||
$ cd /path/to/nixpkgs
|
||||
$ nix-build nixos/release.nix -A manpages.x86_64-linux
|
||||
$ nix-build nixos/release.nix -A nixos-configuration-reference-manpage.x86_64-linux
|
||||
```
|
||||
|
||||
You can then read the man page you edited by running
|
||||
And observe the result via:
|
||||
|
||||
```ShellSession
|
||||
$ man --manpath=result/share/man nixos-rebuild # Replace nixos-rebuild with the command whose manual you edited
|
||||
$ man --local-file result/share/man/man5/configuration.nix.5
|
||||
```
|
||||
|
||||
If you're on a different architecture that's supported by NixOS (check nixos/release.nix) then replace `x86_64-linux` with the architecture.
|
||||
`nix-build` will complain otherwise, but should also tell you which architecture you have + the supported ones.
|
||||
If you're on a different architecture that's supported by NixOS (check file `nixos/release.nix` on Nixpkgs' repository) then replace `x86_64-linux` with the architecture. `nix-build` will complain otherwise, but should also tell you which architecture you have + the supported ones.
|
||||
|
||||
[DocBook]: https://en.wikipedia.org/wiki/DocBook
|
||||
## Contributing to `nixos-*` tools' manpages {#sec-contributing-nixos-tools}
|
||||
|
||||
The manual pages for the tools available in the installation image can be found in Nixpkgs by running (e.g. for `nixos-rebuild`):
|
||||
|
||||
```ShellSession
|
||||
$ git ls | grep nixos-rebuild.8
|
||||
```
|
||||
|
||||
Man pages are written in [`mdoc(7)` format](https://mandoc.bsd.lv/man/mdoc.7.html) and should be portable between mandoc and groff for rendering (except for minor differences, notably different spacing rules.)
|
||||
|
||||
For a preview, run `man --local-file path/to/file.8`.
|
||||
|
||||
Being written in `mdoc`, these manpages use semantic markup. The following subsections provide a guideline on where to apply which semantic elements.
|
||||
|
||||
### Command lines and arguments {#ssec-contributing-nixos-tools-cli-and-args}
|
||||
|
||||
In any manpage, commands, flags and arguments to the *current* executable should be marked according to their semantics. Commands, flags and arguments passed to *other* executables should not be marked like this and should instead be considered as code examples and marked with `Ql`.
|
||||
|
||||
- Use `Fl` to mark flag arguments, `Ar` for their arguments.
|
||||
- Repeating arguments should be marked by adding an ellipsis (spelled with periods, `...`).
|
||||
- Use `Cm` to mark literal string arguments, e.g. the `boot` command argument passed to `nixos-rebuild`.
|
||||
- Optional flags or arguments should be marked with `Op`. This includes optional repeating arguments.
|
||||
- Required flags or arguments should not be marked.
|
||||
- Mutually exclusive groups of arguments should be enclosed in curly brackets, preferably created with `Bro`/`Brc` blocks.
|
||||
|
||||
When an argument is used in an example it should be marked up with `Ar` again to differentiate it from a constant. For example, a command with a `--host name` option that calls ssh to retrieve the host's local time would signify this thusly:
|
||||
```
|
||||
This will run
|
||||
.Ic ssh Ar name Ic time
|
||||
to retrieve the remote time.
|
||||
```
|
||||
|
||||
### Paths, NixOS options, environment variables {#ssec-contributing-nixos-tools-options-and-environment}
|
||||
|
||||
Constant paths should be marked with `Pa`, NixOS options with `Va`, and environment variables with `Ev`.
|
||||
|
||||
Generated paths, e.g. `result/bin/run-hostname-vm` (where `hostname` is a variable or an argument), should be marked as `Ql` inline literals with their variable components marked appropriately.
|
||||
|
||||
- When `hostname` refers to an argument, it becomes `.Ql result/bin/run- Ns Ar hostname Ns -vm`
|
||||
- When `hostname` refers to a variable, it becomes `.Ql result/bin/run- Ns Va hostname Ns -vm`
|
||||
|
||||
### Code examples and other commands {#ssec-contributing-nixos-tools-code-examples}
|
||||
|
||||
In free text names and complete invocations of other commands (e.g. `ssh` or `tar -xvf src.tar`) should be marked with `Ic`, fragments of command lines should be marked with `Ql`.
|
||||
|
||||
Larger code blocks or those that cannot be shown inline should use indented literal display block markup for their contents, i.e.
|
||||
|
||||
```
|
||||
.Bd -literal -offset indent
|
||||
...
|
||||
.Ed
|
||||
```
|
||||
|
||||
Contents of code blocks may be marked up further, e.g. if they refer to arguments that will be substituted into them:
|
||||
|
||||
```
|
||||
.Bd -literal -offset indent
|
||||
{
|
||||
config.networking.hostname = "\c
|
||||
.Ar hostname Ns \c
|
||||
";
|
||||
}
|
||||
.Ed
|
||||
```
|
||||
|
|
|
@ -184,8 +184,8 @@ in rec {
|
|||
'';
|
||||
|
||||
|
||||
# Generate the NixOS manpages.
|
||||
manpages = runCommand "nixos-manpages"
|
||||
# Generate the `man configuration.nix` package
|
||||
nixos-configuration-reference-manpage = runCommand "nixos-configuration-reference-manpage"
|
||||
{ nativeBuildInputs = [
|
||||
buildPackages.installShellFiles
|
||||
buildPackages.nixos-render-docs
|
||||
|
@ -194,8 +194,6 @@ in rec {
|
|||
}
|
||||
''
|
||||
# Generate manpages.
|
||||
mkdir -p $out/share/man/man8
|
||||
installManPage ${./manpages}/*
|
||||
mkdir -p $out/share/man/man5
|
||||
nixos-render-docs -j $NIX_BUILD_CORES options manpage \
|
||||
--revision ${lib.escapeShellArg revision} \
|
||||
|
|
|
@ -1,57 +0,0 @@
|
|||
# NixOS manpages
|
||||
|
||||
This is the collection of NixOS manpages, excluding `configuration.nix(5)`.
|
||||
|
||||
Man pages are written in [`mdoc(7)` format](https://mandoc.bsd.lv/man/mdoc.7.html) and should be portable between mandoc and groff for rendering (though minor differences may occur, mandoc and groff seem to have slightly different spacing rules.)
|
||||
|
||||
For previewing edited files, you can just run `man -l path/to/file.8` and you will see it rendered.
|
||||
|
||||
Being written in `mdoc` these manpages use semantic markup. This file provides a guideline on where to apply which of the semantic elements of `mdoc`.
|
||||
|
||||
### Command lines and arguments
|
||||
|
||||
In any manpage, commands, flags and arguments to the *current* executable should be marked according to their semantics. Commands, flags and arguments passed to *other* executables should not be marked like this and should instead be considered as code examples and marked with `Ql`.
|
||||
|
||||
- Use `Fl` to mark flag arguments, `Ar` for their arguments.
|
||||
- Repeating arguments should be marked by adding ellipses (`...`).
|
||||
- Use `Cm` to mark literal string arguments, e.g. the `boot` command argument passed to `nixos-rebuild`.
|
||||
- Optional flags or arguments should be marked with `Op`. This includes optional repeating arguments.
|
||||
- Required flags or arguments should not be marked.
|
||||
- Mutually exclusive groups of arguments should be enclosed in curly brackets, preferably created with `Bro`/`Brc` blocks.
|
||||
|
||||
When an argument is used in an example it should be marked up with `Ar` again to differentiate it from a constant. For example, a command with a `--host name` flag that calls ssh to retrieve the host's local time would signify this thusly:
|
||||
```
|
||||
This will run
|
||||
.Ic ssh Ar name Ic time
|
||||
to retrieve the remote time.
|
||||
```
|
||||
|
||||
### Paths, NixOS options, environment variables
|
||||
|
||||
Constant paths should be marked with `Pa`, NixOS options with `Va`, and environment variables with `Ev`.
|
||||
|
||||
Generated paths, e.g. `result/bin/run-hostname-vm` (where `hostname` is a variable or arguments) should be marked as `Ql` inline literals with their variable components marked appropriately.
|
||||
|
||||
- Taking `hostname` from an argument become `.Ql result/bin/run- Ns Ar hostname Ns -vm`
|
||||
- Taking `hostname` from a variable otherwise defined becomes `.Ql result/bin/run- Ns Va hostname Ns -vm`
|
||||
|
||||
### Code examples and other commands
|
||||
|
||||
In free text names and complete invocations of other commands (e.g. `ssh` or `tar -xvf src.tar`) should be marked with `Ic`, fragments of command lines should be marked with `Ql`.
|
||||
|
||||
Larger code blocks or those that cannot be shown inline should use indented literal display block markup for their contents, i.e.
|
||||
```
|
||||
.Bd -literal -offset indent
|
||||
...
|
||||
.Ed
|
||||
```
|
||||
Contents of code blocks may be marked up further, e.g. if they refer to arguments that will be substituted into them:
|
||||
```
|
||||
.Bd -literal -offset indent
|
||||
{
|
||||
options.hostname = "\c
|
||||
.Ar hostname Ns \c
|
||||
";
|
||||
}
|
||||
.Ed
|
||||
```
|
|
@ -30,6 +30,8 @@
|
|||
|
||||
- [sitespeed-io](https://sitespeed.io), a tool that can generate metrics (timings, diagnostics) for websites. Available as [services.sitespeed-io](#opt-services.sitespeed-io.enable).
|
||||
|
||||
- [Jool](https://nicmx.github.io/Jool/en/index.html), an Open Source implementation of IPv4/IPv6 translation on Linux. Available as [networking.jool.enable](#opt-networking.jool.enable).
|
||||
|
||||
- [Apache Guacamole](https://guacamole.apache.org/), a cross-platform, clientless remote desktop gateway. Available as [services.guacamole-server](#opt-services.guacamole-server.enable) and [services.guacamole-client](#opt-services.guacamole-client.enable) services.
|
||||
|
||||
- [pgBouncer](https://www.pgbouncer.org), a PostgreSQL connection pooler. Available as [services.pgbouncer](#opt-services.pgbouncer.enable).
|
||||
|
@ -68,6 +70,13 @@
|
|||
|
||||
- The `services.ananicy.extraRules` option now has the type of `listOf attrs` instead of `string`.
|
||||
|
||||
- The `matrix-synapse` package & module have undergone some significant internal changes; for most setups, though, no intervention is needed:
|
||||
- The option [`services.matrix-synapse.package`](#opt-services.matrix-synapse.package) is now read-only. For modifying the package, use an overlay which modifies `matrix-synapse-unwrapped` instead. More on that below.
|
||||
- The `enableSystemd` & `enableRedis` arguments have been removed and `matrix-synapse` has been renamed to `matrix-synapse-unwrapped`. Also, several optional dependencies (such as `psycopg2` or `authlib`) have been removed.
|
||||
- These optional dependencies are automatically added via a wrapper (`pkgs.matrix-synapse.override { extras = ["redis"]; }` for `hiredis` & `txredisapi` for instance) if the relevant config section is declared in `services.matrix-synapse.settings`. For instance, if `services.matrix-synapse.settings.redis.enabled` is set to `true`, `"redis"` will be automatically added to the `extras` list of `pkgs.matrix-synapse`.
|
||||
- A list of all extras (and the extras enabled by default) can be found at the [option's reference for `services.matrix-synapse.extras`](#opt-services.matrix-synapse.extras).
|
||||
- In some cases (e.g. for running synapse workers) it was necessary to re-use the `PYTHONPATH` of `matrix-synapse.service`'s environment to have all plugins available. This isn't necessary anymore, instead `config.services.matrix-synapse.package` can be used as it points to the wrapper with properly configured `extras` and also all plugins defined via [`services.matrix-synapse.plugins`](#opt-services.matrix-synapse.plugins) available. This is also the reason for why the option is read-only now, it's supposed to be set by the module only.
|
||||
|
||||
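
  As a rough sketch, such an overlay could look like the following; the patch is purely illustrative:

  ```nix
  {
    nixpkgs.overlays = [
      (final: prev: {
        matrix-synapse-unwrapped = prev.matrix-synapse-unwrapped.overrideAttrs (old: {
          # Illustrative change only; the module's wrapper (and therefore
          # config.services.matrix-synapse.package) picks this up automatically.
          patches = (old.patches or [ ]) ++ [ ./my-synapse-fix.patch ];
        });
      })
    ];
  }
  ```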
- `etcd` has been updated to 3.5, you will want to read the [3.3 to 3.4](https://etcd.io/docs/v3.5/upgrades/upgrade_3_4/) and [3.4 to 3.5](https://etcd.io/docs/v3.5/upgrades/upgrade_3_5/) upgrade guides
|
||||
|
||||
- `consul` has been updated to `1.16.0`. See the [release note](https://github.com/hashicorp/consul/releases/tag/v1.16.0) for more details. Once a new Consul version has started and upgraded its data directory, it generally cannot be downgraded to the previous version.
|
||||
|
@ -136,10 +145,26 @@
|
|||
|
||||
- `pharo` has been updated to the latest stable (PharoVM 10.0.5), which is compatible with the latest stable and oldstable images (Pharo 10 and 11). The VM in question is the 64-bit Spur. The 32-bit version has been dropped due to lack of maintenance. The Cog VM has been deleted because it is severely outdated. Finally, the `pharo-launcher` package has been deleted because it was not compatible with the newer VM, and due to lack of maintenance.
|
||||
|
||||
- Emacs mainline version 29 was introduced. This new version includes many major additions, most notably `tree-sitter` support (enabled by default) and the pgtk variant (useful for Wayland users), which is available under the attribute `emacs29-pgtk`.
|
||||
|
||||
- Emacs macport version 29 was introduced.
|
||||
|
||||
- The `html-proofer` package has been updated from major version 3 to major version 5, which includes [breaking changes](https://github.com/gjtorikian/html-proofer/blob/v5.0.8/UPGRADING.md).
|
||||
|
||||
## Other Notable Changes {#sec-release-23.11-notable-changes}
|
||||
|
||||
- The Cinnamon module now enables XDG desktop integration by default. If you are experiencing collisions related to xdg-desktop-portal-gtk you can safely remove `xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];` from your NixOS configuration.
|
||||
|
||||
- The GNOME, Pantheon and Cinnamon modules no longer force Qt applications to use the Adwaita style, since it was buggy and is no longer maintained upstream (specifically, Cinnamon now defaults to the gtk2 style instead, following the default in Linux Mint). If you still want it, you can add the following options to your configuration, but they will probably be removed eventually:
|
||||
|
||||
```nix
|
||||
qt = {
|
||||
enable = true;
|
||||
platformTheme = "gnome";
|
||||
style = "adwaita";
|
||||
};
|
||||
```
|
||||
|
||||
- `fontconfig` now defaults to using greyscale antialiasing instead of subpixel antialiasing because of a [recommendation from one of the downstreams](https://gitlab.freedesktop.org/fontconfig/fontconfig/-/issues/337). You can change this value by configuring [](#opt-fonts.fontconfig.subpixel.rgba) accordingly.
|
||||
|
||||
- The latest available version of Nextcloud is v27 (available as `pkgs.nextcloud27`). The installation logic is as follows:
|
||||
|
@ -160,6 +185,8 @@
|
|||
|
||||
- `services.fail2ban.jails` can now be configured with attribute sets defining settings and filters instead of free-form lines (see the sketch below). The string-typed options `daemonConfig` and `extraSettings` have been replaced by `daemonSettings` and `jails.DEFAULT.settings` respectively, which use attribute sets.
|
||||
|
||||
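
  A rough sketch of the new attribute-set style, assuming a standard `sshd` jail; the setting names follow ordinary fail2ban jail options:

  ```nix
  {
    services.fail2ban = {
      enable = true;
      # Attribute set instead of free-form configuration lines
      jails.sshd.settings = {
        enabled = true;
        filter = "sshd";
        maxretry = 5;
        bantime = "1h";
      };
    };
  }
  ```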
- The application firewall `opensnitch` now uses the process monitor method eBPF as default as recommended by upstream. The method can be changed with the setting [services.opensnitch.settings.ProcMonitorMethod](#opt-services.opensnitch.settings.ProcMonitorMethod).
|
||||
|
||||
- The module [services.ankisyncd](#opt-services.ankisyncd.package) has been switched to [anki-sync-server-rs](https://github.com/ankicommunity/anki-sync-server-rs) from the old python version, which was difficult to update, had not been updated in a while, and did not support recent versions of anki.
|
||||
Unfortunately, none of the servers that support new clients (newer versions of anki-sync-server, Anki's built-in sync server, and this new Rust package) support the older sync protocol used by the old server, so old clients will also need updating; in particular, the anki package in nixpkgs is also being updated in this release.
|
||||
The module update takes care of the new config syntax, and the data itself (user login and cards) is compatible, so users of the module will be able to just log in again after updating both client and server without any extra action.
|
||||
|
@ -184,6 +211,18 @@ The module update takes care of the new config syntax and the data itself (user
|
|||
|
||||
- The use of `sourceRoot = "source";`, `sourceRoot = "source/subdir";`, and similar lines in package derivations using the default `unpackPhase` is deprecated as it requires `unpackPhase` to always produce a directory named "source". Use `sourceRoot = src.name`, `sourceRoot = "${src.name}/subdir";`, or `setSourceRoot = "sourceRoot=$(echo */subdir)";` or similar instead.
|
||||
|
||||
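
  A sketch of the recommended pattern in a package expression; the fetcher arguments are placeholders:

  ```nix
  { lib, stdenv, fetchFromGitHub }:

  stdenv.mkDerivation rec {
    pname = "example";
    version = "1.0";

    src = fetchFromGitHub {
      owner = "example-org";
      repo = "example";
      rev = "v${version}";
      hash = lib.fakeHash; # placeholder
    };

    # Instead of the deprecated sourceRoot = "source/subdir";
    sourceRoot = "${src.name}/subdir";
  }
  ```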
- The `django` alias in the python package set was upgraded to Django 4.x.
|
||||
Applications that consume Django should always pin their python environment
|
||||
to a compatible major version, so they can move at their own pace.
|
||||
|
||||
```nix
|
||||
python = python3.override {
|
||||
packageOverrides = self: super: {
|
||||
django = super.django_3;
|
||||
};
|
||||
};
|
||||
```
|
||||
|
||||
- The `qemu-vm.nix` module by default now identifies block devices via
|
||||
persistent names available in `/dev/disk/by-*`. Because the rootDevice is
|
||||
identified by its filesystem label, it needs to be formatted before the VM is
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
{ stdenv, squashfsTools, closureInfo
|
||||
{ lib, stdenv, squashfsTools, closureInfo
|
||||
|
||||
, # The root directory of the squashfs filesystem is filled with the
|
||||
# closures of the Nix store paths listed here.
|
||||
|
@ -22,11 +22,13 @@ stdenv.mkDerivation {
|
|||
# for nix-store --load-db.
|
||||
cp $closureInfo/registration nix-path-registration
|
||||
|
||||
'' + lib.optionalString stdenv.buildPlatform.is32bit ''
|
||||
# 64 cores on i686 does not work
|
||||
# fails with FATAL ERROR: mangle2:: xz compress failed with error code 5
|
||||
if ((NIX_BUILD_CORES > 48)); then
|
||||
NIX_BUILD_CORES=48
|
||||
fi
|
||||
'' + ''
|
||||
|
||||
# Generate the squashfs image.
|
||||
mksquashfs nix-path-registration $(cat $closureInfo/store-paths) $out \
|
||||
|
|
|
@ -106,7 +106,13 @@ def main() -> None:
|
|||
args.keep_vm_state,
|
||||
) as driver:
|
||||
if args.interactive:
|
||||
ptpython.repl.embed(driver.test_symbols(), {})
|
||||
history_dir = os.getcwd()
|
||||
history_path = os.path.join(history_dir, ".nixos-test-history")
|
||||
ptpython.repl.embed(
|
||||
driver.test_symbols(),
|
||||
{},
|
||||
history_filename=history_path,
|
||||
)
|
||||
else:
|
||||
tic = time.time()
|
||||
driver.run_tests()
|
||||
|
|
|
@ -1,21 +1,14 @@
|
|||
# This module provides the proprietary NVIDIA X11 / OpenGL drivers.
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: let
|
||||
nvidia_x11 =
|
||||
if (lib.elem "nvidia" config.services.xserver.videoDrivers)
|
||||
then cfg.package
|
||||
else null;
|
||||
|
||||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
nvidia_x11 = let
|
||||
drivers = config.services.xserver.videoDrivers;
|
||||
isDeprecated = str: (hasPrefix "nvidia" str) && (str != "nvidia");
|
||||
hasDeprecated = drivers: any isDeprecated drivers;
|
||||
in if (hasDeprecated drivers) then
|
||||
throw ''
|
||||
Selecting an nvidia driver has been modified for NixOS 19.03. The version is now set using `hardware.nvidia.package`.
|
||||
''
|
||||
else if (elem "nvidia" drivers) then cfg.package else null;
|
||||
|
||||
enabled = nvidia_x11 != null;
|
||||
cfg = config.hardware.nvidia;
|
||||
|
||||
pCfg = cfg.prime;
|
||||
|
@ -23,90 +16,68 @@ let
|
|||
offloadCfg = pCfg.offload;
|
||||
reverseSyncCfg = pCfg.reverseSync;
|
||||
primeEnabled = syncCfg.enable || reverseSyncCfg.enable || offloadCfg.enable;
|
||||
nvidiaPersistencedEnabled = cfg.nvidiaPersistenced;
|
||||
nvidiaSettings = cfg.nvidiaSettings;
|
||||
busIDType = types.strMatching "([[:print:]]+[\:\@][0-9]{1,3}\:[0-9]{1,2}\:[0-9])?";
|
||||
|
||||
busIDType = lib.types.strMatching "([[:print:]]+[\:\@][0-9]{1,3}\:[0-9]{1,2}\:[0-9])?";
|
||||
ibtSupport = cfg.open || (nvidia_x11.ibtSupport or false);
|
||||
in
|
||||
|
||||
{
|
||||
imports =
|
||||
[
|
||||
(mkRenamedOptionModule [ "hardware" "nvidia" "optimus_prime" "enable" ] [ "hardware" "nvidia" "prime" "sync" "enable" ])
|
||||
(mkRenamedOptionModule [ "hardware" "nvidia" "optimus_prime" "allowExternalGpu" ] [ "hardware" "nvidia" "prime" "allowExternalGpu" ])
|
||||
(mkRenamedOptionModule [ "hardware" "nvidia" "prime" "sync" "allowExternalGpu" ] [ "hardware" "nvidia" "prime" "allowExternalGpu" ])
|
||||
(mkRenamedOptionModule [ "hardware" "nvidia" "optimus_prime" "nvidiaBusId" ] [ "hardware" "nvidia" "prime" "nvidiaBusId" ])
|
||||
(mkRenamedOptionModule [ "hardware" "nvidia" "optimus_prime" "intelBusId" ] [ "hardware" "nvidia" "prime" "intelBusId" ])
|
||||
];
|
||||
|
||||
in {
|
||||
options = {
|
||||
hardware.nvidia.powerManagement.enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = lib.mdDoc ''
|
||||
Experimental power management through systemd. For more information, see
|
||||
hardware.nvidia = {
|
||||
powerManagement.enable = lib.mkEnableOption (lib.mdDoc ''
|
||||
experimental power management through systemd. For more information, see
|
||||
the NVIDIA docs, on Chapter 21. Configuring Power Management Support.
|
||||
'';
|
||||
};
|
||||
'');
|
||||
|
||||
hardware.nvidia.powerManagement.finegrained = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = lib.mdDoc ''
|
||||
Experimental power management of PRIME offload. For more information, see
|
||||
the NVIDIA docs, chapter 22. PCI-Express runtime power management.
|
||||
'';
|
||||
};
|
||||
powerManagement.finegrained = lib.mkEnableOption (lib.mdDoc ''
|
||||
experimental power management of PRIME offload. For more information, see
|
||||
the NVIDIA docs, on Chapter 22. PCI-Express Runtime D3 (RTD3) Power Management.
|
||||
'');
|
||||
|
||||
hardware.nvidia.modesetting.enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = lib.mdDoc ''
|
||||
Enable kernel modesetting when using the NVIDIA proprietary driver.
|
||||
dynamicBoost.enable = lib.mkEnableOption (lib.mdDoc ''
|
||||
dynamic Boost balances power between the CPU and the GPU for improved
|
||||
performance on supported laptops using the nvidia-powerd daemon. For more
|
||||
information, see the NVIDIA docs, on Chapter 23. Dynamic Boost on Linux.
|
||||
'');
|
||||
|
||||
modesetting.enable = lib.mkEnableOption (lib.mdDoc ''
|
||||
kernel modesetting when using the NVIDIA proprietary driver.
|
||||
|
||||
Enabling this fixes screen tearing when using Optimus via PRIME (see
|
||||
{option}`hardware.nvidia.prime.sync.enable`. This is not enabled
|
||||
by default because it is not officially supported by NVIDIA and would not
|
||||
work with SLI.
|
||||
'';
|
||||
};
|
||||
'');
|
||||
|
||||
hardware.nvidia.prime.nvidiaBusId = mkOption {
|
||||
type = busIDType;
|
||||
default = "";
|
||||
example = "PCI:1:0:0";
|
||||
description = lib.mdDoc ''
|
||||
Bus ID of the NVIDIA GPU. You can find it using lspci; for example if lspci
|
||||
shows the NVIDIA GPU at "01:00.0", set this option to "PCI:1:0:0".
|
||||
'';
|
||||
};
|
||||
prime.nvidiaBusId = lib.mkOption {
|
||||
type = busIDType;
|
||||
default = "";
|
||||
example = "PCI:1:0:0";
|
||||
description = lib.mdDoc ''
|
||||
Bus ID of the NVIDIA GPU. You can find it using lspci; for example if lspci
|
||||
shows the NVIDIA GPU at "01:00.0", set this option to "PCI:1:0:0".
|
||||
'';
|
||||
};
|
||||
|
||||
hardware.nvidia.prime.intelBusId = mkOption {
|
||||
type = busIDType;
|
||||
default = "";
|
||||
example = "PCI:0:2:0";
|
||||
description = lib.mdDoc ''
|
||||
Bus ID of the Intel GPU. You can find it using lspci; for example if lspci
|
||||
shows the Intel GPU at "00:02.0", set this option to "PCI:0:2:0".
|
||||
'';
|
||||
};
|
||||
prime.intelBusId = lib.mkOption {
|
||||
type = busIDType;
|
||||
default = "";
|
||||
example = "PCI:0:2:0";
|
||||
description = lib.mdDoc ''
|
||||
Bus ID of the Intel GPU. You can find it using lspci; for example if lspci
|
||||
shows the Intel GPU at "00:02.0", set this option to "PCI:0:2:0".
|
||||
'';
|
||||
};
|
||||
|
||||
hardware.nvidia.prime.amdgpuBusId = mkOption {
|
||||
type = busIDType;
|
||||
default = "";
|
||||
example = "PCI:4:0:0";
|
||||
description = lib.mdDoc ''
|
||||
Bus ID of the AMD APU. You can find it using lspci; for example if lspci
|
||||
shows the AMD APU at "04:00.0", set this option to "PCI:4:0:0".
|
||||
'';
|
||||
};
|
||||
prime.amdgpuBusId = lib.mkOption {
|
||||
type = busIDType;
|
||||
default = "";
|
||||
example = "PCI:4:0:0";
|
||||
description = lib.mdDoc ''
|
||||
Bus ID of the AMD APU. You can find it using lspci; for example if lspci
|
||||
shows the AMD APU at "04:00.0", set this option to "PCI:4:0:0".
|
||||
'';
|
||||
};
|
||||
|
||||
hardware.nvidia.prime.sync.enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = lib.mdDoc ''
|
||||
Enable NVIDIA Optimus support using the NVIDIA proprietary driver via PRIME.
|
||||
prime.sync.enable = lib.mkEnableOption (lib.mdDoc ''
|
||||
NVIDIA Optimus support using the NVIDIA proprietary driver via PRIME.
|
||||
If enabled, the NVIDIA GPU will be always on and used for all rendering,
|
||||
while enabling output to displays attached only to the integrated Intel/AMD
|
||||
GPU without a multiplexer.
|
||||
|
@ -127,55 +98,39 @@ in
|
|||
Note that this configuration will only be successful when a display manager
|
||||
for which the {option}`services.xserver.displayManager.setupCommands`
|
||||
option is supported is used.
|
||||
'';
|
||||
};
|
||||
'');
|
||||
|
||||
hardware.nvidia.prime.allowExternalGpu = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = lib.mdDoc ''
|
||||
Configure X to allow external NVIDIA GPUs when using Prime [Reverse] sync optimus.
|
||||
'';
|
||||
};
|
||||
prime.allowExternalGpu = lib.mkEnableOption (lib.mdDoc ''
|
||||
configuring X to allow external NVIDIA GPUs when using Prime [Reverse] sync optimus.
|
||||
'');
|
||||
|
||||
hardware.nvidia.prime.offload.enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = lib.mdDoc ''
|
||||
Enable render offload support using the NVIDIA proprietary driver via PRIME.
|
||||
prime.offload.enable = lib.mkEnableOption (lib.mdDoc ''
|
||||
render offload support using the NVIDIA proprietary driver via PRIME.
|
||||
|
||||
If this is enabled, then the bus IDs of the NVIDIA and Intel/AMD GPUs have to
|
||||
be specified ({option}`hardware.nvidia.prime.nvidiaBusId` and
|
||||
{option}`hardware.nvidia.prime.intelBusId` or
|
||||
{option}`hardware.nvidia.prime.amdgpuBusId`).
|
||||
'';
|
||||
};
|
||||
'');
|
||||
|
||||
hardware.nvidia.prime.offload.enableOffloadCmd = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = lib.mdDoc ''
|
||||
Adds a `nvidia-offload` convenience script to {option}`environment.systemPackages`
|
||||
prime.offload.enableOffloadCmd = lib.mkEnableOption (lib.mdDoc ''
|
||||
adding a `nvidia-offload` convenience script to {option}`environment.systemPackages`
|
||||
for offloading programs to an nvidia device. To work, should have also enabled
|
||||
{option}`hardware.nvidia.prime.offload.enable` or {option}`hardware.nvidia.prime.reverseSync.enable`.
|
||||
|
||||
Example usage `nvidia-offload sauerbraten_client`.
|
||||
'';
|
||||
};
|
||||
'');
|
||||
|
||||
hardware.nvidia.prime.reverseSync.enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = lib.mdDoc ''
|
||||
Warning: This feature is relatively new, depending on your system this might
|
||||
work poorly. AMD support, especially so.
|
||||
See: https://forums.developer.nvidia.com/t/the-all-new-outputsink-feature-aka-reverse-prime/129828
|
||||
|
||||
Enable NVIDIA Optimus support using the NVIDIA proprietary driver via reverse
|
||||
prime.reverseSync.enable = lib.mkEnableOption (lib.mdDoc ''
|
||||
NVIDIA Optimus support using the NVIDIA proprietary driver via reverse
|
||||
PRIME. If enabled, the Intel/AMD GPU will be used for all rendering, while
|
||||
enabling output to displays attached only to the NVIDIA GPU without a
|
||||
multiplexer.
|
||||
|
||||
Warning: This feature is relatively new, depending on your system this might
|
||||
work poorly. AMD support, especially so.
|
||||
See: https://forums.developer.nvidia.com/t/the-all-new-outputsink-feature-aka-reverse-prime/129828
|
||||
|
||||
Note that this option only has any effect if the "nvidia" driver is specified
|
||||
in {option}`services.xserver.videoDrivers`, and it should preferably
|
||||
be the only driver there.
|
||||
|
@ -192,316 +147,347 @@ in
|
|||
Note that this configuration will only be successful when a display manager
|
||||
for which the {option}`services.xserver.displayManager.setupCommands`
|
||||
option is supported is used.
|
||||
'';
|
||||
};
|
||||
'');
|
||||
|
||||
hardware.nvidia.nvidiaSettings = mkOption {
|
||||
default = true;
|
||||
type = types.bool;
|
||||
description = lib.mdDoc ''
|
||||
Whether to add nvidia-settings, NVIDIA's GUI configuration tool, to
|
||||
systemPackages.
|
||||
'';
|
||||
};
|
||||
nvidiaSettings =
|
||||
(lib.mkEnableOption (lib.mdDoc ''
|
||||
nvidia-settings, NVIDIA's GUI configuration tool.
|
||||
''))
|
||||
// {default = true;};
|
||||
|
||||
hardware.nvidia.nvidiaPersistenced = mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
description = lib.mdDoc ''
|
||||
Update for NVIDIA GPU headless mode, i.e. nvidia-persistenced. It ensures all
|
||||
GPUs stay awake even during headless mode.
|
||||
'';
|
||||
};
|
||||
nvidiaPersistenced = lib.mkEnableOption (lib.mdDoc ''
|
||||
nvidia-persistenced, a daemon for NVIDIA GPU headless mode.
|
||||
It ensures all GPUs stay awake even during headless mode.
|
||||
'');
|
||||
|
||||
hardware.nvidia.forceFullCompositionPipeline = lib.mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
description = lib.mdDoc ''
|
||||
Whether to force-enable the full composition pipeline.
|
||||
forceFullCompositionPipeline = lib.mkEnableOption (lib.mdDoc ''
|
||||
forcing the full composition pipeline.
|
||||
This sometimes fixes screen tearing issues.
|
||||
This has been reported to reduce the performance of some OpenGL applications and may produce issues in WebGL.
|
||||
It also drastically increases the time the driver needs to clock down after load.
|
||||
'';
|
||||
};
|
||||
'');
|
||||
|
||||
hardware.nvidia.package = lib.mkOption {
|
||||
type = types.package;
|
||||
default = config.boot.kernelPackages.nvidiaPackages.stable;
|
||||
defaultText = literalExpression "config.boot.kernelPackages.nvidiaPackages.stable";
|
||||
description = lib.mdDoc ''
|
||||
The NVIDIA X11 derivation to use.
|
||||
'';
|
||||
example = literalExpression "config.boot.kernelPackages.nvidiaPackages.legacy_340";
|
||||
};
|
||||
package = lib.mkPackageOptionMD config.boot.kernelPackages.nvidiaPackages "nvidia_x11" {
|
||||
default = "stable";
|
||||
example = lib.mdDoc "config.boot.kernelPackages.nvidiaPackages.legacy_470";
|
||||
};
|
||||
|
||||
hardware.nvidia.open = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = lib.mdDoc ''
|
||||
Whether to use the open source kernel module
|
||||
'';
|
||||
open = lib.mkEnableOption (lib.mdDoc ''
|
||||
the open source NVIDIA kernel module
|
||||
'');
|
||||
};
|
||||
};
|
||||
|
||||
config = let
|
||||
igpuDriver = if pCfg.intelBusId != "" then "modesetting" else "amdgpu";
|
||||
igpuBusId = if pCfg.intelBusId != "" then pCfg.intelBusId else pCfg.amdgpuBusId;
|
||||
in mkIf enabled {
|
||||
assertions = [
|
||||
{
|
||||
assertion = primeEnabled -> pCfg.intelBusId == "" || pCfg.amdgpuBusId == "";
|
||||
message = ''
|
||||
You cannot configure both an Intel iGPU and an AMD APU. Pick the one corresponding to your processor.
|
||||
'';
|
||||
}
|
||||
igpuDriver =
|
||||
if pCfg.intelBusId != ""
|
||||
then "modesetting"
|
||||
else "amdgpu";
|
||||
igpuBusId =
|
||||
if pCfg.intelBusId != ""
|
||||
then pCfg.intelBusId
|
||||
else pCfg.amdgpuBusId;
|
||||
in
|
||||
lib.mkIf (nvidia_x11 != null) {
|
||||
assertions = [
|
||||
{
|
||||
assertion = primeEnabled -> pCfg.intelBusId == "" || pCfg.amdgpuBusId == "";
|
||||
message = "You cannot configure both an Intel iGPU and an AMD APU. Pick the one corresponding to your processor.";
|
||||
}
|
||||
|
||||
{
|
||||
assertion = offloadCfg.enableOffloadCmd -> offloadCfg.enable || reverseSyncCfg.enable;
|
||||
message = ''
|
||||
Offload command requires offloading or reverse prime sync to be enabled.
|
||||
'';
|
||||
}
|
||||
{
|
||||
assertion = offloadCfg.enableOffloadCmd -> offloadCfg.enable || reverseSyncCfg.enable;
|
||||
message = "Offload command requires offloading or reverse prime sync to be enabled.";
|
||||
}
|
||||
|
||||
{
|
||||
assertion = primeEnabled -> pCfg.nvidiaBusId != "" && (pCfg.intelBusId != "" || pCfg.amdgpuBusId != "");
|
||||
message = ''
|
||||
When NVIDIA PRIME is enabled, the GPU bus IDs must be configured.
|
||||
'';
|
||||
}
|
||||
{
|
||||
assertion = primeEnabled -> pCfg.nvidiaBusId != "" && (pCfg.intelBusId != "" || pCfg.amdgpuBusId != "");
|
||||
message = "When NVIDIA PRIME is enabled, the GPU bus IDs must be configured.";
|
||||
}
|
||||
|
||||
{
|
||||
assertion = offloadCfg.enable -> versionAtLeast nvidia_x11.version "435.21";
|
||||
message = "NVIDIA PRIME render offload is currently only supported on versions >= 435.21.";
|
||||
}
|
||||
{
|
||||
assertion = offloadCfg.enable -> lib.versionAtLeast nvidia_x11.version "435.21";
|
||||
message = "NVIDIA PRIME render offload is currently only supported on versions >= 435.21.";
|
||||
}
|
||||
|
||||
{
|
||||
assertion = (reverseSyncCfg.enable && pCfg.amdgpuBusId != "") -> versionAtLeast nvidia_x11.version "470.0";
|
||||
message = "NVIDIA PRIME render offload for AMD APUs is currently only supported on versions >= 470 beta.";
|
||||
}
|
||||
{
|
||||
assertion = (reverseSyncCfg.enable && pCfg.amdgpuBusId != "") -> lib.versionAtLeast nvidia_x11.version "470.0";
|
||||
message = "NVIDIA PRIME render offload for AMD APUs is currently only supported on versions >= 470 beta.";
|
||||
}
|
||||
|
||||
{
|
||||
assertion = !(syncCfg.enable && offloadCfg.enable);
|
||||
message = "PRIME Sync and Offload cannot be both enabled";
|
||||
}
|
||||
{
|
||||
assertion = !(syncCfg.enable && offloadCfg.enable);
|
||||
message = "PRIME Sync and Offload cannot be both enabled";
|
||||
}
|
||||
|
||||
{
|
||||
assertion = !(syncCfg.enable && reverseSyncCfg.enable);
|
||||
message = "PRIME Sync and PRIME Reverse Sync cannot be both enabled";
|
||||
}
|
||||
{
|
||||
assertion = !(syncCfg.enable && reverseSyncCfg.enable);
|
||||
message = "PRIME Sync and PRIME Reverse Sync cannot be both enabled";
|
||||
}
|
||||
|
||||
{
|
||||
assertion = !(syncCfg.enable && cfg.powerManagement.finegrained);
|
||||
message = "Sync precludes powering down the NVIDIA GPU.";
|
||||
}
|
||||
{
|
||||
assertion = !(syncCfg.enable && cfg.powerManagement.finegrained);
|
||||
message = "Sync precludes powering down the NVIDIA GPU.";
|
||||
}
|
||||
|
||||
{
|
||||
assertion = cfg.powerManagement.finegrained -> offloadCfg.enable;
|
||||
message = "Fine-grained power management requires offload to be enabled.";
|
||||
}
|
||||
{
|
||||
assertion = cfg.powerManagement.finegrained -> offloadCfg.enable;
|
||||
message = "Fine-grained power management requires offload to be enabled.";
|
||||
}
|
||||
|
||||
{
|
||||
assertion = cfg.powerManagement.enable -> versionAtLeast nvidia_x11.version "430.09";
|
||||
message = "Required files for driver based power management only exist on versions >= 430.09.";
|
||||
}
|
||||
{
|
||||
assertion = cfg.powerManagement.enable -> lib.versionAtLeast nvidia_x11.version "430.09";
|
||||
message = "Required files for driver based power management only exist on versions >= 430.09.";
|
||||
}
|
||||
|
||||
{
|
||||
assertion = cfg.open -> (cfg.package ? open && cfg.package ? firmware);
|
||||
message = "This version of NVIDIA driver does not provide a corresponding opensource kernel driver";
|
||||
}
|
||||
];
|
||||
{
|
||||
assertion = cfg.open -> (cfg.package ? open && cfg.package ? firmware);
|
||||
message = "This version of NVIDIA driver does not provide a corresponding opensource kernel driver";
|
||||
}
|
||||
|
||||
# If Optimus/PRIME is enabled, we:
|
||||
# - Specify the configured NVIDIA GPU bus ID in the Device section for the
|
||||
# "nvidia" driver.
|
||||
# - Add the AllowEmptyInitialConfiguration option to the Screen section for the
|
||||
# "nvidia" driver, in order to allow the X server to start without any outputs.
|
||||
# - Add a separate Device section for the Intel GPU, using the "modesetting"
|
||||
# driver and with the configured BusID.
|
||||
# - OR add a separate Device section for the AMD APU, using the "amdgpu"
|
||||
# driver and with the configured BusID.
|
||||
# - Reference that Device section from the ServerLayout section as an inactive
|
||||
# device.
|
||||
# - Configure the display manager to run specific `xrandr` commands which will
|
||||
# configure/enable displays connected to the Intel iGPU / AMD APU.
|
||||
{
|
||||
assertion = cfg.dynamicBoost.enable -> lib.versionAtLeast nvidia_x11.version "510.39.01";
|
||||
message = "NVIDIA's Dynamic Boost feature only exists on versions >= 510.39.01";
|
||||
}
|
||||
];
|
||||
|
||||
# reverse sync implies offloading
|
||||
hardware.nvidia.prime.offload.enable = mkDefault reverseSyncCfg.enable;
|
||||
# If Optimus/PRIME is enabled, we:
|
||||
# - Specify the configured NVIDIA GPU bus ID in the Device section for the
|
||||
# "nvidia" driver.
|
||||
# - Add the AllowEmptyInitialConfiguration option to the Screen section for the
|
||||
# "nvidia" driver, in order to allow the X server to start without any outputs.
|
||||
# - Add a separate Device section for the Intel GPU, using the "modesetting"
|
||||
# driver and with the configured BusID.
|
||||
# - OR add a separate Device section for the AMD APU, using the "amdgpu"
|
||||
# driver and with the configured BusID.
|
||||
# - Reference that Device section from the ServerLayout section as an inactive
|
||||
# device.
|
||||
# - Configure the display manager to run specific `xrandr` commands which will
|
||||
# configure/enable displays connected to the Intel iGPU / AMD APU.
|
||||
|
||||
services.xserver.drivers = optional primeEnabled {
|
||||
name = igpuDriver;
|
||||
display = offloadCfg.enable;
|
||||
modules = optionals (igpuDriver == "amdgpu") [ pkgs.xorg.xf86videoamdgpu ];
|
||||
deviceSection = ''
|
||||
BusID "${igpuBusId}"
|
||||
${optionalString (syncCfg.enable && igpuDriver != "amdgpu") ''Option "AccelMethod" "none"''}
|
||||
'';
|
||||
} ++ singleton {
|
||||
name = "nvidia";
|
||||
modules = [ nvidia_x11.bin ];
|
||||
display = !offloadCfg.enable;
|
||||
deviceSection = optionalString primeEnabled
|
||||
# reverse sync implies offloading
|
||||
hardware.nvidia.prime.offload.enable = lib.mkDefault reverseSyncCfg.enable;
|
||||
|
||||
services.xserver.drivers =
|
||||
lib.optional primeEnabled {
|
||||
name = igpuDriver;
|
||||
display = offloadCfg.enable;
|
||||
modules = lib.optional (igpuDriver == "amdgpu") pkgs.xorg.xf86videoamdgpu;
|
||||
deviceSection =
|
||||
''
|
||||
BusID "${igpuBusId}"
|
||||
''
|
||||
+ lib.optionalString (syncCfg.enable && igpuDriver != "amdgpu") ''
|
||||
Option "AccelMethod" "none"
|
||||
'';
|
||||
}
|
||||
++ lib.singleton {
|
||||
name = "nvidia";
|
||||
modules = [nvidia_x11.bin];
|
||||
display = !offloadCfg.enable;
|
||||
deviceSection =
|
||||
lib.optionalString primeEnabled
|
||||
''
|
||||
BusID "${pCfg.nvidiaBusId}"
|
||||
''
|
||||
+ lib.optionalString pCfg.allowExternalGpu ''
|
||||
Option "AllowExternalGpus"
|
||||
'';
|
||||
screenSection =
|
||||
''
|
||||
Option "RandRRotation" "on"
|
||||
''
|
||||
+ lib.optionalString syncCfg.enable ''
|
||||
Option "AllowEmptyInitialConfiguration"
|
||||
''
|
||||
+ lib.optionalString cfg.forceFullCompositionPipeline ''
|
||||
Option "metamodes" "nvidia-auto-select +0+0 {ForceFullCompositionPipeline=On}"
|
||||
Option "AllowIndirectGLXProtocol" "off"
|
||||
Option "TripleBuffer" "on"
|
||||
'';
|
||||
};
|
||||
|
||||
services.xserver.serverLayoutSection =
|
||||
lib.optionalString syncCfg.enable ''
|
||||
Inactive "Device-${igpuDriver}[0]"
|
||||
''
|
||||
BusID "${pCfg.nvidiaBusId}"
|
||||
${optionalString pCfg.allowExternalGpu "Option \"AllowExternalGpus\""}
|
||||
+ lib.optionalString reverseSyncCfg.enable ''
|
||||
Inactive "Device-nvidia[0]"
|
||||
''
|
||||
+ lib.optionalString offloadCfg.enable ''
|
||||
Option "AllowNVIDIAGPUScreens"
|
||||
'';
|
||||
screenSection =
|
||||
''
|
||||
Option "RandRRotation" "on"
|
||||
'' + optionalString syncCfg.enable ''
|
||||
Option "AllowEmptyInitialConfiguration"
|
||||
'' + optionalString cfg.forceFullCompositionPipeline ''
|
||||
Option "metamodes" "nvidia-auto-select +0+0 {ForceFullCompositionPipeline=On}"
|
||||
Option "AllowIndirectGLXProtocol" "off"
|
||||
Option "TripleBuffer" "on"
|
||||
''
|
||||
;
|
||||
};
|
||||
|
||||
services.xserver.serverLayoutSection = optionalString syncCfg.enable ''
|
||||
Inactive "Device-${igpuDriver}[0]"
|
||||
'' + optionalString reverseSyncCfg.enable ''
|
||||
Inactive "Device-nvidia[0]"
|
||||
'' + optionalString offloadCfg.enable ''
|
||||
Option "AllowNVIDIAGPUScreens"
|
||||
'';
|
||||
services.xserver.displayManager.setupCommands = let
|
||||
gpuProviderName =
|
||||
if igpuDriver == "amdgpu"
|
||||
then
|
||||
# find the name of the provider if amdgpu
|
||||
"`${lib.getExe pkgs.xorg.xrandr} --listproviders | ${lib.getExe pkgs.gnugrep} -i AMD | ${lib.getExe pkgs.gnused} -n 's/^.*name://p'`"
|
||||
else igpuDriver;
|
||||
providerCmdParams =
|
||||
if syncCfg.enable
|
||||
then "\"${gpuProviderName}\" NVIDIA-0"
|
||||
else "NVIDIA-G0 \"${gpuProviderName}\"";
|
||||
in
|
||||
lib.optionalString (syncCfg.enable || reverseSyncCfg.enable) ''
|
||||
# Added by nvidia configuration module for Optimus/PRIME.
|
||||
${lib.getExe pkgs.xorg.xrandr} --setprovideroutputsource ${providerCmdParams}
|
||||
${lib.getExe pkgs.xorg.xrandr} --auto
|
||||
'';
|
||||
|
||||
services.xserver.displayManager.setupCommands = let
|
||||
gpuProviderName = if igpuDriver == "amdgpu" then
|
||||
# find the name of the provider if amdgpu
|
||||
"`${pkgs.xorg.xrandr}/bin/xrandr --listproviders | ${pkgs.gnugrep}/bin/grep -i AMD | ${pkgs.gnused}/bin/sed -n 's/^.*name://p'`"
|
||||
else
|
||||
igpuDriver;
|
||||
providerCmdParams = if syncCfg.enable then "\"${gpuProviderName}\" NVIDIA-0" else "NVIDIA-G0 \"${gpuProviderName}\"";
|
||||
in optionalString (syncCfg.enable || reverseSyncCfg.enable) ''
|
||||
# Added by nvidia configuration module for Optimus/PRIME.
|
||||
${pkgs.xorg.xrandr}/bin/xrandr --setprovideroutputsource ${providerCmdParams}
|
||||
${pkgs.xorg.xrandr}/bin/xrandr --auto
|
||||
'';
|
||||
environment.etc = {
|
||||
"nvidia/nvidia-application-profiles-rc" = lib.mkIf nvidia_x11.useProfiles {source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";};
|
||||
|
||||
environment.etc."nvidia/nvidia-application-profiles-rc" = mkIf nvidia_x11.useProfiles {
|
||||
source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";
|
||||
};
|
||||
# 'nvidia_x11' installs its files to /run/opengl-driver/...
|
||||
"egl/egl_external_platform.d".source = "/run/opengl-driver/share/egl/egl_external_platform.d/";
|
||||
};
|
||||
|
||||
# 'nvidia_x11' installs its files to /run/opengl-driver/...
|
||||
environment.etc."egl/egl_external_platform.d".source =
|
||||
"/run/opengl-driver/share/egl/egl_external_platform.d/";
|
||||
|
||||
hardware.opengl.extraPackages = [
|
||||
nvidia_x11.out
|
||||
pkgs.nvidia-vaapi-driver
|
||||
];
|
||||
hardware.opengl.extraPackages32 = [
|
||||
nvidia_x11.lib32
|
||||
pkgs.pkgsi686Linux.nvidia-vaapi-driver
|
||||
];
|
||||
|
||||
environment.systemPackages = [ nvidia_x11.bin ]
|
||||
++ optionals cfg.nvidiaSettings [ nvidia_x11.settings ]
|
||||
++ optionals nvidiaPersistencedEnabled [ nvidia_x11.persistenced ]
|
||||
++ optionals offloadCfg.enableOffloadCmd [
|
||||
hardware.opengl = {
|
||||
extraPackages = [
|
||||
nvidia_x11.out
|
||||
pkgs.nvidia-vaapi-driver
|
||||
];
|
||||
extraPackages32 = [
|
||||
nvidia_x11.lib32
|
||||
pkgs.pkgsi686Linux.nvidia-vaapi-driver
|
||||
];
|
||||
};
|
||||
environment.systemPackages =
|
||||
[nvidia_x11.bin]
|
||||
++ lib.optional cfg.nvidiaSettings nvidia_x11.settings
|
||||
++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced
|
||||
++ lib.optional offloadCfg.enableOffloadCmd
|
||||
(pkgs.writeShellScriptBin "nvidia-offload" ''
|
||||
export __NV_PRIME_RENDER_OFFLOAD=1
|
||||
export __NV_PRIME_RENDER_OFFLOAD_PROVIDER=NVIDIA-G0
|
||||
export __GLX_VENDOR_LIBRARY_NAME=nvidia
|
||||
export __VK_LAYER_NV_optimus=NVIDIA_only
|
||||
exec "$@"
|
||||
'')
|
||||
];
|
||||
'');
|
||||
|
||||
systemd.packages = optional cfg.powerManagement.enable nvidia_x11.out;
|
||||
systemd.packages = lib.optional cfg.powerManagement.enable nvidia_x11.out;
|
||||
|
||||
systemd.services = let
|
||||
baseNvidiaService = state: {
|
||||
description = "NVIDIA system ${state} actions";
|
||||
|
||||
path = with pkgs; [ kbd ];
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
|
||||
};
|
||||
};
|
||||
|
||||
nvidiaService = sleepState: (baseNvidiaService sleepState) // {
|
||||
before = [ "systemd-${sleepState}.service" ];
|
||||
requiredBy = [ "systemd-${sleepState}.service" ];
|
||||
};
|
||||
|
||||
services = (builtins.listToAttrs (map (t: nameValuePair "nvidia-${t}" (nvidiaService t)) ["hibernate" "suspend"]))
|
||||
// {
|
||||
nvidia-resume = (baseNvidiaService "resume") // {
|
||||
after = [ "systemd-suspend.service" "systemd-hibernate.service" ];
|
||||
requiredBy = [ "systemd-suspend.service" "systemd-hibernate.service" ];
|
||||
};
|
||||
};
|
||||
in optionalAttrs cfg.powerManagement.enable services
|
||||
// optionalAttrs nvidiaPersistencedEnabled {
|
||||
"nvidia-persistenced" = mkIf nvidiaPersistencedEnabled {
|
||||
description = "NVIDIA Persistence Daemon";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
systemd.services = let
|
||||
nvidiaService = state: {
|
||||
description = "NVIDIA system ${state} actions";
|
||||
path = [pkgs.kbd];
|
||||
serviceConfig = {
|
||||
Type = "forking";
|
||||
Restart = "always";
|
||||
PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
|
||||
ExecStart = "${nvidia_x11.persistenced}/bin/nvidia-persistenced --verbose";
|
||||
ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
|
||||
Type = "oneshot";
|
||||
ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
|
||||
};
|
||||
before = ["systemd-${state}.service"];
|
||||
requiredBy = ["systemd-${state}.service"];
|
||||
};
|
||||
};
|
||||
in
|
||||
lib.mkMerge [
|
||||
(lib.mkIf cfg.powerManagement.enable {
|
||||
nvidia-suspend = nvidiaService "suspend";
|
||||
nvidia-hibernate = nvidiaService "hibernate";
|
||||
nvidia-resume =
|
||||
(nvidiaService "resume")
|
||||
// {
|
||||
before = [];
|
||||
after = ["systemd-suspend.service" "systemd-hibernate.service"];
|
||||
requiredBy = ["systemd-suspend.service" "systemd-hibernate.service"];
|
||||
};
|
||||
})
|
||||
(lib.mkIf cfg.nvidiaPersistenced {
|
||||
"nvidia-persistenced" = {
|
||||
description = "NVIDIA Persistence Daemon";
|
||||
wantedBy = ["multi-user.target"];
|
||||
serviceConfig = {
|
||||
Type = "forking";
|
||||
Restart = "always";
|
||||
PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
|
||||
ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
|
||||
ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
|
||||
};
|
||||
};
|
||||
})
|
||||
(lib.mkIf cfg.dynamicBoost.enable {
|
||||
"nvidia-powerd" = {
|
||||
description = "nvidia-powerd service";
|
||||
path = [
|
||||
pkgs.util-linux # nvidia-powerd wants lscpu
|
||||
];
|
||||
wantedBy = ["multi-user.target"];
|
||||
serviceConfig = {
|
||||
Type = "dbus";
|
||||
BusName = "nvidia.powerd.server";
|
||||
ExecStart = "${nvidia_x11.bin}/bin/nvidia-powerd";
|
||||
};
|
||||
};
|
||||
})
|
||||
];
|
||||
|
||||
systemd.tmpfiles.rules = optional config.virtualisation.docker.enableNvidia
|
||||
services.acpid.enable = true;
|
||||
|
||||
services.dbus.packages = lib.optional cfg.dynamicBoost.enable nvidia_x11.bin;
|
||||
|
||||
hardware.firmware = lib.optional cfg.open nvidia_x11.firmware;
|
||||
|
||||
systemd.tmpfiles.rules =
|
||||
lib.optional config.virtualisation.docker.enableNvidia
|
||||
"L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin"
|
||||
++ optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
|
||||
++ lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
|
||||
"L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
|
||||
|
||||
boot.extraModulePackages = if cfg.open then [ nvidia_x11.open ] else [ nvidia_x11.bin ];
|
||||
hardware.firmware = lib.optional cfg.open nvidia_x11.firmware;
|
||||
boot = {
|
||||
blacklistedKernelModules = ["nouveau" "nvidiafb"];
|
||||
|
||||
# nvidia-uvm is required by CUDA applications.
|
||||
boot.kernelModules = [ "nvidia-uvm" ] ++
|
||||
optionals config.services.xserver.enable [ "nvidia" "nvidia_modeset" "nvidia_drm" ];
|
||||
extraModulePackages =
|
||||
if cfg.open
|
||||
then [nvidia_x11.open]
|
||||
else [nvidia_x11.bin];
|
||||
|
||||
# If requested enable modesetting via kernel parameter.
|
||||
boot.kernelParams = optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
|
||||
++ optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
|
||||
++ optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
|
||||
++ optional (config.boot.kernelPackages.kernel.kernelAtLeast "6.2" && !ibtSupport) "ibt=off";
|
||||
# nvidia-uvm is required by CUDA applications.
|
||||
kernelModules =
|
||||
["nvidia-uvm"]
|
||||
++ lib.optionals config.services.xserver.enable ["nvidia" "nvidia_modeset" "nvidia_drm"];
|
||||
|
||||
services.udev.extraRules =
|
||||
''
|
||||
# Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
|
||||
KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 255'"
|
||||
KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \ -f 4); do mknod -m 666 /dev/nvidia$${i} c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) $${i}; done'"
|
||||
KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 254'"
|
||||
KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 0'"
|
||||
KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 1'"
|
||||
'' + optionalString cfg.powerManagement.finegrained (
|
||||
optionalString (versionOlder config.boot.kernelPackages.kernel.version "5.5") ''
|
||||
# Remove NVIDIA USB xHCI Host Controller devices, if present
|
||||
ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"
|
||||
# If requested enable modesetting via kernel parameter.
|
||||
kernelParams =
|
||||
lib.optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
|
||||
++ lib.optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
|
||||
++ lib.optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
|
||||
++ lib.optional (config.boot.kernelPackages.kernel.kernelAtLeast "6.2" && !ibtSupport) "ibt=off";
|
||||
|
||||
# Remove NVIDIA USB Type-C UCSI devices, if present
|
||||
ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c8000", ATTR{remove}="1"
|
||||
# enable finegrained power management
|
||||
extraModprobeConfig = lib.optionalString cfg.powerManagement.finegrained ''
|
||||
options nvidia "NVreg_DynamicPowerManagement=0x02"
|
||||
'';
|
||||
};
|
||||
|
||||
# Remove NVIDIA Audio devices, if present
|
||||
ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x040300", ATTR{remove}="1"
|
||||
'' + ''
|
||||
# Enable runtime PM for NVIDIA VGA/3D controller devices on driver bind
|
||||
ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="auto"
|
||||
ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="auto"
|
||||
services.udev.extraRules =
|
||||
''
|
||||
# Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
|
||||
KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 255'"
|
||||
KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \ -f 4); do mknod -m 666 /dev/nvidia$${i} c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) $${i}; done'"
|
||||
KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 254'"
|
||||
KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 0'"
|
||||
KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 1'"
|
||||
''
|
||||
+ lib.optionalString cfg.powerManagement.finegrained (
|
||||
lib.optionalString (lib.versionOlder config.boot.kernelPackages.kernel.version "5.5") ''
|
||||
# Remove NVIDIA USB xHCI Host Controller devices, if present
|
||||
ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"
|
||||
|
||||
# Disable runtime PM for NVIDIA VGA/3D controller devices on driver unbind
|
||||
ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="on"
|
||||
ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="on"
|
||||
'');
|
||||
# Remove NVIDIA USB Type-C UCSI devices, if present
|
||||
ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c8000", ATTR{remove}="1"
|
||||
|
||||
boot.extraModprobeConfig = mkIf cfg.powerManagement.finegrained ''
|
||||
options nvidia "NVreg_DynamicPowerManagement=0x02"
|
||||
'';
|
||||
|
||||
boot.blacklistedKernelModules = [ "nouveau" "nvidiafb" ];
|
||||
|
||||
services.acpid.enable = true;
|
||||
|
||||
};
|
||||
# Remove NVIDIA Audio devices, if present
|
||||
ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x040300", ATTR{remove}="1"
|
||||
''
|
||||
+ ''
|
||||
# Enable runtime PM for NVIDIA VGA/3D controller devices on driver bind
|
||||
ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="auto"
|
||||
ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="auto"
|
||||
|
||||
# Disable runtime PM for NVIDIA VGA/3D controller devices on driver unbind
|
||||
ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="on"
|
||||
ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="on"
|
||||
''
|
||||
);
|
||||
};
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@ let
|
|||
im = config.i18n.inputMethod;
|
||||
cfg = im.fcitx5;
|
||||
fcitx5Package = pkgs.fcitx5-with-addons.override { inherit (cfg) addons; };
|
||||
settingsFormat = pkgs.formats.ini { };
|
||||
in
|
||||
{
|
||||
options = {
|
||||
|
@ -40,6 +41,44 @@ in
|
|||
'';
|
||||
description = lib.mdDoc "Quick phrase files.";
|
||||
};
|
||||
settings = {
|
||||
globalOptions = lib.mkOption {
|
||||
type = lib.types.submodule {
|
||||
freeformType = settingsFormat.type;
|
||||
};
|
||||
default = { };
|
||||
description = lib.mdDoc ''
|
||||
The global options in the `config` file, in INI format.
|
||||
'';
|
||||
};
|
||||
inputMethod = lib.mkOption {
|
||||
type = lib.types.submodule {
|
||||
freeformType = settingsFormat.type;
|
||||
};
|
||||
default = { };
|
||||
description = lib.mdDoc ''
|
||||
The input method configuration in the `profile` file, in INI format.
|
||||
'';
|
||||
};
|
||||
addons = lib.mkOption {
|
||||
type = with lib.types; (attrsOf anything);
|
||||
default = { };
|
||||
description = lib.mdDoc ''
|
||||
The addon configurations in the `conf` folder, in INI format with global sections.
|
||||
Each item is written to the corresponding file.
|
||||
'';
|
||||
example = literalExpression "{ pinyin.globalSection.EmojiEnabled = \"True\"; }";
|
||||
};
|
||||
};
|
||||
ignoreUserConfig = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = lib.mdDoc ''
|
||||
Ignore the user configuration. **Warning**: When this is enabled, the
user configuration files are ignored entirely and the user dictionary
cannot be saved or loaded.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -61,12 +100,30 @@ in
|
|||
(name: value: lib.nameValuePair ("share/fcitx5/data/quickphrase.d/${name}.mb") value)
|
||||
cfg.quickPhraseFiles))
|
||||
];
|
||||
environment.etc =
|
||||
let
|
||||
optionalFile = p: f: v: lib.optionalAttrs (v != { }) {
|
||||
"xdg/fcitx5/${p}".text = f v;
|
||||
};
|
||||
in
|
||||
lib.attrsets.mergeAttrsList [
|
||||
(optionalFile "config" (lib.generators.toINI { }) cfg.settings.globalOptions)
|
||||
(optionalFile "profile" (lib.generators.toINI { }) cfg.settings.inputMethod)
|
||||
(lib.concatMapAttrs
|
||||
(name: value: optionalFile
|
||||
"conf/${name}.conf"
|
||||
(lib.generators.toINIWithGlobalSection { })
|
||||
value)
|
||||
cfg.settings.addons)
|
||||
];
|
||||
|
||||
environment.variables = {
|
||||
GTK_IM_MODULE = "fcitx";
|
||||
QT_IM_MODULE = "fcitx";
|
||||
XMODIFIERS = "@im=fcitx";
|
||||
QT_PLUGIN_PATH = [ "${fcitx5Package}/${pkgs.qt6.qtbase.qtPluginPrefix}" ];
|
||||
} // lib.optionalAttrs cfg.ignoreUserConfig {
|
||||
SKIP_FCITX_USER_PATH = "1";
|
||||
};
|
||||
};
|
||||
}
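For orientation, a minimal sketch of how the settings options introduced above could be used from a NixOS configuration. The concrete INI keys and the pinyin addon values are illustrative assumptions, not part of this change; only the option paths and the generated /etc/xdg/fcitx5 files come from the module itself.

i18n.inputMethod = {
  enabled = "fcitx5";
  fcitx5.settings = {
    # rendered to /etc/xdg/fcitx5/config
    globalOptions."Hotkey/TriggerKeys"."0" = "Control+space";
    # rendered to /etc/xdg/fcitx5/profile
    inputMethod."Groups/0"."Default Layout" = "us";
    # rendered to /etc/xdg/fcitx5/conf/pinyin.conf
    addons.pinyin.globalSection.EmojiEnabled = "True";
  };
};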
|
||||
|
|
|
@ -53,7 +53,7 @@ def add_closure_to_definition(
|
|||
|
||||
source = Path(line.strip())
|
||||
target = str(source.relative_to("/nix/store/"))
|
||||
target = f":{target}" if strip_nix_store_prefix else ""
|
||||
target = f":/{target}" if strip_nix_store_prefix else ""
|
||||
|
||||
copy_files_lines.append(f"CopyFiles={source}{target}\n")
|
||||
|
||||
|
@ -102,7 +102,7 @@ def main() -> None:
|
|||
add_contents_to_definition(definition, contents)
|
||||
|
||||
closure = config.get("closure")
|
||||
strip_nix_store_prefix = config.get("stripStorePaths")
|
||||
strip_nix_store_prefix = config.get("stripNixStorePrefix")
|
||||
add_closure_to_definition(definition, closure, strip_nix_store_prefix)
|
||||
|
||||
print(target_dir.absolute())
|
||||
|
|
|
@ -88,6 +88,13 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
package = lib.mkPackageOption pkgs "systemd-repart" {
|
||||
default = "systemd";
|
||||
example = lib.literalExpression ''
|
||||
pkgs.systemdMinimal.override { withCryptsetup = true; }
|
||||
'';
|
||||
};
|
||||
|
||||
partitions = lib.mkOption {
|
||||
type = with lib.types; attrsOf (submodule partitionOptions);
|
||||
default = { };
|
||||
|
@ -178,9 +185,9 @@ in
|
|||
in
|
||||
pkgs.runCommand cfg.name
|
||||
{
|
||||
nativeBuildInputs = with pkgs; [
|
||||
fakeroot
|
||||
systemd
|
||||
nativeBuildInputs = [
|
||||
cfg.package
|
||||
pkgs.fakeroot
|
||||
] ++ fileSystemTools;
|
||||
} ''
|
||||
amendedRepartDefinitions=$(${amendRepartDefinitions} ${partitions} ${definitionsDirectory})
|
||||
|
|
|
@ -30,12 +30,6 @@
|
|||
enable = true;
|
||||
};
|
||||
|
||||
# Theme calamares with GNOME theme
|
||||
qt = {
|
||||
enable = true;
|
||||
platformTheme = "gnome";
|
||||
};
|
||||
|
||||
# Fix scaling for calamares on wayland
|
||||
environment.variables = {
|
||||
QT_QPA_PLATFORM = "$([[ $XDG_SESSION_TYPE = \"wayland\" ]] && echo \"wayland\")";
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
{
|
||||
x86_64-linux = "/nix/store/ny9r65799s7xhp605bc2753sjvzkxrrs-nix-2.15.1";
|
||||
i686-linux = "/nix/store/ck55dz5klc7szi8rx9ghhm8gi2b5q5bw-nix-2.15.1";
|
||||
aarch64-linux = "/nix/store/cl0a02vr28913dgw98hrm45a4baqr3z1-nix-2.15.1";
|
||||
x86_64-darwin = "/nix/store/wq228jdbz16pp2lnxf32n8dv27pw53p8-nix-2.15.1";
|
||||
aarch64-darwin = "/nix/store/x11cpsjg4q236msfz5scc325pfp9xy64-nix-2.15.1";
|
||||
x86_64-linux = "/nix/store/3wqasl97rjiza3vd7fxjnvli2w9l30mk-nix-2.17.0";
|
||||
i686-linux = "/nix/store/z360xswxfx55pmm1fng3hw748rbs0kkj-nix-2.17.0";
|
||||
aarch64-linux = "/nix/store/9670sxa916xmv8n1kqs7cdvmnsrhrdjv-nix-2.17.0";
|
||||
x86_64-darwin = "/nix/store/2rdbky9j8hc3mbgl6pnda4hkjllyfwnn-nix-2.17.0";
|
||||
aarch64-darwin = "/nix/store/jl9qma14fb4zk9lq1k0syw2k9qm2gqjw-nix-2.17.0";
|
||||
}
|
||||
|
|
|
@ -9,12 +9,19 @@ let
|
|||
makeProg = args: pkgs.substituteAll (args // {
|
||||
dir = "bin";
|
||||
isExecutable = true;
|
||||
nativeBuildInputs = [
|
||||
pkgs.installShellFiles
|
||||
];
|
||||
postInstall = ''
|
||||
installManPage ${args.manPage}
|
||||
'';
|
||||
});
|
||||
|
||||
nixos-build-vms = makeProg {
|
||||
name = "nixos-build-vms";
|
||||
src = ./nixos-build-vms/nixos-build-vms.sh;
|
||||
inherit (pkgs) runtimeShell;
|
||||
manPage = ./manpages/nixos-build-vms.8;
|
||||
};
|
||||
|
||||
nixos-install = makeProg {
|
||||
|
@ -27,6 +34,7 @@ let
|
|||
nixos-enter
|
||||
pkgs.util-linuxMinimal
|
||||
];
|
||||
manPage = ./manpages/nixos-install.8;
|
||||
};
|
||||
|
||||
nixos-rebuild = pkgs.nixos-rebuild.override { nix = config.nix.package.out; };
|
||||
|
@ -40,6 +48,7 @@ let
|
|||
btrfs = "${pkgs.btrfs-progs}/bin/btrfs";
|
||||
inherit (config.system.nixos-generate-config) configuration desktopConfiguration;
|
||||
xserverEnabled = config.services.xserver.enable;
|
||||
manPage = ./manpages/nixos-generate-config.8;
|
||||
};
|
||||
|
||||
inherit (pkgs) nixos-option;
|
||||
|
@ -57,6 +66,7 @@ let
|
|||
} // optionalAttrs (config.system.configurationRevision != null) {
|
||||
configurationRevision = config.system.configurationRevision;
|
||||
});
|
||||
manPage = ./manpages/nixos-version.8;
|
||||
};
|
||||
|
||||
nixos-enter = makeProg {
|
||||
|
@ -66,6 +76,7 @@ let
|
|||
path = makeBinPath [
|
||||
pkgs.util-linuxMinimal
|
||||
];
|
||||
manPage = ./manpages/nixos-enter.8;
|
||||
};
|
||||
|
||||
in
|
||||
|
|
|
@ -346,7 +346,7 @@ in
|
|||
system.build.manual = manual;
|
||||
|
||||
environment.systemPackages = []
|
||||
++ optional cfg.man.enable manual.manpages
|
||||
++ optional cfg.man.enable manual.nixos-configuration-reference-manpage
|
||||
++ optionals cfg.doc.enable [ manual.manualHTML nixos-help ];
|
||||
})
|
||||
|
||||
|
|
|
@ -319,6 +319,7 @@
|
|||
./services/audio/botamusique.nix
|
||||
./services/audio/gmediarender.nix
|
||||
./services/audio/gonic.nix
|
||||
./services/audio/goxlr-utility.nix
|
||||
./services/audio/hqplayerd.nix
|
||||
./services/audio/icecast.nix
|
||||
./services/audio/jack.nix
|
||||
|
@ -928,6 +929,7 @@
|
|||
./services/networking/jibri/default.nix
|
||||
./services/networking/jicofo.nix
|
||||
./services/networking/jitsi-videobridge.nix
|
||||
./services/networking/jool.nix
|
||||
./services/networking/kea.nix
|
||||
./services/networking/keepalived/default.nix
|
||||
./services/networking/keybase.nix
|
||||
|
@ -1464,6 +1466,7 @@
|
|||
./virtualisation/lxc.nix
|
||||
./virtualisation/lxcfs.nix
|
||||
./virtualisation/lxd.nix
|
||||
./virtualisation/lxd-agent.nix
|
||||
./virtualisation/multipass.nix
|
||||
./virtualisation/nixos-containers.nix
|
||||
./virtualisation/oci-containers.nix
|
||||
|
|
|
@ -7,6 +7,8 @@ let
|
|||
concatStringsSep escapeShellArgs optionalString
|
||||
literalExpression mkEnableOption mkIf mkOption mkOptionDefault types;
|
||||
|
||||
requiresSetcapWrapper = config.boot.kernelPackages.kernelOlder "5.7" && cfg.bindInterface;
|
||||
|
||||
browserDefault = chromium: concatStringsSep " " [
|
||||
''env XDG_CONFIG_HOME="$PREV_CONFIG_HOME"''
|
||||
''${chromium}/bin/chromium''
|
||||
|
@ -23,11 +25,23 @@ let
|
|||
desktopItem = pkgs.makeDesktopItem {
|
||||
name = "captive-browser";
|
||||
desktopName = "Captive Portal Browser";
|
||||
exec = "/run/wrappers/bin/captive-browser";
|
||||
exec = "captive-browser";
|
||||
icon = "nix-snowflake";
|
||||
categories = [ "Network" ];
|
||||
};
|
||||
|
||||
captive-browser-configured = pkgs.writeShellScriptBin "captive-browser" ''
|
||||
export PREV_CONFIG_HOME="$XDG_CONFIG_HOME"
|
||||
export XDG_CONFIG_HOME=${pkgs.writeTextDir "captive-browser.toml" ''
|
||||
browser = """${cfg.browser}"""
|
||||
dhcp-dns = """${cfg.dhcp-dns}"""
|
||||
socks5-addr = """${cfg.socks5-addr}"""
|
||||
${optionalString cfg.bindInterface ''
|
||||
bind-device = """${cfg.interface}"""
|
||||
''}
|
||||
''}
|
||||
exec ${cfg.package}/bin/captive-browser
|
||||
'';
|
||||
in
|
||||
{
|
||||
###### interface
|
||||
|
@ -101,6 +115,7 @@ in
|
|||
(pkgs.runCommand "captive-browser-desktop-item" { } ''
|
||||
install -Dm444 -t $out/share/applications ${desktopItem}/share/applications/*.desktop
|
||||
'')
|
||||
captive-browser-configured
|
||||
];
|
||||
|
||||
programs.captive-browser.dhcp-dns =
|
||||
|
@ -131,22 +146,11 @@ in
|
|||
source = "${pkgs.busybox}/bin/udhcpc";
|
||||
};
|
||||
|
||||
security.wrappers.captive-browser = {
|
||||
security.wrappers.captive-browser = mkIf requiresSetcapWrapper {
|
||||
owner = "root";
|
||||
group = "root";
|
||||
capabilities = "cap_net_raw+p";
|
||||
source = pkgs.writeShellScript "captive-browser" ''
|
||||
export PREV_CONFIG_HOME="$XDG_CONFIG_HOME"
|
||||
export XDG_CONFIG_HOME=${pkgs.writeTextDir "captive-browser.toml" ''
|
||||
browser = """${cfg.browser}"""
|
||||
dhcp-dns = """${cfg.dhcp-dns}"""
|
||||
socks5-addr = """${cfg.socks5-addr}"""
|
||||
${optionalString cfg.bindInterface ''
|
||||
bind-device = """${cfg.interface}"""
|
||||
''}
|
||||
''}
|
||||
exec ${cfg.package}/bin/captive-browser
|
||||
'';
|
||||
source = "${captive-browser-configured}/bin/captive-browser";
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
|
@ -51,13 +51,6 @@ in
|
|||
|
||||
environment.extraInit =
|
||||
''
|
||||
unset ASPELL_CONF
|
||||
for i in ${concatStringsSep " " (reverseList cfg.profiles)} ; do
|
||||
if [ -d "$i/lib/aspell" ]; then
|
||||
export ASPELL_CONF="dict-dir $i/lib/aspell"
|
||||
fi
|
||||
done
|
||||
|
||||
export NIX_USER_PROFILE_DIR="/nix/var/nix/profiles/per-user/$USER"
|
||||
export NIX_PROFILES="${concatStringsSep " " (reverseList cfg.profiles)}"
|
||||
'';
|
||||
|
|
|
@ -271,7 +271,7 @@ in
|
|||
''
|
||||
mkdir -p $out
|
||||
if [ -d $package/share/man ]; then
|
||||
find $package/share/man -type f | xargs ${pkgs.python3.interpreter} ${patchedGenerator}/create_manpage_completions.py --directory $out >/dev/null
|
||||
find $package/share/man -type f | xargs ${pkgs.python3.pythonForBuild.interpreter} ${patchedGenerator}/create_manpage_completions.py --directory $out >/dev/null
|
||||
fi
|
||||
'';
|
||||
in
|
||||
|
|
|
@ -9,7 +9,8 @@ let
|
|||
fmt = value:
|
||||
if isList value then concatStringsSep " " (map fmt value) else
|
||||
if isString value then value else
|
||||
if isBool value || isInt value then toString value else
|
||||
if isBool value then if value then "1" else "0" else
|
||||
if isInt value then toString value else
|
||||
throw "Unrecognized type ${typeOf value} in htop settings";
|
||||
|
||||
in
|
||||
|
|
|
@ -32,11 +32,10 @@ in
|
|||
readOnly = true;
|
||||
default = cfg.package.override {
|
||||
enableXWayland = cfg.xwayland.enable;
|
||||
hidpiXWayland = cfg.xwayland.hidpi;
|
||||
nvidiaPatches = cfg.nvidiaPatches;
|
||||
enableNvidiaPatches = cfg.enableNvidiaPatches;
|
||||
};
|
||||
defaultText = literalExpression
|
||||
"`wayland.windowManager.hyprland.package` with applied configuration";
|
||||
"`programs.hyprland.package` with applied configuration";
|
||||
description = mdDoc ''
|
||||
The Hyprland package after applying configuration.
|
||||
'';
|
||||
|
@ -44,17 +43,9 @@ in
|
|||
|
||||
portalPackage = mkPackageOptionMD pkgs "xdg-desktop-portal-hyprland" { };
|
||||
|
||||
xwayland = {
|
||||
enable = mkEnableOption (mdDoc "XWayland") // { default = true; };
|
||||
hidpi = mkEnableOption null // {
|
||||
description = mdDoc ''
|
||||
Enable HiDPI XWayland, based on [XWayland MR 733](https://gitlab.freedesktop.org/xorg/xserver/-/merge_requests/733).
|
||||
See <https://wiki.hyprland.org/Nix/Options-Overrides/#xwayland-hidpi> for more info.
|
||||
'';
|
||||
};
|
||||
};
|
||||
xwayland.enable = mkEnableOption (mdDoc "XWayland") // { default = true; };
|
||||
|
||||
nvidiaPatches = mkEnableOption (mdDoc "patching wlroots for better Nvidia support");
|
||||
enableNvidiaPatches = mkEnableOption (mdDoc "patching wlroots for better Nvidia support");
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
@ -77,4 +68,15 @@ in
|
|||
extraPortals = [ finalPortalPackage ];
|
||||
};
|
||||
};
|
||||
|
||||
imports = with lib; [
|
||||
(mkRemovedOptionModule
|
||||
[ "programs" "hyprland" "xwayland" "hidpi" ]
|
||||
"XWayland patches are deprecated. Refer to https://wiki.hyprland.org/Configuring/XWayland"
|
||||
)
|
||||
(mkRenamedOptionModule
|
||||
[ "programs" "hyprland" "nvidiaPatches" ]
|
||||
[ "programs" "hyprland" "enableNvidiaPatches" ]
|
||||
)
|
||||
];
|
||||
}
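For users tracking this rename, a hedged sketch of the new spelling in a system configuration; the values are illustrative:

programs.hyprland = {
  enable = true;
  # previously: programs.hyprland.nvidiaPatches = true;
  enableNvidiaPatches = true;
  # programs.hyprland.xwayland.hidpi was removed; XWayland scaling is now configured per the Hyprland wiki.
};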
|
||||
|
|
|
@ -697,7 +697,7 @@ let
|
|||
session required ${config.systemd.package}/lib/security/pam_systemd_home.so
|
||||
'' +
|
||||
optionalString cfg.makeHomeDir ''
|
||||
session required ${pkgs.pam}/lib/security/pam_mkhomedir.so silent skel=${config.security.pam.makeHomeDir.skelDirectory} umask=0077
|
||||
session required ${pkgs.pam}/lib/security/pam_mkhomedir.so silent skel=${config.security.pam.makeHomeDir.skelDirectory} umask=${config.security.pam.makeHomeDir.umask}
|
||||
'' +
|
||||
optionalString cfg.updateWtmp ''
|
||||
session required ${pkgs.pam}/lib/security/pam_lastlog.so silent
|
||||
|
@ -902,6 +902,16 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
security.pam.makeHomeDir.umask = mkOption {
|
||||
type = types.str;
|
||||
default = "0077";
|
||||
example = "0022";
|
||||
description = lib.mdDoc ''
|
||||
The user file mode creation mask to use on home directories
|
||||
newly created by `pam_mkhomedir`.
|
||||
'';
|
||||
};
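# Usage sketch (assumption, not part of this hunk): a configuration that enables
# pam_mkhomedir for a PAM service can now relax the default mask, e.g.
#   security.pam.services.sshd.makeHomeDir = true;
#   security.pam.makeHomeDir.umask = "0022";
# so freshly created home directories become world-readable instead of 0077.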
|
||||
|
||||
security.pam.enableSSHAgentAuth = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
#define _GNU_SOURCE
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
|
@ -16,7 +17,10 @@
|
|||
#include <syscall.h>
|
||||
#include <byteswap.h>
|
||||
|
||||
// aborts when false, printing the failed expression
|
||||
#define ASSERT(expr) ((expr) ? (void) 0 : assert_failure(#expr))
|
||||
// aborts when the call returns non-zero, printing the failed expression and errno
|
||||
#define MUSTSUCCEED(expr) ((expr) ? print_errno_and_die(#expr) : (void) 0)
|
||||
|
||||
extern char **environ;
|
||||
|
||||
|
@ -41,6 +45,12 @@ static noreturn void assert_failure(const char *assertion) {
|
|||
abort();
|
||||
}
|
||||
|
||||
static noreturn void print_errno_and_die(const char *assertion) {
|
||||
fprintf(stderr, "Call `%s` in NixOS's wrapper.c failed: %s\n", assertion, strerror(errno));
|
||||
fflush(stderr);
|
||||
abort();
|
||||
}
|
||||
|
||||
int get_last_cap(unsigned *last_cap) {
|
||||
FILE* file = fopen("/proc/sys/kernel/cap_last_cap", "r");
|
||||
if (file == NULL) {
|
||||
|
@ -177,6 +187,17 @@ int main(int argc, char **argv) {
|
|||
fprintf(stderr, "cannot readlink /proc/self/exe: %s", strerror(-self_path_size));
|
||||
}
|
||||
|
||||
unsigned int ruid, euid, suid, rgid, egid, sgid;
|
||||
MUSTSUCCEED(getresuid(&ruid, &euid, &suid));
|
||||
MUSTSUCCEED(getresgid(&rgid, &egid, &sgid));
|
||||
|
||||
// If true, then we did not benefit from setuid privilege escalation,
|
||||
// where the original uid is still in ruid and different from euid == suid.
|
||||
int didnt_suid = (ruid == euid) && (euid == suid);
|
||||
// If true, then we did not benefit from setgid privilege escalation
|
||||
int didnt_sgid = (rgid == egid) && (egid == sgid);
|
||||
|
||||
|
||||
// Make sure that we are being executed from the right location,
|
||||
// i.e., `safe_wrapper_dir'. This is to prevent someone from creating
|
||||
// hard link `X' from some other location, along with a false
|
||||
|
@ -189,15 +210,22 @@ int main(int argc, char **argv) {
|
|||
ASSERT('/' == wrapper_dir[0]);
|
||||
ASSERT('/' == self_path[len]);
|
||||
|
||||
// Make *really* *really* sure that we were executed as
|
||||
// `self_path', and not, say, as some other setuid program. That
|
||||
// is, our effective uid/gid should match the uid/gid of
|
||||
// `self_path'.
|
||||
// If we got privileges with the fs set[ug]id bit, check that the privilege we
|
||||
// got matches the one we expected, i.e. that our effective uid/gid
|
||||
// matches the uid/gid of `self_path`. This ensures that we were executed as
|
||||
// `self_path', and not, say, as some other setuid program.
|
||||
// We don't check that if we did not benefit from the set[ug]id bit, as
|
||||
// can be the case in nosuid mounts or user namespaces.
|
||||
struct stat st;
|
||||
ASSERT(lstat(self_path, &st) != -1);
|
||||
|
||||
ASSERT(!(st.st_mode & S_ISUID) || (st.st_uid == geteuid()));
|
||||
ASSERT(!(st.st_mode & S_ISGID) || (st.st_gid == getegid()));
|
||||
// if the wrapper gained privilege with suid, check that we got the uid of the file owner
|
||||
ASSERT(!((st.st_mode & S_ISUID) && !didnt_suid) || (st.st_uid == euid));
|
||||
// if the wrapper gained privilege with sgid, check that we got the gid of the file group
|
||||
ASSERT(!((st.st_mode & S_ISGID) && !didnt_sgid) || (st.st_gid == egid));
|
||||
// same, but with suid instead of euid
|
||||
ASSERT(!((st.st_mode & S_ISUID) && !didnt_suid) || (st.st_uid == suid));
|
||||
ASSERT(!((st.st_mode & S_ISGID) && !didnt_sgid) || (st.st_gid == sgid));
|
||||
|
||||
// And, of course, we shouldn't be writable.
|
||||
ASSERT(!(st.st_mode & (S_IWGRP | S_IWOTH)));
|
||||
|
|
third_party/nixpkgs/nixos/modules/services/audio/goxlr-utility.nix (new file, 48 lines, vendored)
|
@ -0,0 +1,48 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
cfg = config.services.goxlr-utility;
|
||||
in
|
||||
|
||||
with lib;
|
||||
{
|
||||
|
||||
options = {
|
||||
services.goxlr-utility = {
|
||||
enable = mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
description = lib.mdDoc ''
|
||||
Whether to enable goxlr-utility for controlling your TC-Helicon GoXLR or GoXLR Mini
|
||||
'';
|
||||
};
|
||||
package = mkPackageOptionMD pkgs "goxlr-utility" { };
|
||||
autoStart.xdg = mkOption {
|
||||
default = true;
|
||||
type = with types; bool;
|
||||
description = lib.mdDoc ''
|
||||
Start the daemon automatically using XDG autostart.
|
||||
Sets `xdg.autostart.enable = true` if not already enabled.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf config.services.goxlr-utility.enable
|
||||
{
|
||||
services.udev.packages = [ cfg.package ];
|
||||
|
||||
xdg.autostart.enable = mkIf cfg.autoStart.xdg true;
|
||||
environment.systemPackages = mkIf cfg.autoStart.xdg
|
||||
[
|
||||
cfg.package
|
||||
(pkgs.makeAutostartItem
|
||||
{
|
||||
name = "goxlr-utility";
|
||||
package = cfg.package;
|
||||
})
|
||||
];
|
||||
};
|
||||
|
||||
meta.maintainers = with maintainers; [ errnoh ];
|
||||
}
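A minimal sketch of enabling the new module above; the autoStart value shown is simply the module default:

services.goxlr-utility = {
  enable = true;
  autoStart.xdg = true;  # install an XDG autostart entry for the daemon (default)
};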
|
|
@ -18,6 +18,7 @@ let
|
|||
ExecStart = "${pkgs.liquidsoap}/bin/liquidsoap ${stream}";
|
||||
User = "liquidsoap";
|
||||
LogsDirectory = "liquidsoap";
|
||||
Restart = "always";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
|
|
@ -31,6 +31,8 @@ let
|
|||
|
||||
cfg = config.services.gitea-actions-runner;
|
||||
|
||||
settingsFormat = pkgs.formats.yaml { };
|
||||
|
||||
# Check whether any runner instance label requires a container runtime
|
||||
# Empty label strings result in the upstream defined defaultLabels, which require docker
|
||||
# https://gitea.com/gitea/act_runner/src/tag/v0.1.5/internal/app/cmd/register.go#L93-L98
|
||||
|
@ -119,6 +121,18 @@ in
|
|||
that follows the filesystem hierarchy standard.
|
||||
'';
|
||||
};
|
||||
settings = mkOption {
|
||||
description = lib.mdDoc ''
|
||||
Configuration for `act_runner daemon`.
|
||||
See https://gitea.com/gitea/act_runner/src/branch/main/internal/pkg/config/config.example.yaml for an example configuration
|
||||
'';
|
||||
|
||||
type = types.submodule {
|
||||
freeformType = settingsFormat.type;
|
||||
};
|
||||
|
||||
default = { };
|
||||
};
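# Usage sketch (hedged; the instance name and values are placeholders): the freeform
# settings are rendered to config.yaml and passed to act_runner via --config, e.g.
#   services.gitea-actions-runner.instances.default.settings = {
#     log.level = "info";
#     runner.capacity = 2;
#   };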
|
||||
|
||||
hostPackages = mkOption {
|
||||
type = listOf package;
|
||||
|
@ -169,6 +183,7 @@ in
|
|||
wantsHost = hasHostScheme instance;
|
||||
wantsDocker = wantsContainerRuntime && config.virtualisation.docker.enable;
|
||||
wantsPodman = wantsContainerRuntime && config.virtualisation.podman.enable;
|
||||
configFile = settingsFormat.generate "config.yaml" instance.settings;
|
||||
in
|
||||
nameValuePair "gitea-runner-${escapeSystemdPath name}" {
|
||||
inherit (instance) enable;
|
||||
|
@ -196,7 +211,12 @@ in
|
|||
User = "gitea-runner";
|
||||
StateDirectory = "gitea-runner";
|
||||
WorkingDirectory = "-/var/lib/gitea-runner/${name}";
|
||||
ExecStartPre = pkgs.writeShellScript "gitea-register-runner-${name}" ''
|
||||
|
||||
# gitea-runner might fail when gitea is restarted during upgrade.
|
||||
Restart = "on-failure";
|
||||
RestartSec = 2;
|
||||
|
||||
ExecStartPre = [(pkgs.writeShellScript "gitea-register-runner-${name}" ''
|
||||
export INSTANCE_DIR="$STATE_DIRECTORY/${name}"
|
||||
mkdir -vp "$INSTANCE_DIR"
|
||||
cd "$INSTANCE_DIR"
|
||||
|
@ -221,8 +241,8 @@ in
|
|||
echo "$LABELS_WANTED" > "$LABELS_FILE"
|
||||
fi
|
||||
|
||||
'';
|
||||
ExecStart = "${cfg.package}/bin/act_runner daemon";
|
||||
'')];
|
||||
ExecStart = "${cfg.package}/bin/act_runner daemon --config ${configFile}";
|
||||
SupplementaryGroups = optionals (wantsDocker) [
|
||||
"docker"
|
||||
] ++ optionals (wantsPodman) [
|
||||
|
|
|
@ -1,8 +1,17 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
inherit
|
||||
(lib)
|
||||
escapeShellArg
|
||||
hasAttr
|
||||
literalExpression
|
||||
mkEnableOption
|
||||
mkIf
|
||||
mkOption
|
||||
types
|
||||
;
|
||||
|
||||
format = pkgs.formats.json { };
|
||||
cfg = config.services.influxdb2;
|
||||
configFile = format.generate "config.json" cfg.settings;
|
||||
|
@ -24,14 +33,60 @@ in
|
|||
description = lib.mdDoc ''configuration options for influxdb2, see <https://docs.influxdata.com/influxdb/v2.0/reference/config-options> for details.'';
|
||||
type = format.type;
|
||||
};
|
||||
|
||||
provision = {
|
||||
enable = mkEnableOption "initial database setup and provisioning";
|
||||
|
||||
initialSetup = {
|
||||
organization = mkOption {
|
||||
type = types.str;
|
||||
example = "main";
|
||||
description = "Primary organization name";
|
||||
};
|
||||
|
||||
bucket = mkOption {
|
||||
type = types.str;
|
||||
example = "example";
|
||||
description = "Primary bucket name";
|
||||
};
|
||||
|
||||
username = mkOption {
|
||||
type = types.str;
|
||||
default = "admin";
|
||||
description = "Primary username";
|
||||
};
|
||||
|
||||
retention = mkOption {
|
||||
type = types.str;
|
||||
default = "0";
|
||||
description = ''
|
||||
The duration for which the bucket will retain data (0 is infinite).
|
||||
Accepted units are `ns` (nanoseconds), `us` or `µs` (microseconds), `ms` (milliseconds),
|
||||
`s` (seconds), `m` (minutes), `h` (hours), `d` (days) and `w` (weeks).
|
||||
'';
|
||||
};
|
||||
|
||||
passwordFile = mkOption {
|
||||
type = types.path;
|
||||
description = "Password for primary user. Don't use a file from the nix store!";
|
||||
};
|
||||
|
||||
tokenFile = mkOption {
|
||||
type = types.path;
|
||||
description = "API Token to set for the admin user. Don't use a file from the nix store!";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
assertions = [{
|
||||
assertion = !(builtins.hasAttr "bolt-path" cfg.settings) && !(builtins.hasAttr "engine-path" cfg.settings);
|
||||
message = "services.influxdb2.config: bolt-path and engine-path should not be set as they are managed by systemd";
|
||||
}];
|
||||
assertions = [
|
||||
{
|
||||
assertion = !(hasAttr "bolt-path" cfg.settings) && !(hasAttr "engine-path" cfg.settings);
|
||||
message = "services.influxdb2.config: bolt-path and engine-path should not be set as they are managed by systemd";
|
||||
}
|
||||
];
|
||||
|
||||
systemd.services.influxdb2 = {
|
||||
description = "InfluxDB is an open-source, distributed, time series database";
|
||||
|
@ -52,7 +107,62 @@ in
|
|||
LimitNOFILE = 65536;
|
||||
KillMode = "control-group";
|
||||
Restart = "on-failure";
|
||||
LoadCredential = mkIf cfg.provision.enable [
|
||||
"admin-password:${cfg.provision.initialSetup.passwordFile}"
|
||||
"admin-token:${cfg.provision.initialSetup.tokenFile}"
|
||||
];
|
||||
};
|
||||
|
||||
path = [pkgs.influxdb2-cli];
|
||||
|
||||
# Mark if this is the first startup so postStart can do the initial setup
|
||||
preStart = mkIf cfg.provision.enable ''
|
||||
if ! test -e "$STATE_DIRECTORY/influxd.bolt"; then
|
||||
touch "$STATE_DIRECTORY/.first_startup"
|
||||
fi
|
||||
'';
|
||||
|
||||
postStart = let
|
||||
initCfg = cfg.provision.initialSetup;
|
||||
in mkIf cfg.provision.enable (
|
||||
''
|
||||
set -euo pipefail
|
||||
export INFLUX_HOST="http://"${escapeShellArg (cfg.settings.http-bind-address or "localhost:8086")}
|
||||
|
||||
# Wait for the influxdb server to come online
|
||||
count=0
|
||||
while ! influx ping &>/dev/null; do
|
||||
if [ "$count" -eq 300 ]; then
|
||||
echo "Tried for 30 seconds, giving up..."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! kill -0 "$MAINPID"; then
|
||||
echo "Main server died, giving up..."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sleep 0.1
|
||||
count=$((count + 1))
|
||||
done
|
||||
|
||||
# Do the initial database setup. Pass /dev/null as configs-path to
|
||||
# avoid saving the token as the active config.
|
||||
if test -e "$STATE_DIRECTORY/.first_startup"; then
|
||||
influx setup \
|
||||
--configs-path /dev/null \
|
||||
--org ${escapeShellArg initCfg.organization} \
|
||||
--bucket ${escapeShellArg initCfg.bucket} \
|
||||
--username ${escapeShellArg initCfg.username} \
|
||||
--password "$(< "$CREDENTIALS_DIRECTORY/admin-password")" \
|
||||
--token "$(< "$CREDENTIALS_DIRECTORY/admin-token")" \
|
||||
--retention ${escapeShellArg initCfg.retention} \
|
||||
--force >/dev/null
|
||||
|
||||
rm -f "$STATE_DIRECTORY/.first_startup"
|
||||
fi
|
||||
''
|
||||
);
|
||||
};
|
||||
|
||||
users.extraUsers.influxdb2 = {
|
||||
|
@ -63,5 +173,5 @@ in
|
|||
users.extraGroups.influxdb2 = {};
|
||||
};
|
||||
|
||||
meta.maintainers = with lib.maintainers; [ nickcao ];
|
||||
meta.maintainers = with lib.maintainers; [ nickcao oddlama ];
|
||||
}
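A hedged sketch of the new provisioning options in use; the organization, bucket, retention and secret paths are placeholders:

services.influxdb2 = {
  enable = true;
  provision = {
    enable = true;
    initialSetup = {
      organization = "main";
      bucket = "default";
      retention = "52w";
      # keep these files out of the Nix store
      passwordFile = "/run/secrets/influxdb2-admin-password";
      tokenFile = "/run/secrets/influxdb2-admin-token";
    };
  };
};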
|
||||
|
|
|
@ -1,14 +1,14 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
inherit (lib) concatStringsSep mkEnableOption mkIf mkOption types;
|
||||
cfg = config.services.openarena;
|
||||
in
|
||||
{
|
||||
options = {
|
||||
services.openarena = {
|
||||
enable = mkEnableOption (lib.mdDoc "OpenArena");
|
||||
package = lib.mkPackageOptionMD pkgs "openarena" { };
|
||||
|
||||
openPorts = mkOption {
|
||||
type = types.bool;
|
||||
|
@ -43,7 +43,7 @@ in
|
|||
serviceConfig = {
|
||||
DynamicUser = true;
|
||||
StateDirectory = "openarena";
|
||||
ExecStart = "${pkgs.openarena}/bin/oa_ded +set fs_basepath ${pkgs.openarena}/openarena-0.8.8 +set fs_homepath /var/lib/openarena ${concatStringsSep " " cfg.extraFlags}";
|
||||
ExecStart = "${cfg.package}/bin/oa_ded +set fs_basepath ${cfg.package}/share/openarena +set fs_homepath /var/lib/openarena ${concatStringsSep " " cfg.extraFlags}";
|
||||
Restart = "on-failure";
|
||||
|
||||
# Hardening
|
||||
|
|
|
@ -1,13 +1,15 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
with lib;
|
||||
|
||||
let
|
||||
inherit (lib) literalMD mkEnableOption mkIf mkOption types;
|
||||
cfg = config.services.quake3-server;
|
||||
|
||||
configFile = pkgs.writeText "q3ds-extra.cfg" ''
|
||||
set net_port ${builtins.toString cfg.port}
|
||||
|
||||
${cfg.extraConfig}
|
||||
'';
|
||||
|
||||
defaultBaseq3 = pkgs.requireFile rec {
|
||||
name = "baseq3";
|
||||
hashMode = "recursive";
|
||||
|
@ -25,6 +27,7 @@ let
|
|||
$services.quake3-server.baseq3/.q3a/
|
||||
'';
|
||||
};
|
||||
|
||||
home = pkgs.runCommand "quake3-home" {} ''
|
||||
mkdir -p $out/.q3a/baseq3
|
||||
|
||||
|
@ -38,6 +41,7 @@ in {
|
|||
options = {
|
||||
services.quake3-server = {
|
||||
enable = mkEnableOption (lib.mdDoc "Quake 3 dedicated server");
|
||||
package = lib.mkPackageOptionMD pkgs "ioquake3" { };
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
|
@ -103,10 +107,10 @@ in {
|
|||
ReadOnlyPaths = if baseq3InStore then home else cfg.baseq3;
|
||||
ExecStartPre = optionalString (!baseq3InStore) "+${pkgs.coreutils}/bin/cp ${configFile} ${cfg.baseq3}/.q3a/baseq3/nix.cfg";
|
||||
|
||||
ExecStart = "${pkgs.ioquake3}/ioq3ded.x86_64 +exec nix.cfg";
|
||||
ExecStart = "${cfg.package}/bin/ioq3ded +exec nix.cfg";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
meta.maintainers = with maintainers; [ f4814n ];
|
||||
meta.maintainers = with lib.maintainers; [ f4814n ];
|
||||
}
|
||||
|
|
|
@ -37,8 +37,8 @@ in
|
|||
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
default = if versionOlder config.system.stateVersion "23.05" then pkgs.graylog-3_3 else pkgs.graylog-5_0;
|
||||
defaultText = literalExpression (if versionOlder config.system.stateVersion "23.05" then "pkgs.graylog-3_3" else "pkgs.graylog-5_0");
|
||||
default = if versionOlder config.system.stateVersion "23.05" then pkgs.graylog-3_3 else pkgs.graylog-5_1;
|
||||
defaultText = literalExpression (if versionOlder config.system.stateVersion "23.05" then "pkgs.graylog-3_3" else "pkgs.graylog-5_1");
|
||||
description = lib.mdDoc "Graylog package to use.";
|
||||
};
|
||||
|
||||
|
|
|
@ -94,6 +94,16 @@ in
|
|||
instance will require manual migration of data.
|
||||
'';
|
||||
};
|
||||
global.allow_check_for_updates = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = lib.mdDoc ''
|
||||
Whether to allow Conduit to automatically contact
|
||||
<https://conduit.rs> hourly to check for important Conduit news.
|
||||
|
||||
Disabled by default because nixpkgs handles updates.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
default = {};
|
||||
|
|
|
@ -27,7 +27,8 @@ please refer to the
|
|||
{ pkgs, lib, config, ... }:
|
||||
let
|
||||
fqdn = "${config.networking.hostName}.${config.networking.domain}";
|
||||
clientConfig."m.homeserver".base_url = "https://${fqdn}";
|
||||
baseUrl = "https://${fqdn}";
|
||||
clientConfig."m.homeserver".base_url = baseUrl;
|
||||
serverConfig."m.server" = "${fqdn}:443";
|
||||
mkWellKnown = data: ''
|
||||
add_header Content-Type application/json;
|
||||
|
@ -97,6 +98,11 @@ in {
|
|||
services.matrix-synapse = {
|
||||
enable = true;
|
||||
settings.server_name = config.networking.domain;
|
||||
# The public base URL value must match the `base_url` value set in `clientConfig` above.
|
||||
# The default value here is based on `server_name`, so if your `server_name` is different
|
||||
# from the value of `fqdn` above, you will likely run into some mismatched domain names
|
||||
# in client applications.
|
||||
settings.public_baseurl = baseUrl;
|
||||
settings.listeners = [
|
||||
{ port = 8008;
|
||||
bind_addresses = [ "::1" ];
|
||||
|
|
|
@ -9,11 +9,6 @@ let
|
|||
# remove null values from the final configuration
|
||||
finalSettings = lib.filterAttrsRecursive (_: v: v != null) cfg.settings;
|
||||
configFile = format.generate "homeserver.yaml" finalSettings;
|
||||
logConfigFile = format.generate "log_config.yaml" cfg.logConfig;
|
||||
|
||||
pluginsEnv = cfg.package.python.buildEnv.override {
|
||||
extraLibs = cfg.plugins;
|
||||
};
|
||||
|
||||
usePostgresql = cfg.settings.database.name == "psycopg2";
|
||||
hasLocalPostgresDB = let args = cfg.settings.database.args; in
|
||||
|
@ -50,6 +45,29 @@ let
|
|||
"${bindAddress}"
|
||||
}:${builtins.toString listener.port}/"
|
||||
'';
|
||||
|
||||
defaultExtras = [
|
||||
"systemd"
|
||||
"postgres"
|
||||
"url-preview"
|
||||
"user-search"
|
||||
];
|
||||
|
||||
wantedExtras = cfg.extras
|
||||
++ lib.optional (cfg.settings ? oidc_providers) "oidc"
|
||||
++ lib.optional (cfg.settings ? jwt_config) "jwt"
|
||||
++ lib.optional (cfg.settings ? saml2_config) "saml2"
|
||||
++ lib.optional (cfg.settings ? opentracing) "opentracing"
|
||||
++ lib.optional (cfg.settings ? redis) "redis"
|
||||
++ lib.optional (cfg.settings ? sentry) "sentry"
|
||||
++ lib.optional (cfg.settings ? user_directory) "user-search"
|
||||
++ lib.optional (cfg.settings.url_preview_enabled) "url-preview"
|
||||
++ lib.optional (cfg.settings.database.name == "psycopg2") "postgres";
|
||||
|
||||
wrapped = pkgs.matrix-synapse.override {
|
||||
extras = wantedExtras;
|
||||
inherit (cfg) plugins;
|
||||
};
|
||||
in {
|
||||
|
||||
imports = [
|
||||
|
@ -151,10 +169,53 @@ in {
|
|||
|
||||
package = mkOption {
|
||||
type = types.package;
|
||||
default = pkgs.matrix-synapse;
|
||||
defaultText = literalExpression "pkgs.matrix-synapse";
|
||||
readOnly = true;
|
||||
description = lib.mdDoc ''
|
||||
Overridable attribute of the matrix synapse server package to use.
|
||||
Reference to the `matrix-synapse` wrapper with all extras
|
||||
(e.g. for `oidc` or `saml2`) added to the `PYTHONPATH` of all executables.
|
||||
|
||||
This option is useful to reference the "final" `matrix-synapse` package that's
|
||||
actually used by `matrix-synapse.service`. For instance, when using
|
||||
workers, it's possible to run
|
||||
`''${config.services.matrix-synapse.package}/bin/synapse_worker` and
|
||||
no additional PYTHONPATH needs to be specified for extras or plugins configured
|
||||
via `services.matrix-synapse`.
|
||||
|
||||
However, this means that this option is supposed to be only declared
|
||||
by the `services.matrix-synapse` module itself and is thus read-only.
|
||||
In order to modify `matrix-synapse` itself, use an overlay to override
|
||||
`pkgs.matrix-synapse-unwrapped`.
|
||||
'';
|
||||
};
|
||||
|
||||
extras = mkOption {
|
||||
type = types.listOf (types.enum (lib.attrNames pkgs.matrix-synapse-unwrapped.optional-dependencies));
|
||||
default = defaultExtras;
|
||||
example = literalExpression ''
|
||||
[
|
||||
"cache-memory" # Provide statistics about caching memory consumption
|
||||
"jwt" # JSON Web Token authentication
|
||||
"opentracing" # End-to-end tracing support using Jaeger
|
||||
"oidc" # OpenID Connect authentication
|
||||
"postgres" # PostgreSQL database backend
|
||||
"redis" # Redis support for the replication stream between worker processes
|
||||
"saml2" # SAML2 authentication
|
||||
"sentry" # Error tracking and performance metrics
|
||||
"systemd" # Provide the JournalHandler used in the default log_config
|
||||
"url-preview" # Support for oEmbed URL previews
|
||||
"user-search" # Support internationalized domain names in user-search
|
||||
]
|
||||
'';
|
||||
description = lib.mdDoc ''
|
||||
Explicitly install extras provided by matrix-synapse. Most
|
||||
will require some additional configuration.
|
||||
|
||||
Extras will automatically be enabled, when the relevant
|
||||
configuration sections are present.
|
||||
|
||||
Please note that this option is additive: i.e. when adding a new item
|
||||
to this list, the defaults are still kept. To override the defaults as well,
|
||||
use `lib.mkForce`.
|
||||
'';
|
||||
};
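# Usage sketch (assumption, not part of this hunk): because the option is additive,
# a configuration only lists the extras it wants on top of the defaults, e.g.
#   services.matrix-synapse.extras = [ "oidc" "redis" ];
# and uses lib.mkForce on the list to replace the defaults entirely.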
|
||||
|
||||
|
@ -193,7 +254,7 @@ in {
|
|||
default = {};
|
||||
description = mdDoc ''
|
||||
The primary synapse configuration. See the
|
||||
[sample configuration](https://github.com/matrix-org/synapse/blob/v${cfg.package.version}/docs/sample_config.yaml)
|
||||
[sample configuration](https://github.com/matrix-org/synapse/blob/v${pkgs.matrix-synapse-unwrapped.version}/docs/sample_config.yaml)
|
||||
for possible values.
|
||||
|
||||
Secrets should be passed in by using the `extraConfigFiles` option.
|
||||
|
@ -706,6 +767,10 @@ in {
|
|||
];
|
||||
|
||||
services.matrix-synapse.configFile = configFile;
|
||||
services.matrix-synapse.package = wrapped;
|
||||
|
||||
# default them, so they are additive
|
||||
services.matrix-synapse.extras = defaultExtras;
|
||||
|
||||
users.users.matrix-synapse = {
|
||||
group = "matrix-synapse";
|
||||
|
@ -729,9 +794,7 @@ in {
|
|||
--keys-directory ${cfg.dataDir} \
|
||||
--generate-keys
|
||||
'';
|
||||
environment = {
|
||||
PYTHONPATH = makeSearchPathOutput "lib" cfg.package.python.sitePackages [ pluginsEnv ];
|
||||
} // optionalAttrs (cfg.withJemalloc) {
|
||||
environment = optionalAttrs (cfg.withJemalloc) {
|
||||
LD_PRELOAD = "${pkgs.jemalloc}/lib/libjemalloc.so";
|
||||
};
|
||||
serviceConfig = {
|
||||
|
|
|
@ -351,7 +351,7 @@ in {
|
|||
CacheDirectory = dirs cacheDirs;
|
||||
RuntimeDirectory = dirName;
|
||||
ReadWriteDirectories = lib.mkIf useCustomDir [ cfg.storageDir ];
|
||||
StateDirectory = dirs (lib.optional (!useCustomDir) libDirs);
|
||||
StateDirectory = dirs (lib.optionals (!useCustomDir) libDirs);
|
||||
LogsDirectory = dirName;
|
||||
PrivateTmp = true;
|
||||
ProtectSystem = "strict";
|
||||
|
|
|
@ -153,6 +153,18 @@ in {
|
|||
type = types.bool;
|
||||
};
|
||||
|
||||
processAgentPackage = mkOption {
|
||||
default = pkgs.datadog-process-agent;
|
||||
defaultText = literalExpression "pkgs.datadog-process-agent";
|
||||
description = lib.mdDoc ''
|
||||
Which DataDog v7 agent package to use. Note that the provided
|
||||
package is expected to have an overridable `pythonPackages`-attribute
|
||||
which configures the Python environment with the Datadog
|
||||
checks.
|
||||
'';
|
||||
type = types.package;
|
||||
};
|
||||
|
||||
enableTraceAgent = mkOption {
|
||||
description = lib.mdDoc ''
|
||||
Whether to enable the trace agent.
|
||||
|
@ -270,7 +282,7 @@ in {
|
|||
path = [ ];
|
||||
script = ''
|
||||
export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile})
|
||||
${pkgs.datadog-process-agent}/bin/process-agent --config /etc/datadog-agent/datadog.yaml
|
||||
${cfg.processAgentPackage}/bin/process-agent --config /etc/datadog-agent/datadog.yaml
|
||||
'';
|
||||
});
|
||||
|
||||
|
|
|
@ -20,8 +20,8 @@ in
|
|||
unitConfig = {
|
||||
Description = "dae Service";
|
||||
Documentation = "https://github.com/daeuniverse/dae";
|
||||
After = [ "network.target" "systemd-sysctl.service" ];
|
||||
Wants = [ "network.target" ];
|
||||
After = [ "network-online.target" "systemd-sysctl.service" ];
|
||||
Wants = [ "network-online.target" ];
|
||||
};
|
||||
|
||||
serviceConfig = {
|
||||
|
|
|
@ -17,14 +17,9 @@ with lib;
|
|||
options = {
|
||||
services.haproxy = {
|
||||
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = lib.mdDoc ''
|
||||
Whether to enable HAProxy, the reliable, high performance TCP/HTTP
|
||||
load balancer.
|
||||
'';
|
||||
};
|
||||
enable = mkEnableOption (lib.mdDoc "HAProxy, the reliable, high performance TCP/HTTP load balancer.");
|
||||
|
||||
package = mkPackageOptionMD pkgs "haproxy" { };
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
|
@ -70,15 +65,15 @@ with lib;
|
|||
ExecStartPre = [
|
||||
# when the master process receives USR2, it reloads itself using exec(argv[0]),
|
||||
# so we create a symlink there and update it before reloading
|
||||
"${pkgs.coreutils}/bin/ln -sf ${pkgs.haproxy}/sbin/haproxy /run/haproxy/haproxy"
|
||||
"${pkgs.coreutils}/bin/ln -sf ${lib.getExe cfg.package} /run/haproxy/haproxy"
|
||||
# when running the config test, don't be quiet so we can see what goes wrong
|
||||
"/run/haproxy/haproxy -c -f ${haproxyCfg}"
|
||||
];
|
||||
ExecStart = "/run/haproxy/haproxy -Ws -f /etc/haproxy.cfg -p /run/haproxy/haproxy.pid";
|
||||
# support reloading
|
||||
ExecReload = [
|
||||
"${pkgs.haproxy}/sbin/haproxy -c -f ${haproxyCfg}"
|
||||
"${pkgs.coreutils}/bin/ln -sf ${pkgs.haproxy}/sbin/haproxy /run/haproxy/haproxy"
|
||||
"${lib.getExe cfg.package} -c -f ${haproxyCfg}"
|
||||
"${pkgs.coreutils}/bin/ln -sf ${lib.getExe cfg.package} /run/haproxy/haproxy"
|
||||
"${pkgs.coreutils}/bin/kill -USR2 $MAINPID"
|
||||
];
|
||||
KillMode = "mixed";
|
||||
|
|
|
@ -292,7 +292,7 @@ in {
|
|||
};
|
||||
|
||||
client_secret_path = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = lib.mdDoc ''
|
||||
Path to OpenID Connect client secret file. Expands environment variables in format ''${VAR}.
|
||||
|
|
|
@ -987,7 +987,7 @@ in {
|
|||
} // optionalAttrs (bssCfg.authentication.wpaPassword != null) {
|
||||
wpa_passphrase = bssCfg.authentication.wpaPassword;
|
||||
} // optionalAttrs (bssCfg.authentication.wpaPskFile != null) {
|
||||
wpa_psk_file = bssCfg.authentication.wpaPskFile;
|
||||
wpa_psk_file = toString bssCfg.authentication.wpaPskFile;
|
||||
};
|
||||
|
||||
dynamicConfigScripts = let
|
||||
|
|
third_party/nixpkgs/nixos/modules/services/networking/jool.nix (new file, 222 lines, vendored)
|
@ -0,0 +1,222 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
|
||||
let
|
||||
cfg = config.networking.jool;
|
||||
|
||||
jool = config.boot.kernelPackages.jool;
|
||||
jool-cli = pkgs.jool-cli;
|
||||
|
||||
hardening = {
|
||||
# Run as unprivileged user
|
||||
User = "jool";
|
||||
Group = "jool";
|
||||
DynamicUser = true;
|
||||
|
||||
# Restrict filesystem to only read the jool module
|
||||
TemporaryFileSystem = [ "/" ];
|
||||
BindReadOnlyPaths = [
|
||||
builtins.storeDir
|
||||
"/run/current-system/kernel-modules"
|
||||
];
|
||||
|
||||
# Give capabilities to load the module and configure it
|
||||
AmbientCapabilities = [ "CAP_SYS_MODULE" "CAP_NET_ADMIN" ];
|
||||
RestrictAddressFamilies = [ "AF_NETLINK" ];
|
||||
|
||||
# Other restrictions
|
||||
RestrictNamespaces = [ "net" ];
|
||||
SystemCallFilter = [ "@system-service" "@module" ];
|
||||
CapabilityBoundingSet = [ "CAP_SYS_MODULE" "CAP_NET_ADMIN" ];
|
||||
};
|
||||
|
||||
configFormat = pkgs.formats.json {};
|
||||
|
||||
mkDefaultAttrs = lib.mapAttrs (n: v: lib.mkDefault v);
|
||||
|
||||
defaultNat64 = {
|
||||
instance = "default";
|
||||
framework = "netfilter";
|
||||
global.pool6 = "64:ff9b::/96";
|
||||
};
|
||||
defaultSiit = {
|
||||
instance = "default";
|
||||
framework = "netfilter";
|
||||
};
|
||||
|
||||
nat64Conf = configFormat.generate "jool-nat64.conf" cfg.nat64.config;
|
||||
siitConf = configFormat.generate "jool-siit.conf" cfg.siit.config;
|
||||
|
||||
in
|
||||
|
||||
{
|
||||
###### interface
|
||||
|
||||
options = {
|
||||
networking.jool.enable = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
relatedPackages = [ "linuxPackages.jool" "jool-cli" ];
|
||||
description = lib.mdDoc ''
|
||||
Whether to enable Jool, an Open Source implementation of IPv4/IPv6
|
||||
translation on Linux.
|
||||
|
||||
Jool can perform stateless IP/ICMP translation (SIIT) or stateful
|
||||
NAT64, analogous to the IPv4 NAPT. Refer to the upstream
|
||||
[documentation](https://nicmx.github.io/Jool/en/intro-xlat.html) for
|
||||
the supported modes of translation and how to configure them.
|
||||
'';
|
||||
};
|
||||
|
||||
networking.jool.nat64.enable = lib.mkEnableOption (lib.mdDoc "a NAT64 instance of Jool.");
|
||||
networking.jool.nat64.config = lib.mkOption {
|
||||
type = configFormat.type;
|
||||
default = defaultNat64;
|
||||
example = lib.literalExpression ''
|
||||
{
|
||||
# custom NAT64 prefix
|
||||
global.pool6 = "2001:db8:64::/96";
|
||||
|
||||
# Port forwarding
|
||||
bib = [
|
||||
{ # SSH 192.0.2.16 → 2001:db8:a::1
|
||||
"protocol" = "TCP";
|
||||
"ipv4 address" = "192.0.2.16#22";
|
||||
"ipv6 address" = "2001:db8:a::1#22";
|
||||
}
|
||||
{ # DNS (TCP) 192.0.2.16 → 2001:db8:a::2
|
||||
"protocol" = "TCP";
|
||||
"ipv4 address" = "192.0.2.16#53";
|
||||
"ipv6 address" = "2001:db8:a::2#53";
|
||||
}
|
||||
{ # DNS (UDP) 192.0.2.16 → 2001:db8:a::2
|
||||
"protocol" = "UDP";
|
||||
"ipv4 address" = "192.0.2.16#53";
|
||||
"ipv6 address" = "2001:db8:a::2#53";
|
||||
}
|
||||
];
|
||||
|
||||
pool4 = [
|
||||
# Ports for dynamic translation
|
||||
{ protocol = "TCP"; prefix = "192.0.2.16/32"; "port range" = "40001-65535"; }
|
||||
{ protocol = "UDP"; prefix = "192.0.2.16/32"; "port range" = "40001-65535"; }
|
||||
{ protocol = "ICMP"; prefix = "192.0.2.16/32"; "port range" = "40001-65535"; }
|
||||
|
||||
# Ports for static BIB entries
|
||||
{ protocol = "TCP"; prefix = "192.0.2.16/32"; "port range" = "22"; }
|
||||
{ protocol = "UDP"; prefix = "192.0.2.16/32"; "port range" = "53"; }
|
||||
];
|
||||
}
|
||||
'';
|
||||
description = lib.mdDoc ''
|
||||
The configuration of a stateful NAT64 instance of Jool managed through
|
||||
NixOS. See https://nicmx.github.io/Jool/en/config-atomic.html for the
|
||||
available options.
|
||||
|
||||
::: {.note}
|
||||
Existing or more instances created manually will not interfere with the
|
||||
NixOS instance, provided the respective `pool4` addresses and port
|
||||
ranges are not overlapping.
|
||||
:::
|
||||
|
||||
::: {.warning}
|
||||
Changes to the NixOS instance performed via `jool instance nixos-nat64`
|
||||
are applied correctly but will be lost after restarting
|
||||
`jool-nat64.service`.
|
||||
:::
|
||||
'';
|
||||
};
|
||||
|
||||
networking.jool.siit.enable = lib.mkEnableOption (lib.mdDoc "a SIIT instance of Jool.");
|
||||
networking.jool.siit.config = lib.mkOption {
|
||||
type = configFormat.type;
|
||||
default = defaultSiit;
|
||||
example = lib.literalExpression ''
|
||||
{
|
||||
# Maps any IPv4 address x.y.z.t to 2001:db8::x.y.z.t and v.v.
|
||||
pool6 = "2001:db8::/96";
|
||||
|
||||
# Explicit address mappings
|
||||
eamt = [
|
||||
# 2001:db8:1:: ←→ 192.0.2.0
|
||||
{ "ipv6 prefix": "2001:db8:1::/128", "ipv4 prefix": "192.0.2.0" }
|
||||
# 2001:db8:2::x ←→ 198.51.100.x
{ "ipv6 prefix" = "2001:db8:2::/120"; "ipv4 prefix" = "198.51.100.0/24"; }
|
||||
]
|
||||
}
|
||||
'';
|
||||
description = lib.mdDoc ''
|
||||
The configuration of a SIIT instance of Jool managed through
|
||||
NixOS. See https://nicmx.github.io/Jool/en/config-atomic.html for the
|
||||
available options.
|
||||
|
||||
::: {.note}
|
||||
Existing or more instances created manually will not interfere with the
|
||||
NixOS instance, provided the respective `EAMT` address mappings are not
|
||||
overlapping.
|
||||
:::
|
||||
|
||||
::: {.warning}
|
||||
Changes to the NixOS instance performed via `jool instance nixos-siit`
|
||||
are applied correctly but will be lost after restarting
|
||||
`jool-siit.service`.
|
||||
:::
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
###### implementation
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
environment.systemPackages = [ jool-cli ];
|
||||
boot.extraModulePackages = [ jool ];
|
||||
|
||||
systemd.services.jool-nat64 = lib.mkIf cfg.nat64.enable {
|
||||
description = "Jool, NAT64 setup";
|
||||
documentation = [ "https://nicmx.github.io/Jool/en/documentation.html" ];
|
||||
after = [ "network.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
reloadIfChanged = true;
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
ExecStartPre = "${pkgs.kmod}/bin/modprobe jool";
|
||||
ExecStart = "${jool-cli}/bin/jool file handle ${nat64Conf}";
|
||||
ExecStop = "${jool-cli}/bin/jool -f ${nat64Conf} instance remove";
|
||||
} // hardening;
|
||||
};
|
||||
|
||||
systemd.services.jool-siit = lib.mkIf cfg.siit.enable {
|
||||
description = "Jool, SIIT setup";
|
||||
documentation = [ "https://nicmx.github.io/Jool/en/documentation.html" ];
|
||||
after = [ "network.target" ];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
reloadIfChanged = true;
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
RemainAfterExit = true;
|
||||
ExecStartPre = "${pkgs.kmod}/bin/modprobe jool_siit";
|
||||
ExecStart = "${jool-cli}/bin/jool_siit file handle ${siitConf}";
|
||||
ExecStop = "${jool-cli}/bin/jool_siit -f ${siitConf} instance remove";
|
||||
} // hardening;
|
||||
};
|
||||
|
||||
system.checks = lib.singleton (pkgs.runCommand "jool-validated" {
|
||||
nativeBuildInputs = [ pkgs.buildPackages.jool-cli ];
|
||||
preferLocalBuild = true;
|
||||
} ''
|
||||
printf 'Validating Jool configuration... '
|
||||
${lib.optionalString cfg.siit.enable "jool_siit file check ${siitConf}"}
|
||||
${lib.optionalString cfg.nat64.enable "jool file check ${nat64Conf}"}
|
||||
printf 'ok\n'
|
||||
touch "$out"
|
||||
'');
|
||||
|
||||
networking.jool.nat64.config = mkDefaultAttrs defaultNat64;
|
||||
networking.jool.siit.config = mkDefaultAttrs defaultSiit;
|
||||
|
||||
};
|
||||
|
||||
meta.maintainers = with lib.maintainers; [ rnhmjoj ];
|
||||
|
||||
}
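A minimal sketch of enabling the module added above; with no further settings the NAT64 instance falls back to defaultNat64:

networking.jool = {
  enable = true;
  nat64.enable = true;  # translates using the default 64:ff9b::/96 pool6
};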
|
|
@ -13,7 +13,9 @@ in
|
|||
example = [ "a8a2c3c10c1a68de" ];
|
||||
type = types.listOf types.str;
|
||||
description = lib.mdDoc ''
|
||||
List of ZeroTier Network IDs to join on startup
|
||||
List of ZeroTier Network IDs to join on startup.
|
||||
Note that networks are only ever joined, but not automatically left after removing them from the list.
|
||||
To remove networks, use the ZeroTier CLI: `zerotier-cli leave <network-id>`
|
||||
'';
|
||||
};
|
||||
|
||||
|
|
|
@ -69,6 +69,8 @@ in
|
|||
enableServer = lib.mkEnableOption (lib.mdDoc "the Kanidm server");
|
||||
enablePam = lib.mkEnableOption (lib.mdDoc "the Kanidm PAM and NSS integration");
|
||||
|
||||
package = lib.mkPackageOptionMD pkgs "kanidm" {};
|
||||
|
||||
serverSettings = lib.mkOption {
|
||||
type = lib.types.submodule {
|
||||
freeformType = settingsFormat.type;
|
||||
|
@ -222,7 +224,7 @@ in
|
|||
}
|
||||
];
|
||||
|
||||
environment.systemPackages = lib.mkIf cfg.enableClient [ pkgs.kanidm ];
|
||||
environment.systemPackages = lib.mkIf cfg.enableClient [ cfg.package ];
|
||||
|
||||
systemd.services.kanidm = lib.mkIf cfg.enableServer {
|
||||
description = "kanidm identity management daemon";
|
||||
|
@ -237,7 +239,7 @@ in
|
|||
StateDirectory = "kanidm";
|
||||
StateDirectoryMode = "0700";
|
||||
RuntimeDirectory = "kanidmd";
|
||||
ExecStart = "${pkgs.kanidm}/bin/kanidmd server -c ${serverConfigFile}";
|
||||
ExecStart = "${cfg.package}/bin/kanidmd server -c ${serverConfigFile}";
|
||||
User = "kanidm";
|
||||
Group = "kanidm";
|
||||
|
||||
|
@ -270,7 +272,7 @@ in
|
|||
CacheDirectory = "kanidm-unixd";
|
||||
CacheDirectoryMode = "0700";
|
||||
RuntimeDirectory = "kanidm-unixd";
|
||||
ExecStart = "${pkgs.kanidm}/bin/kanidm_unixd";
|
||||
ExecStart = "${cfg.package}/bin/kanidm_unixd";
|
||||
User = "kanidm-unixd";
|
||||
Group = "kanidm-unixd";
|
||||
|
||||
|
@ -302,7 +304,7 @@ in
|
|||
partOf = [ "kanidm-unixd.service" ];
|
||||
restartTriggers = [ unixConfigFile clientConfigFile ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.kanidm}/bin/kanidm_unixd_tasks";
|
||||
ExecStart = "${cfg.package}/bin/kanidm_unixd_tasks";
|
||||
|
||||
BindReadOnlyPaths = [
|
||||
"/nix/store"
|
||||
|
@ -346,7 +348,7 @@ in
|
|||
})
|
||||
];
|
||||
|
||||
system.nssModules = lib.mkIf cfg.enablePam [ pkgs.kanidm ];
|
||||
system.nssModules = lib.mkIf cfg.enablePam [ cfg.package ];
|
||||
|
||||
system.nssDatabases.group = lib.optional cfg.enablePam "kanidm";
|
||||
system.nssDatabases.passwd = lib.optional cfg.enablePam "kanidm";
|
||||
|
@ -365,7 +367,7 @@ in
|
|||
description = "Kanidm server";
|
||||
isSystemUser = true;
|
||||
group = "kanidm";
|
||||
packages = with pkgs; [ kanidm ];
|
||||
packages = [ cfg.package ];
|
||||
};
|
||||
})
|
||||
(lib.mkIf cfg.enablePam {
|
||||
|
|
|
@@ -579,7 +579,7 @@ in
description = "OAuth2 Proxy";
path = [ cfg.package ];
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
after = [ "network-online.target" ];

serviceConfig = {
User = "oauth2_proxy";

@@ -147,7 +147,7 @@ in {
config = mkIf cfg.enable {

# pkg.opensnitch is referred to elsewhere in the module so we don't need to worry about it being garbage collected
services.opensnitch.settings = mapAttrs (_: v: mkDefault v) (builtins.fromJSON (builtins.unsafeDiscardStringContext (builtins.readFile "${pkgs.opensnitch}/etc/default-config.json")));
services.opensnitch.settings = mapAttrs (_: v: mkDefault v) (builtins.fromJSON (builtins.unsafeDiscardStringContext (builtins.readFile "${pkgs.opensnitch}/etc/opensnitchd/default-config.json")));

systemd = {
packages = [ pkgs.opensnitch ];
@@ -171,9 +171,19 @@ in {
${concatMapStrings ({ file, local }: ''
ln -sf '${file}' "${local}"
'') rules}

if [ ! -f /etc/opensnitch-system-fw.json ]; then
cp "${pkgs.opensnitch}/etc/opensnitchd/system-fw.json" "/etc/opensnitchd/system-fw.json"
fi
'');

environment.etc."opensnitchd/default-config.json".source = format.generate "default-config.json" cfg.settings;
environment.etc = mkMerge [ ({
"opensnitchd/default-config.json".source = format.generate "default-config.json" cfg.settings;
}) (mkIf (cfg.settings.ProcMonitorMethod == "ebpf") {
"opensnitchd/opensnitch.o".source = "${config.boot.kernelPackages.opensnitch-ebpf}/etc/opensnitchd/opensnitch.o";
"opensnitchd/opensnitch-dns.o".source = "${config.boot.kernelPackages.opensnitch-ebpf}/etc/opensnitchd/opensnitch-dns.o";
"opensnitchd/opensnitch-procs.o".source = "${config.boot.kernelPackages.opensnitch-ebpf}/etc/opensnitchd/opensnitch-procs.o";
})];

};
}

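For illustration, a minimal sketch of the configuration path this hunk adds: when the eBPF process monitor is selected, the module now also installs the opensnitch-ebpf object files under /etc/opensnitchd. Only the option names visible in the hunk are used.

# Hedged sketch: opting into the eBPF monitor triggers the mkIf branch above.
services.opensnitch = {
  enable = true;
  settings.ProcMonitorMethod = "ebpf";
};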
@@ -27,6 +27,18 @@ in {
Specify a path to a configuration file that Tempo should use.
'';
};

extraFlags = mkOption {
type = types.listOf types.str;
default = [];
example = lib.literalExpression
''
[ "-config.expand-env=true" ]
'';
description = lib.mdDoc ''
Additional flags to pass to the `ExecStart=` in `tempo.service`.
'';
};
};

config = mkIf cfg.enable {
@@ -54,7 +66,7 @@ in {
else cfg.configFile;
in
{
ExecStart = "${pkgs.tempo}/bin/tempo --config.file=${conf}";
ExecStart = "${pkgs.tempo}/bin/tempo --config.file=${conf} ${lib.escapeShellArgs cfg.extraFlags}";
DynamicUser = true;
Restart = "always";
ProtectSystem = "full";

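For illustration, a minimal sketch of the new `extraFlags` option: the flags are shell-escaped and appended verbatim to the ExecStart line above. The flag value is the one already given in the option's own example.

# Hedged sketch: pass additional CLI flags through to tempo.service.
services.tempo = {
  enable = true;
  extraFlags = [ "-config.expand-env=true" ];
};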
@@ -1,79 +1,66 @@
{ config, lib, pkgs, ... }:

with lib;

let
cfg = config.services.mediamtx;
package = pkgs.mediamtx;
format = pkgs.formats.yaml {};
in
{
meta.maintainers = with lib.maintainers; [ fpletz ];

options = {
services.mediamtx = {
enable = mkEnableOption (lib.mdDoc "MediaMTX");
enable = lib.mkEnableOption (lib.mdDoc "MediaMTX");

settings = mkOption {
package = lib.mkPackageOptionMD pkgs "mediamtx" { };

settings = lib.mkOption {
description = lib.mdDoc ''
Settings for MediaMTX.
Read more at <https://github.com/aler9/mediamtx/blob/main/mediamtx.yml>
Settings for MediaMTX. Refer to the defaults at
<https://github.com/bluenviron/mediamtx/blob/main/mediamtx.yml>.
'';
type = format.type;

default = {
logLevel = "info";
logDestinations = [
"stdout"
];
# we set this so when the user uses it, it just works (see LogsDirectory below). but it's not used by default.
logFile = "/var/log/mediamtx/mediamtx.log";
};

default = {};
example = {
paths = {
cam = {
runOnInit = "ffmpeg -f v4l2 -i /dev/video0 -f rtsp rtsp://localhost:$RTSP_PORT/$RTSP_PATH";
runOnInit = "\${lib.getExe pkgs.ffmpeg} -f v4l2 -i /dev/video0 -f rtsp rtsp://localhost:$RTSP_PORT/$RTSP_PATH";
runOnInitRestart = true;
};
};
};
};

env = mkOption {
type = with types; attrsOf anything;
env = lib.mkOption {
type = with lib.types; attrsOf anything;
description = lib.mdDoc "Extra environment variables for MediaMTX";
default = {};
example = {
MTX_CONFKEY = "mykey";
};
};

allowVideoAccess = lib.mkEnableOption (lib.mdDoc ''
Enable access to video devices like cameras on the system.
'');
};
};

config = mkIf (cfg.enable) {
config = lib.mkIf cfg.enable {
# NOTE: mediamtx watches this file and automatically reloads if it changes
environment.etc."mediamtx.yaml".source = format.generate "mediamtx.yaml" cfg.settings;

systemd.services.mediamtx = {
environment = cfg.env;

after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];

path = with pkgs; [
ffmpeg
];
environment = cfg.env;

serviceConfig = {
DynamicUser = true;
User = "mediamtx";
Group = "mediamtx";

LogsDirectory = "mediamtx";

# user likely may want to stream cameras, can't hurt to add video group
SupplementaryGroups = "video";

ExecStart = "${package}/bin/mediamtx /etc/mediamtx.yaml";
SupplementaryGroups = lib.mkIf cfg.allowVideoAccess "video";
ExecStart = "${cfg.package}/bin/mediamtx /etc/mediamtx.yaml";
};
};
};

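For illustration, a minimal sketch of a configuration using the reworked MediaMTX options: `settings` now defaults to empty, the package is overridable via `package`, and video-device access is opt-in through `allowVideoAccess`. The camera path mirrors the option's own example; ffmpeg is already on the service path per the hunk above.

# Hedged sketch of the reworked services.mediamtx options.
services.mediamtx = {
  enable = true;
  allowVideoAccess = true;  # adds the "video" supplementary group, per the mkIf above
  settings.paths.cam = {
    runOnInit = "ffmpeg -f v4l2 -i /dev/video0 -f rtsp rtsp://localhost:$RTSP_PORT/$RTSP_PATH";
    runOnInitRestart = true;
  };
};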
@@ -115,10 +115,8 @@ in {
user = "grocy";
group = "nginx";

# PHP 8.0 is the only version which is supported/tested by upstream:
# https://github.com/grocy/grocy/blob/v3.3.0/README.md#how-to-install
# Compatibility with PHP 8.1 is available on their development branch:
# https://github.com/grocy/grocy/commit/38a4ad8ec480c29a1bff057b3482fd103b036848
# PHP 8.1 is the only version which is supported/tested by upstream:
# https://github.com/grocy/grocy/blob/v4.0.0/README.md#platform-support
phpPackage = pkgs.php81;

inherit (cfg.phpfpm) settings;

@@ -168,7 +166,7 @@ in {
};

meta = {
maintainers = with maintainers; [ ma27 ];
maintainers = with maintainers; [ n0emis ];
doc = ./grocy.md;
};
}

@@ -7,6 +7,9 @@ let

settingsFile = settingsFormat.generate "invidious-settings" cfg.settings;

generatedHmacKeyFile = "/var/lib/invidious/hmac_key";
generateHmac = cfg.hmacKeyFile == null;

serviceConfig = {
systemd.services.invidious = {
description = "Invidious (An alternative YouTube front-end)";
@@ -14,22 +17,47 @@ let
after = [ "network-online.target" ];
wantedBy = [ "multi-user.target" ];

script =
let
jqFilter = "."
+ lib.optionalString (cfg.database.host != null) "[0].db.password = \"'\"'\"$(cat ${lib.escapeShellArg cfg.database.passwordFile})\"'\"'\""
+ " | .[0]"
+ lib.optionalString (cfg.extraSettingsFile != null) " * .[1]";
jqFiles = [ settingsFile ] ++ lib.optional (cfg.extraSettingsFile != null) cfg.extraSettingsFile;
in
''
export INVIDIOUS_CONFIG="$(${pkgs.jq}/bin/jq -s "${jqFilter}" ${lib.escapeShellArgs jqFiles})"
exec ${cfg.package}/bin/invidious
'';
preStart = lib.optionalString generateHmac ''
if [[ ! -e "${generatedHmacKeyFile}" ]]; then
${pkgs.pwgen}/bin/pwgen 20 1 > "${generatedHmacKeyFile}"
chmod 0600 "${generatedHmacKeyFile}"
fi
'';

script = ''
configParts=()
''
# autogenerated hmac_key
+ lib.optionalString generateHmac ''
configParts+=("$(${pkgs.jq}/bin/jq -R '{"hmac_key":.}' <"${generatedHmacKeyFile}")")
''
# generated settings file
+ ''
configParts+=("$(< ${lib.escapeShellArg settingsFile})")
''
# optional database password file
+ lib.optionalString (cfg.database.host != null) ''
configParts+=("$(${pkgs.jq}/bin/jq -R '{"db":{"password":.}}' ${lib.escapeShellArg cfg.database.passwordFile})")
''
# optional extra settings file
+ lib.optionalString (cfg.extraSettingsFile != null) ''
configParts+=("$(< ${lib.escapeShellArg cfg.extraSettingsFile})")
''
# explicitly specified hmac key file
+ lib.optionalString (cfg.hmacKeyFile != null) ''
configParts+=("$(< ${lib.escapeShellArg cfg.hmacKeyFile})")
''
# merge all parts into a single configuration with later elements overriding previous elements
+ ''
export INVIDIOUS_CONFIG="$(${pkgs.jq}/bin/jq -s 'reduce .[] as $item ({}; . * $item)' <<<"''${configParts[*]}")"
exec ${cfg.package}/bin/invidious
'';

serviceConfig = {
RestartSec = "2s";
DynamicUser = true;
StateDirectory = "invidious";
StateDirectoryMode = "0750";

CapabilityBoundingSet = "";
PrivateDevices = true;
@@ -171,6 +199,18 @@ in
'';
};

hmacKeyFile = lib.mkOption {
type = types.nullOr types.path;
default = null;
description = lib.mdDoc ''
A path to a file containing the `hmac_key`. If `null`, a key will be generated automatically on first
start.

If non-`null`, this option overrides any `hmac_key` specified in {option}`services.invidious.settings` or
via {option}`services.invidious.extraSettingsFile`.
'';
};

extraSettingsFile = lib.mkOption {
type = types.nullOr types.str;
default = null;

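For illustration, a minimal sketch of the new `hmacKeyFile` option: when it is non-null, the script above appends its contents last, so it overrides any `hmac_key` from the settings; when left null, the preStart hook generates one with pwgen. The secret path is an assumed example, and `services.invidious.enable` is assumed to be the module's standard enable switch.

# Hedged sketch: pre-provisioned hmac_key instead of the auto-generated one.
services.invidious = {
  enable = true;
  hmacKeyFile = "/run/secrets/invidious-hmac";  # assumed secret path
};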
@@ -160,6 +160,10 @@ in
root * ${cfg.ui.package}/dist
file_server
}
handle_path /static/undefined/* {
root * ${cfg.ui.package}/dist
file_server
}
@for_backend {
path /api/* /pictrs/* /feeds/* /nodeinfo/*
}

@@ -1,7 +1,5 @@
{ config, lib, pkgs, ... }:

with lib;

let
cfg = config.services.netbox;
pythonFmt = pkgs.formats.pythonVars {};
@@ -17,7 +15,7 @@ let
pkg = (cfg.package.overrideAttrs (old: {
installPhase = old.installPhase + ''
ln -s ${configFile} $out/opt/netbox/netbox/netbox/configuration.py
'' + optionalString cfg.enableLdap ''
'' + lib.optionalString cfg.enableLdap ''
ln -s ${cfg.ldapConfigPath} $out/opt/netbox/netbox/netbox/ldap_config.py
'';
})).override {
@@ -31,7 +29,7 @@ let

in {
options.services.netbox = {
enable = mkOption {
enable = lib.mkOption {
type = lib.types.bool;
default = false;
description = lib.mdDoc ''
@@ -66,18 +64,18 @@ in {
};
};

listenAddress = mkOption {
type = types.str;
listenAddress = lib.mkOption {
type = lib.types.str;
default = "[::1]";
description = lib.mdDoc ''
Address the server will listen on.
'';
};

package = mkOption {
type = types.package;
default = if versionAtLeast config.system.stateVersion "23.05" then pkgs.netbox else pkgs.netbox_3_3;
defaultText = literalExpression ''
package = lib.mkOption {
type = lib.types.package;
default = if lib.versionAtLeast config.system.stateVersion "23.05" then pkgs.netbox else pkgs.netbox_3_3;
defaultText = lib.literalExpression ''
if versionAtLeast config.system.stateVersion "23.05" then pkgs.netbox else pkgs.netbox_3_3;
'';
description = lib.mdDoc ''
@@ -85,18 +83,18 @@ in {
'';
};

port = mkOption {
type = types.port;
port = lib.mkOption {
type = lib.types.port;
default = 8001;
description = lib.mdDoc ''
Port the server will listen on.
'';
};

plugins = mkOption {
type = types.functionTo (types.listOf types.package);
plugins = lib.mkOption {
type = with lib.types; functionTo (listOf package);
default = _: [];
defaultText = literalExpression ''
defaultText = lib.literalExpression ''
python3Packages: with python3Packages; [];
'';
description = lib.mdDoc ''
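For illustration, a minimal sketch of the `plugins` option's functionTo type: a function from the Python package set to a list of packages. The package used here is django-auth-ldap, which the module itself injects later in this diff when enableLdap is set; it only illustrates the call signature.

# Hedged sketch: plugins is a function over the Python package set.
services.netbox.plugins = python3Packages: with python3Packages; [
  django-auth-ldap
];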
@@ -104,23 +102,23 @@ in {
'';
};

dataDir = mkOption {
type = types.str;
dataDir = lib.mkOption {
type = lib.types.str;
default = "/var/lib/netbox";
description = lib.mdDoc ''
Storage path of netbox.
'';
};

secretKeyFile = mkOption {
type = types.path;
secretKeyFile = lib.mkOption {
type = lib.types.path;
description = lib.mdDoc ''
Path to a file containing the secret key.
'';
};

extraConfig = mkOption {
type = types.lines;
extraConfig = lib.mkOption {
type = lib.types.lines;
default = "";
description = lib.mdDoc ''
Additional lines of configuration appended to the `configuration.py`.
@@ -128,8 +126,8 @@ in {
'';
};

enableLdap = mkOption {
type = types.bool;
enableLdap = lib.mkOption {
type = lib.types.bool;
default = false;
description = lib.mdDoc ''
Enable LDAP-Authentication for Netbox.
@@ -138,8 +136,8 @@ in {
'';
};

ldapConfigPath = mkOption {
type = types.path;
ldapConfigPath = lib.mkOption {
type = lib.types.path;
default = "";
description = lib.mdDoc ''
Path to the Configuration-File for LDAP-Authentication, will be loaded as `ldap_config.py`.
@@ -173,15 +171,17 @@ in {
};
};

config = mkIf cfg.enable {
config = lib.mkIf cfg.enable {
services.netbox = {
plugins = mkIf cfg.enableLdap (ps: [ ps.django-auth-ldap ]);
plugins = lib.mkIf cfg.enableLdap (ps: [ ps.django-auth-ldap ]);
settings = {
STATIC_ROOT = staticDir;
MEDIA_ROOT = "${cfg.dataDir}/media";
REPORTS_ROOT = "${cfg.dataDir}/reports";
SCRIPTS_ROOT = "${cfg.dataDir}/scripts";

GIT_PATH = "${pkgs.gitMinimal}/bin/git";

DATABASE = {
NAME = "netbox";
USER = "netbox";
@@ -264,39 +264,39 @@ in {
RestartSec = 30;
};
in {
netbox-migration = {
description = "NetBox migrations";
wantedBy = [ "netbox.target" ];

environment = {
PYTHONPATH = pkg.pythonPath;
};

serviceConfig = defaultServiceConfig // {
Type = "oneshot";
ExecStart = ''
${pkg}/bin/netbox migrate
'';
PrivateTmp = true;
};
};

netbox = {
description = "NetBox WSGI Service";
documentation = [ "https://docs.netbox.dev/" ];

wantedBy = [ "netbox.target" ];

after = [ "network-online.target" "netbox-migration.service" ];
after = [ "network-online.target" ];
wants = [ "network-online.target" ];

environment.PYTHONPATH = pkg.pythonPath;

preStart = ''
# On the first run, or on upgrade / downgrade, run migrations and related.
# This mostly correspond to upstream NetBox's 'upgrade.sh' script.
versionFile="${cfg.dataDir}/version"

if [[ -e "$versionFile" && "$(cat "$versionFile")" == "${cfg.package.version}" ]]; then
exit 0
fi

${pkg}/bin/netbox migrate
${pkg}/bin/netbox trace_paths --no-input
${pkg}/bin/netbox collectstatic --no-input
${pkg}/bin/netbox remove_stale_contenttypes --no-input
'';
# TODO: remove the condition when we remove netbox_3_3
${lib.optionalString
(lib.versionAtLeast cfg.package.version "3.5.0")
"${pkg}/bin/netbox reindex --lazy"}
${pkg}/bin/netbox clearsessions
${pkg}/bin/netbox clearcache

environment.PYTHONPATH = pkg.pythonPath;
echo "${cfg.package.version}" > "$versionFile"
'';

serviceConfig = defaultServiceConfig // {
ExecStart = ''
@@ -331,7 +331,7 @@ in {

wantedBy = [ "multi-user.target" ];

after = [ "network-online.target" ];
after = [ "network-online.target" "netbox.service" ];
wants = [ "network-online.target" ];

environment.PYTHONPATH = pkg.pythonPath;
@@ -351,7 +351,7 @@ in {

wantedBy = [ "multi-user.target" ];

after = [ "network-online.target" ];
after = [ "network-online.target" "netbox.service" ];
wants = [ "network-online.target" ];

timerConfig = {