Project import generated by Copybara.

GitOrigin-RevId: c59ea8b8a0e7f927e7291c14ea6cd1bd3a16ff38
Default email 2020-08-20 19:08:02 +02:00
parent ab0f4b5863
commit 0eaa97ffad
2962 changed files with 66050 additions and 29917 deletions

View file

@ -43,15 +43,47 @@ indent_style = space
# Disable file types or individual files
# some of these files may be auto-generated and/or require significant changes
[*.{c,h}]
insert_final_newline = unset
trim_trailing_whitespace = unset
[*.{key,ovpn}]
insert_final_newline = unset
end_of_line = unset
[*.lock]
indent_size = unset
[deps.nix]
insert_final_newline = unset
[gemset.nix]
insert_final_newline = unset
[node-packages.nix]
insert_final_newline = unset
[nixos/modules/services/networking/ircd-hybrid/*.{conf,in}]
trim_trailing_whitespace = unset
[nixos/tests/systemd-networkd-vrf.nix]
trim_trailing_whitespace = unset
[pkgs/applications/editors/emacs-modes/recipes-archive-melpa.json]
indent_size = unset
[pkgs/build-support/dotnetenv/Wrapper/**]
end_of_line = unset
insert_final_newline = unset
trim_trailing_whitespace = unset
[pkgs/build-support/upstream-updater/**]
trim_trailing_whitespace = unset
[pkgs/development/compilers/elm/registry.dat]
end_of_line = unset
insert_final_newline = unset
[pkgs/development/lisp-modules/quicklisp-to-nix.nix]
indent_size = unset

View file

@ -10,6 +10,12 @@
# This file
/.github/CODEOWNERS @edolstra
# GitHub actions
/.github/workflows @Mic92 @zowoq
# EditorConfig
/.editorconfig @Mic92 @zowoq
# Libraries
/lib @edolstra @nbp @infinisil
/lib/systems @nbp @ericson2314 @matthewbauer
@ -196,3 +202,8 @@
# Blockchains
/pkgs/applications/blockchains @mmahut
# Go
/pkgs/development/compilers/go @kalbasit @Mic92 @zowoq
/pkgs/development/go-modules @kalbasit @Mic92 @zowoq
/pkgs/development/go-packages @kalbasit @Mic92 @zowoq

View file

@ -12,6 +12,7 @@ result-*
.DS_Store
.mypy_cache
__pycache__
/pkgs/development/libraries/qt-5/*/tmp/
/pkgs/desktops/kde-5/*/tmp/

View file

@ -1,4 +1,4 @@
MD_TARGETS=$(addsuffix .xml, $(basename $(wildcard ./*.md ./**/*.md)))
MD_TARGETS=$(addsuffix .xml, $(basename $(shell find . -type f -regex '.*\.md$$')))
.PHONY: all
all: validate format out/html/index.html out/epub/manual.epub

View file

@ -0,0 +1,94 @@
# Cataclysm: Dark Days Ahead
## How to install Cataclysm DDA
To install the latest stable release of Cataclysm DDA to your profile, execute
`nix-env -f "<nixpkgs>" -iA cataclysm-dda`. For the curses build (build
without tiles), install `cataclysmDDA.stable.curses`. Note: `cataclysm-dda` is
an alias to `cataclysmDDA.stable.tiles`.
If you would like a development build of your favorite git revision,
override `cataclysm-dda-git` (or `cataclysmDDA.git.curses` if you prefer the
curses build):
```nix
cataclysm-dda-git.override {
version = "YYYY-MM-DD";
rev = "YOUR_FAVORITE_REVISION";
sha256 = "CHECKSUM_OF_THE_REVISION";
}
```
The sha256 checksum can be obtained by
```sh
nix-prefetch-url --unpack "https://github.com/CleverRaven/Cataclysm-DDA/archive/${YOUR_FAVORITE_REVISION}.tar.gz"
```
The default configuration directory is `~/.cataclysm-dda`. If you prefer
`$XDG_CONFIG_HOME/cataclysm-dda`, override the derivation:
```nix
cataclysm-dda.override {
useXdgDir = true;
}
```
## Customizing with mods
To install Cataclysm DDA with mods of your choice, you can use the `withMods`
attribute:
```nix
cataclysm-dda.withMods (mods: with mods; [
tileset.UndeadPeople
])
```
All mods, soundpacks, and tilesets available in nixpkgs are found in
`cataclysmDDA.pkgs`.
Here is an example that modifies existing mods and/or adds mods not available
in nixpkgs:
```nix
let
customMods = self: super: lib.recursiveUpdate super {
# Modify existing mod
tileset.UndeadPeople = super.tileset.UndeadPeople.overrideAttrs (old: {
# For example, if you want to apply a patch to the tileset
patches = [ ./path/to/your.patch ];
});
# Add another mod
mod.Awesome = cataclysmDDA.buildMod {
modName = "Awesome";
version = "0.x";
src = fetchFromGitHub {
owner = "Someone";
repo = "AwesomeMod";
rev = "...";
sha256 = "...";
};
# Path to be installed in the unpacked source (default: ".")
modRoot = "contents/under/this/path/will/be/installed";
};
# Add another soundpack
soundpack.Fantastic = cataclysmDDA.buildSoundPack {
# ditto
};
# Add another tileset
tileset.SuperDuper = cataclysmDDA.buildTileSet {
# ditto
};
};
in
cataclysm-dda.withMods (mods: with mods.extend customMods; [
tileset.UndeadPeople
mod.Awesome
soundpack.Fantastic
tileset.SuperDuper
])
```

View file

@ -18,6 +18,7 @@
<xi:include href="opengl.xml" />
<xi:include href="shell-helpers.xml" />
<xi:include href="steam.xml" />
<xi:include href="cataclysm-dda.section.xml" />
<xi:include href="urxvt.xml" />
<xi:include href="weechat.xml" />
<xi:include href="xorg.xml" />

View file

@ -45,13 +45,7 @@
<title>How to play</title>
<para>
For 64-bit systems it's important to have
<programlisting>hardware.opengl.driSupport32Bit = true;</programlisting>
in your <filename>/etc/nixos/configuration.nix</filename>. You'll also need
<programlisting>hardware.pulseaudio.support32Bit = true;</programlisting>
if you are using PulseAudio - this will enable 32-bit ALSA application integration. To use the Steam controller or other Steam-supported controllers such as the DualShock 4 or Nintendo Switch Pro, you need to add
<programlisting>hardware.steam-hardware.enable = true;</programlisting>
to your configuration.
Use <programlisting>programs.steam.enable = true;</programlisting> if you want to add Steam to <literal>systemPackages</literal> and also enable a few workarounds as well as support for the Steam controller and other Steam-supported controllers such as the DualShock 4 or Nintendo Switch Pro.
</para>
</section>

View file

@ -191,6 +191,8 @@ androidenv.emulateApp {
}
```
Additional flags may be applied to the Android SDK's emulator through the runtime environment variable `$NIX_ANDROID_EMULATOR_FLAGS`.
It is also possible to specify an APK to deploy inside the emulator
and the package and activity names to launch it:
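A minimal sketch of such an invocation might look like this (the attribute names below are illustrative assumptions and may not match the current `emulateApp` interface exactly):
```nix
androidenv.emulateApp {
  name = "emulate-MyApp";
  platformVersion = "28";
  abiVersion = "x86";
  systemImageType = "google_apis_playstore";
  # APK to install into the emulator (assumed attribute name)
  app = ./app-release-unsigned.apk;
  # Java package and activity used to start the app after boot (assumed attribute names)
  package = "com.example.myapp";
  activity = "MainActivity";
}
```
Extra emulator flags can then be supplied at run time through the environment variable mentioned above, e.g. by exporting `NIX_ANDROID_EMULATOR_FLAGS="-no-boot-anim"` before starting the generated emulator script.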

View file

@ -182,4 +182,3 @@ Use `nix-shell -I nixpkgs=/some/dir/nixpkgs -A emscriptenPackages.libz` and from
Using this toolchain makes it easy to leverage `nix` from NixOS, macOS or even Windows (WSL + Ubuntu + nix). This toolchain is reproducible, behaves like the rest of the packages from nixpkgs, and contains a set of working examples to learn from and adapt.
If in trouble, ask the maintainers.

View file

@ -40,6 +40,10 @@ pet = buildGoModule rec {
subPackages = [ "." ]; <co xml:id='ex-buildGoModule-2' />
deleteVendor = true; <co xml:id='ex-buildGoModule-3' />
runVend = true; <co xml:id='ex-buildGoModule-4' />
meta = with lib; {
description = "Simple command-line snippet manager, written in Go";
homepage = "https://github.com/knqyf263/pet";
@ -64,6 +68,16 @@ pet = buildGoModule rec {
<varname>subPackages</varname> limits the builder from building child packages that have not been listed. If <varname>subPackages</varname> is not specified, all child packages will be built.
</para>
</callout>
<callout arearefs='ex-buildGoModule-3'>
<para>
<varname>deleteVendor</varname> removes the pre-existing vendor directory and fetches the dependencies. This should only be used if the dependencies included in the vendor folder are broken or incomplete.
</para>
</callout>
<callout arearefs='ex-buildGoModule-4'>
<para>
<varname>runVend</varname> runs the <command>vend</command> command to generate the vendor directory. This is useful if your code depends on C code and <command>go mod tidy</command> does not include the needed sources to build.
</para>
</callout>
</calloutlist>
</para>

View file

@ -538,8 +538,123 @@ buildPythonPackage rec {
```
Note also the line `doCheck = false;`; it explicitly disables running the test suite.
#### Testing Python Packages
#### Develop local package
It is highly encouraged to have testing as part of the package build. This
helps to avoid situations where the package was able to build and install,
but is not usable at runtime. Currently, all packages will use the `test`
command provided by `setup.py` (i.e. `python setup.py test`). However,
this is deprecated (https://github.com/pypa/setuptools/pull/1878),
so your package should provide its own `checkPhase`.
*NOTE:* The `checkPhase` for python maps to the `installCheckPhase` on a
normal derivation. This is because many python packages do not behave well
unless they are tested against the installed version of the package: version
info and natively compiled extensions generally only exist in the install
directory, and a test suite that relies on them can fail if run beforehand.
*NOTE:* Tests should only be disabled if they don't work under nix
(e.g. they need external dependencies or network access, or are flaky);
as many tests as possible should remain enabled. Failing tests can still be
a good indication that the package is not in a valid state.
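As mentioned above, a package can provide its own hand-written `checkPhase` instead of relying on `setup.py test`. A minimal sketch, assuming `python` is in scope and the tests are discoverable by the standard library `unittest` runner (the sections below cover the more common pytest-based setups):
```
checkPhase = ''
  runHook preCheck
  # run the test suite against the installed package
  ${python.interpreter} -m unittest discover
  runHook postCheck
'';
```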
#### Using pytest
Pytest is the most common test runner for python repositories. A trivial
test run would be:
```
checkInputs = [ pytest ];
checkPhase = "pytest";
```
However, many repositories' test suites do not translate well to nix's build
sandbox, and will generally need many tests to be disabled.
To filter tests using pytest, one can do the following:
```
checkInputs = [ pytest ];
# avoid tests which need additional data or touch network
checkPhase = ''
pytest tests/ --ignore=tests/integration -k 'not download and not update'
'';
```
`--ignore` will tell pytest to exclude that file or directory from being
collected as part of a test run. This is useful if a file uses a package
which is not available in nixpkgs; skipping that test file is much
easier than having to create a new package.
`-k` is used to define a predicate for test names. In this example, we are
filtering out tests which contain `download` or `update` in their test case name.
Only one `-k` argument is allowed, so a long predicate should be continued
with "\" and wrapped to the next line.
*NOTE:* In pytest==6.0.1, the use of "\" to continue a line (e.g. `-k 'not download \'`) has
been removed; in this case, it's recommended to use `pytestCheckHook`.
#### Using pytestCheckHook
`pytestCheckHook` is a convenient hook which will replace the setuptools
`test` command with a `checkPhase` which runs `pytest`. This is also beneficial
when a package may need many items disabled to run the test suite.
Using the example above, the analogous `pytestCheckHook` usage would be:
```
checkInputs = [ pytestCheckHook ];
# requires additional data
pytestFlagsArray = [ "tests/" "--ignore=tests/integration" ];
disabledTests = [
# touches network
"download"
"update"
];
```
This is especially useful when tests need to be conditionally disabled,
for example:
```
disabledTests = [
# touches network
"download"
"update"
] ++ lib.optionals (pythonAtLeast "3.8") [
# broken due to python3.8 async changes
"async"
] ++ lib.optionals stdenv.isDarwin [
# can fail when building with other packages
"socket"
];
```
Doing the same with string concatenation in a regular `checkPhase` would be much
harder to read. This approach also enables us to comment on why specific tests
are disabled.
#### Using pythonImportsCheck
Although unit tests are highly preferred to validate correctness of a package, not
all packages have test suites that can be run easily, and some have none at all.
To help ensure the package still works, `pythonImportsCheck` can attempt to import
the listed modules.
```
pythonImportsCheck = [ "requests" "urllib" ];
```
roughly translates to:
```
postCheck = ''
PYTHONPATH=$out/${python.sitePackages}:$PYTHONPATH
python -c "import requests; import urllib"
'';
```
However, this is done in its own phase, and does not depend on whether `doCheck = true;`
is set. This can also be useful for verifying that the package doesn't assume that
commonly present packages (e.g. `setuptools`) are available.
### Develop local package
As a Python developer you're likely aware of [development mode](http://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode)
(`python setup.py develop`); instead of installing the package this command
@ -640,8 +755,8 @@ and in this case the `python38` interpreter is automatically used.
### Interpreters
Versions 2.7, 3.5, 3.6, 3.7 and 3.8 of the CPython interpreter are available as
respectively `python27`, `python35`, `python36`, `python37` and `python38`. The
Versions 2.7, 3.6, 3.7 and 3.8 of the CPython interpreter are available as
respectively `python27`, `python36`, `python37` and `python38`. The
aliases `python2` and `python3` correspond to respectively `python27` and
`python38`. The default interpreter, `python`, maps to `python2`. The PyPy
interpreters compatible with Python 2.7 and 3 are available as `pypy27` and
@ -689,15 +804,16 @@ attribute set is created for each available Python interpreter. The available
sets are
* `pkgs.python27Packages`
* `pkgs.python35Packages`
* `pkgs.python36Packages`
* `pkgs.python37Packages`
* `pkgs.python38Packages`
* `pkgs.python39Packages`
* `pkgs.pypyPackages`
and the aliases
* `pkgs.python2Packages` pointing to `pkgs.python27Packages`
* `pkgs.python3Packages` pointing to `pkgs.python37Packages`
* `pkgs.python3Packages` pointing to `pkgs.python38Packages`
* `pkgs.pythonPackages` pointing to `pkgs.python2Packages`
#### `buildPythonPackage` function
@ -1016,7 +1132,7 @@ are used in `buildPythonPackage`.
- `pipBuildHook` to build a wheel using `pip` and PEP 517. Note a build system
(e.g. `setuptools` or `flit`) should still be added as `nativeBuildInput`.
- `pipInstallHook` to install wheels.
- `pytestCheckHook` to run tests with `pytest`.
- `pytestCheckHook` to run tests with `pytest`. See [example usage](#using-pytestcheckhook).
- `pythonCatchConflictsHook` to check whether a Python package is not already existing.
- `pythonImportsCheckHook` to check whether importing the listed modules works.
- `pythonRemoveBinBytecode` to remove bytecode from the `/bin` folder.

View file

@ -43,7 +43,6 @@ rustPlatform.buildRustPackage rec {
homepage = "https://github.com/BurntSushi/ripgrep";
license = licenses.unlicense;
maintainers = [ maintainers.tailhook ];
platforms = platforms.all;
};
}
```

View file

@ -254,7 +254,7 @@ let f(h, h + 1, i) = i + h
<variablelist>
<title>Variables specifying dependencies</title>
<varlistentry>
<varlistentry xml:id="var-stdenv-depsBuildBuild">
<term>
<varname>depsBuildBuild</varname>
</term>
@ -267,7 +267,7 @@ let f(h, h + 1, i) = i + h
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-nativeBuildInputs">
<term>
<varname>nativeBuildInputs</varname>
</term>
@ -280,7 +280,7 @@ let f(h, h + 1, i) = i + h
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-depsBuildTarget">
<term>
<varname>depsBuildTarget</varname>
</term>
@ -296,7 +296,7 @@ let f(h, h + 1, i) = i + h
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-depsHostHost">
<term>
<varname>depsHostHost</varname>
</term>
@ -306,7 +306,7 @@ let f(h, h + 1, i) = i + h
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-buildInputs">
<term>
<varname>buildInputs</varname>
</term>
@ -319,7 +319,7 @@ let f(h, h + 1, i) = i + h
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-depsTargetTarget">
<term>
<varname>depsTargetTarget</varname>
</term>
@ -329,7 +329,7 @@ let f(h, h + 1, i) = i + h
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-depsBuildBuildPropagated">
<term>
<varname>depsBuildBuildPropagated</varname>
</term>
@ -339,7 +339,7 @@ let f(h, h + 1, i) = i + h
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-propagatedNativeBuildInputs">
<term>
<varname>propagatedNativeBuildInputs</varname>
</term>
@ -349,7 +349,7 @@ let f(h, h + 1, i) = i + h
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-depsBuildTargetPropagated">
<term>
<varname>depsBuildTargetPropagated</varname>
</term>
@ -359,7 +359,7 @@ let f(h, h + 1, i) = i + h
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-depsHostHostPropagated">
<term>
<varname>depsHostHostPropagated</varname>
</term>
@ -369,7 +369,7 @@ let f(h, h + 1, i) = i + h
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-propagatedBuildInputs">
<term>
<varname>propagatedBuildInputs</varname>
</term>
@ -379,7 +379,7 @@ let f(h, h + 1, i) = i + h
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-depsTargetTargetPropagated">
<term>
<varname>depsTargetTargetPropagated</varname>
</term>
@ -396,7 +396,7 @@ let f(h, h + 1, i) = i + h
<variablelist>
<title>Variables affecting <literal>stdenv</literal> initialisation</title>
<varlistentry>
<varlistentry xml:id="var-stdenv-NIX_DEBUG">
<term>
<varname>NIX_DEBUG</varname>
</term>
@ -410,7 +410,7 @@ let f(h, h + 1, i) = i + h
<variablelist>
<title>Attributes affecting build properties</title>
<varlistentry>
<varlistentry xml:id="var-stdenv-enableParallelBuilding">
<term>
<varname>enableParallelBuilding</varname>
</term>
@ -427,7 +427,7 @@ let f(h, h + 1, i) = i + h
<variablelist>
<title>Special variables</title>
<varlistentry>
<varlistentry xml:id="var-stdenv-passthru">
<term>
<varname>passthru</varname>
</term>
@ -504,7 +504,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
There are a number of variables that control what phases are executed and in what order:
<variablelist>
<title>Variables affecting phase control</title>
<varlistentry>
<varlistentry xml:id="var-stdenv-phases">
<term>
<varname>phases</varname>
</term>
@ -517,7 +517,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-prePhases">
<term>
<varname>prePhases</varname>
</term>
@ -527,7 +527,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-preConfigurePhases">
<term>
<varname>preConfigurePhases</varname>
</term>
@ -537,7 +537,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-preBuildPhases">
<term>
<varname>preBuildPhases</varname>
</term>
@ -547,7 +547,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-preInstallPhases">
<term>
<varname>preInstallPhases</varname>
</term>
@ -557,7 +557,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-preFixupPhases">
<term>
<varname>preFixupPhases</varname>
</term>
@ -567,7 +567,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-preDistPhases">
<term>
<varname>preDistPhases</varname>
</term>
@ -577,7 +577,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-postPhases">
<term>
<varname>postPhases</varname>
</term>
@ -635,7 +635,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
<variablelist>
<title>Variables controlling the unpack phase</title>
<varlistentry>
<varlistentry xml:id="var-stdenv-src">
<term>
<varname>srcs</varname> / <varname>src</varname>
</term>
@ -645,7 +645,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-sourceRoot">
<term>
<varname>sourceRoot</varname>
</term>
@ -655,7 +655,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-setSourceRoot">
<term>
<varname>setSourceRoot</varname>
</term>
@ -665,7 +665,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-preUnpack">
<term>
<varname>preUnpack</varname>
</term>
@ -675,7 +675,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-postUnpack">
<term>
<varname>postUnpack</varname>
</term>
@ -685,7 +685,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-dontUnpack">
<term>
<varname>dontUnpack</varname>
</term>
@ -695,7 +695,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-dontMakeSourcesWritable">
<term>
<varname>dontMakeSourcesWritable</varname>
</term>
@ -705,7 +705,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-unpackCmd">
<term>
<varname>unpackCmd</varname>
</term>
@ -727,7 +727,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
<variablelist>
<title>Variables controlling the patch phase</title>
<varlistentry>
<varlistentry xml:id="var-stdenv-dontPatch">
<term>
<varname>dontPatch</varname>
</term>
@ -737,7 +737,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-patches">
<term>
<varname>patches</varname>
</term>
@ -747,7 +747,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-patchFlags">
<term>
<varname>patchFlags</varname>
</term>
@ -757,7 +757,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-prePatch">
<term>
<varname>prePatch</varname>
</term>
@ -767,7 +767,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-postPatch">
<term>
<varname>postPatch</varname>
</term>
@ -789,7 +789,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
<variablelist>
<title>Variables controlling the configure phase</title>
<varlistentry>
<varlistentry xml:id="var-stdenv-configureScript">
<term>
<varname>configureScript</varname>
</term>
@ -799,7 +799,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-configureFlags">
<term>
<varname>configureFlags</varname>
</term>
@ -809,7 +809,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-dontConfigure">
<term>
<varname>dontConfigure</varname>
</term>
@ -819,7 +819,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-configureFlagsArray">
<term>
<varname>configureFlagsArray</varname>
</term>
@ -829,7 +829,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-dontAddPrefix">
<term>
<varname>dontAddPrefix</varname>
</term>
@ -839,7 +839,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-prefix">
<term>
<varname>prefix</varname>
</term>
@ -849,7 +849,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-prefixKey">
<term>
<varname>prefixKey</varname>
</term>
@ -859,7 +859,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-dontAddDisableDepTrack">
<term>
<varname>dontAddDisableDepTrack</varname>
</term>
@ -869,7 +869,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-dontFixLibtool">
<term>
<varname>dontFixLibtool</varname>
</term>
@ -885,7 +885,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-dontDisableStatic">
<term>
<varname>dontDisableStatic</varname>
</term>
@ -898,7 +898,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-configurePlatforms">
<term>
<varname>configurePlatforms</varname>
</term>
@ -913,7 +913,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-preConfigure">
<term>
<varname>preConfigure</varname>
</term>
@ -923,7 +923,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-postConfigure">
<term>
<varname>postConfigure</varname>
</term>
@ -945,7 +945,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
<variablelist>
<title>Variables controlling the build phase</title>
<varlistentry>
<varlistentry xml:id="var-stdenv-dontBuild">
<term>
<varname>dontBuild</varname>
</term>
@ -955,7 +955,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-makefile">
<term>
<varname>makefile</varname>
</term>
@ -965,7 +965,7 @@ passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ]
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-makeFlags">
<term>
<varname>makeFlags</varname>
</term>
@ -983,7 +983,7 @@ makeFlags = [ "PREFIX=$(out)" ];
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-makeFlagsArray">
<term>
<varname>makeFlagsArray</varname>
</term>
@ -999,7 +999,7 @@ preBuild = ''
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-buildFlags">
<term>
<varname>buildFlags</varname> / <varname>buildFlagsArray</varname>
</term>
@ -1009,7 +1009,7 @@ preBuild = ''
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-preBuild">
<term>
<varname>preBuild</varname>
</term>
@ -1019,7 +1019,7 @@ preBuild = ''
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-postBuild">
<term>
<varname>postBuild</varname>
</term>
@ -1049,7 +1049,7 @@ preBuild = ''
<variablelist>
<title>Variables controlling the check phase</title>
<varlistentry>
<varlistentry xml:id="var-stdenv-doCheck">
<term>
<varname>doCheck</varname>
</term>
@ -1067,11 +1067,11 @@ preBuild = ''
</term>
<listitem>
<para>
See the build phase for details.
See the <link xlink:href="#var-stdenv-makeFlags">build phase</link> for details.
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-checkTarget">
<term>
<varname>checkTarget</varname>
</term>
@ -1081,7 +1081,7 @@ preBuild = ''
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-checkFlags">
<term>
<varname>checkFlags</varname> / <varname>checkFlagsArray</varname>
</term>
@ -1091,7 +1091,7 @@ preBuild = ''
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-checkInputs">
<term>
<varname>checkInputs</varname>
</term>
@ -1101,7 +1101,7 @@ preBuild = ''
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-preCheck">
<term>
<varname>preCheck</varname>
</term>
@ -1111,7 +1111,7 @@ preBuild = ''
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-postCheck">
<term>
<varname>postCheck</varname>
</term>
@ -1133,7 +1133,7 @@ preBuild = ''
<variablelist>
<title>Variables controlling the install phase</title>
<varlistentry>
<varlistentry xml:id="var-stdenv-dontInstall">
<term>
<varname>dontInstall</varname>
</term>
@ -1149,11 +1149,11 @@ preBuild = ''
</term>
<listitem>
<para>
See the build phase for details.
See the <link xlink:href="#var-stdenv-makeFlags">build phase</link> for details.
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-installTargets">
<term>
<varname>installTargets</varname>
</term>
@ -1165,7 +1165,7 @@ installTargets = "install-bin install-doc";</programlisting>
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-installFlags">
<term>
<varname>installFlags</varname> / <varname>installFlagsArray</varname>
</term>
@ -1175,7 +1175,7 @@ installTargets = "install-bin install-doc";</programlisting>
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-preInstall">
<term>
<varname>preInstall</varname>
</term>
@ -1185,7 +1185,7 @@ installTargets = "install-bin install-doc";</programlisting>
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-postInstall">
<term>
<varname>postInstall</varname>
</term>
@ -1229,7 +1229,7 @@ installTargets = "install-bin install-doc";</programlisting>
<variablelist>
<title>Variables controlling the fixup phase</title>
<varlistentry>
<varlistentry xml:id="var-stdenv-dontFixup">
<term>
<varname>dontFixup</varname>
</term>
@ -1239,7 +1239,7 @@ installTargets = "install-bin install-doc";</programlisting>
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-dontStrip">
<term>
<varname>dontStrip</varname>
</term>
@ -1249,7 +1249,7 @@ installTargets = "install-bin install-doc";</programlisting>
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-dontStripHost">
<term>
<varname>dontStripHost</varname>
</term>
@ -1259,7 +1259,7 @@ installTargets = "install-bin install-doc";</programlisting>
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-dontStripTarget">
<term>
<varname>dontStripTarget</varname>
</term>
@ -1269,7 +1269,7 @@ installTargets = "install-bin install-doc";</programlisting>
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-dontMoveSbin">
<term>
<varname>dontMoveSbin</varname>
</term>
@ -1279,7 +1279,7 @@ installTargets = "install-bin install-doc";</programlisting>
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-stripAllList">
<term>
<varname>stripAllList</varname>
</term>
@ -1289,7 +1289,7 @@ installTargets = "install-bin install-doc";</programlisting>
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-stripAllFlags">
<term>
<varname>stripAllFlags</varname>
</term>
@ -1299,7 +1299,7 @@ installTargets = "install-bin install-doc";</programlisting>
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-stripDebugList">
<term>
<varname>stripDebugList</varname>
</term>
@ -1309,7 +1309,7 @@ installTargets = "install-bin install-doc";</programlisting>
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-stripDebugFlags">
<term>
<varname>stripDebugFlags</varname>
</term>
@ -1319,7 +1319,7 @@ installTargets = "install-bin install-doc";</programlisting>
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-dontPatchELF">
<term>
<varname>dontPatchELF</varname>
</term>
@ -1329,7 +1329,7 @@ installTargets = "install-bin install-doc";</programlisting>
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-dontPatchShebangs">
<term>
<varname>dontPatchShebangs</varname>
</term>
@ -1339,7 +1339,7 @@ installTargets = "install-bin install-doc";</programlisting>
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-dontPruneLibtoolFiles">
<term>
<varname>dontPruneLibtoolFiles</varname>
</term>
@ -1349,7 +1349,7 @@ installTargets = "install-bin install-doc";</programlisting>
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-forceShare">
<term>
<varname>forceShare</varname>
</term>
@ -1359,7 +1359,7 @@ installTargets = "install-bin install-doc";</programlisting>
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-setupHook">
<term>
<varname>setupHook</varname>
</term>
@ -1370,7 +1370,7 @@ installTargets = "install-bin install-doc";</programlisting>
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-preFixup">
<term>
<varname>preFixup</varname>
</term>
@ -1380,7 +1380,7 @@ installTargets = "install-bin install-doc";</programlisting>
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-postFixup">
<term>
<varname>postFixup</varname>
</term>
@ -1419,7 +1419,7 @@ set debug-file-directory ~/.nix-profile/lib/debug
<variablelist>
<title>Variables controlling the installCheck phase</title>
<varlistentry>
<varlistentry xml:id="var-stdenv-doInstallCheck">
<term>
<varname>doInstallCheck</varname>
</term>
@ -1431,7 +1431,7 @@ set debug-file-directory ~/.nix-profile/lib/debug
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-installCheckTarget">
<term>
<varname>installCheckTarget</varname>
</term>
@ -1441,7 +1441,7 @@ set debug-file-directory ~/.nix-profile/lib/debug
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-installCheckFlags">
<term>
<varname>installCheckFlags</varname> / <varname>installCheckFlagsArray</varname>
</term>
@ -1451,7 +1451,7 @@ set debug-file-directory ~/.nix-profile/lib/debug
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-installCheckInputs">
<term>
<varname>installCheckInputs</varname>
</term>
@ -1461,7 +1461,7 @@ set debug-file-directory ~/.nix-profile/lib/debug
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-preInstallCheck">
<term>
<varname>preInstallCheck</varname>
</term>
@ -1471,7 +1471,7 @@ set debug-file-directory ~/.nix-profile/lib/debug
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-postInstallCheck">
<term>
<varname>postInstallCheck</varname>
</term>
@ -1493,7 +1493,7 @@ set debug-file-directory ~/.nix-profile/lib/debug
<variablelist>
<title>Variables controlling the distribution phase</title>
<varlistentry>
<varlistentry xml:id="var-stdenv-distTarget">
<term>
<varname>distTarget</varname>
</term>
@ -1503,7 +1503,7 @@ set debug-file-directory ~/.nix-profile/lib/debug
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-distFlags">
<term>
<varname>distFlags</varname> / <varname>distFlagsArray</varname>
</term>
@ -1513,7 +1513,7 @@ set debug-file-directory ~/.nix-profile/lib/debug
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-tarballs">
<term>
<varname>tarballs</varname>
</term>
@ -1523,7 +1523,7 @@ set debug-file-directory ~/.nix-profile/lib/debug
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-dontCopyDist">
<term>
<varname>dontCopyDist</varname>
</term>
@ -1533,7 +1533,7 @@ set debug-file-directory ~/.nix-profile/lib/debug
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-preDist">
<term>
<varname>preDist</varname>
</term>
@ -1543,7 +1543,7 @@ set debug-file-directory ~/.nix-profile/lib/debug
</para>
</listitem>
</varlistentry>
<varlistentry>
<varlistentry xml:id="var-stdenv-postDist">
<term>
<varname>postDist</varname>
</term>

View file

@ -178,26 +178,40 @@ self: super:
<para>
<link
xlink:href="https://software.intel.com/en-us/mkl">Intel
MKL</link> (only works on x86 architecture, unfree)
MKL</link> (only works on the x86_64 architecture, unfree)
</para>
<para>
The Nixpkgs attribute is <literal>mkl</literal>.
</para>
</listitem>
<listitem>
<para>
<link
xlink:href="https://developer.amd.com/amd-aocl/blas-library/">AMD
BLIS/LIBFLAME</link> (optimized for modern AMD x86_64 CPUs)
</para>
<para>
The AMD BLIS library, with attribute <literal>amd-blis</literal>,
provides a BLAS implementation. The complementary AMD LIBFLAME
library, with attribute <literal>amd-libflame</literal>, provides
a LAPACK implementation.
</para>
</listitem>
</itemizedlist>
<para>
Introduced in <link
xlink:href="https://github.com/NixOS/nixpkgs/pull/83888">PR
#83888</link>, we are able to override the blas and lapack
packages to use different implementations, through the
blasProvider and lapackProvider argument. This can be used
#83888</link>, we are able to override the <literal>blas</literal>
and <literal>lapack</literal> packages to use different implementations,
through the <literal>blasProvider</literal> and
<literal>lapackProvider</literal> arguments. This can be used
to select a different provider. BLAS providers will have
symlinks in <literal>$out/lib/libblas.so.3</literal> and
<literal>$out/lib/libcblas.so.3</literal> to their respective
BLAS libraries. Likewise, LAPACK providers will have symlinks
in <literal>$out/lib/liblapack.so.3</literal> and
<literal>$out/lib/liblapacke.so.3</literal> to their respective
LAPCK libraries. For example, Intel MKL is both a BLAS and
LAPACK libraries. For example, Intel MKL is both a BLAS and
LAPACK provider. An overlay can be created to use Intel MKL
that looks like:
</para>
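<para>
 A minimal sketch of such an overlay, assuming <literal>blas</literal> and
 <literal>lapack</literal> accept the <literal>blasProvider</literal> and
 <literal>lapackProvider</literal> arguments described above:
</para>
<programlisting>
self: super:
{
  # use Intel MKL as the provider for both the BLAS and LAPACK interfaces
  blas = super.blas.override { blasProvider = self.mkl; };
  lapack = super.lapack.override { lapackProvider = self.mkl; };
}
</programlisting>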
@ -216,8 +230,9 @@ self: super:
<para>
This overlay uses Intel's MKL library for both BLAS and LAPACK
interfaces. Note that the same can be accomplished at runtime
using <literal>LD_LIBRARY_PATH</literal> of libblas.so.3 and
liblapack.so.3. For instance:
using <literal>LD_LIBRARY_PATH</literal> of
<literal>libblas.so.3</literal> and
<literal>liblapack.so.3</literal>. For instance:
</para>
<programlisting>
$ LD_LIBRARY_PATH=$(nix-build -A mkl)/lib:$LD_LIBRARY_PATH nix-shell -p octave --run octave

View file

@ -12,23 +12,30 @@
lib = import ./lib;
systems = [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ];
systems = [
"x86_64-linux"
"i686-linux"
"x86_64-darwin"
"aarch64-linux"
"armv6l-linux"
"armv7l-linux"
];
forAllSystems = f: lib.genAttrs systems (system: f system);
in
{
lib = lib // {
lib = lib.extend (final: prev: {
nixosSystem = { modules, ... } @ args:
import ./nixos/lib/eval-config.nix (args // {
modules = modules ++
[ { system.nixos.versionSuffix =
".${lib.substring 0 8 (self.lastModifiedDate or self.lastModified)}.${self.shortRev or "dirty"}";
system.nixos.revision = lib.mkIf (self ? rev) self.rev;
".${final.substring 0 8 (self.lastModifiedDate or self.lastModified)}.${self.shortRev or "dirty"}";
system.nixos.revision = final.mkIf (self ? rev) self.rev;
}
];
});
};
});
checks.x86_64-linux.tarball = jobs.tarball;

View file

@ -67,7 +67,7 @@ let
inherit (trivial) id const pipe concat or and bitAnd bitOr bitXor
bitNot boolToString mergeAttrs flip mapNullable inNixShell min max
importJSON warn info showWarnings nixpkgsVersion version mod compare
splitByAndCompare functionArgs setFunctionArgs isFunction;
splitByAndCompare functionArgs setFunctionArgs isFunction toHexString toBaseDigits;
inherit (fixedPoints) fix fix' converge extends composeExtensions
makeExtensible makeExtensibleWithCustomName;
inherit (attrsets) attrByPath hasAttrByPath setAttrByPath

View file

@ -48,8 +48,10 @@ rec {
else if isAttrs v then err "attrsets" v
# functions can't be printed of course
else if isFunction v then err "functions" v
# lets not talk about floats. There is no sensible `toString` for them.
else if isFloat v then err "floats" v
# Floats currently can't be converted to precise strings;
# make this warning conditional on the nix version once this is no longer a problem.
# See https://github.com/NixOS/nix/pull/3480
else if isFloat v then libStr.floatToString v
else err "this value is" (toString v);

View file

@ -85,6 +85,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) {
fullName = ''Beerware License'';
};
blueOak100 = spdx {
spdxId = "BlueOak-1.0.0";
fullName = "Blue Oak Model License 1.0.0";
};
bsd0 = spdx {
spdxId = "0BSD";
fullName = "BSD Zero Clause License";
@ -110,6 +115,11 @@ lib.mapAttrs (n: v: v // { shortName = n; }) {
fullName = ''BSD 4-clause "Original" or "Old" License'';
};
bsdProtection = spdx {
spdxId = "BSD-Protection";
fullName = "BSD Protection License";
};
bsl11 = {
fullName = "Business Source License 1.1";
url = "https://mariadb.com/bsl11";

View file

@ -58,6 +58,23 @@ rec {
default = check;
description = "Whether to check whether all option definitions have matching declarations.";
};
_module.freeformType = mkOption {
# Disallow merging for now, but could be implemented nicely with a `types.optionType`
type = types.nullOr (types.uniq types.attrs);
internal = true;
default = null;
description = ''
If set, merge all definitions that don't have an associated option
together using this type. The result then gets combined with the
values of all declared options to produce the final <literal>
config</literal> value.
If this is <literal>null</literal>, definitions without an option
will throw an error unless <option>_module.check</option> is
turned off.
'';
};
};
config = {
@ -65,35 +82,55 @@ rec {
};
};
collected = collectModules
merged =
let collected = collectModules
(specialArgs.modulesPath or "")
(modules ++ [ internalModule ])
({ inherit config options lib; } // specialArgs);
({ inherit lib options config; } // specialArgs);
in mergeModules prefix (reverseList collected);
options = mergeModules prefix (reverseList collected);
options = merged.matchedOptions;
# Traverse options and extract the option values into the final
# config set. At the same time, check whether all option
# definitions have matching declarations.
# !!! _module.check's value can't depend on any other config values
# without an infinite recursion. One way around this is to make the
# 'config' passed around to the modules be unconditionally unchecked,
# and only do the check in 'result'.
config = yieldConfig prefix options;
yieldConfig = prefix: set:
let res = removeAttrs (mapAttrs (n: v:
if isOption v then v.value
else yieldConfig (prefix ++ [n]) v) set) ["_definedNames"];
config =
let
# For definitions that have an associated option
declaredConfig = mapAttrsRecursiveCond (v: ! isOption v) (_: v: v.value) options;
# If freeformType is set, this is for definitions that don't have an associated option
freeformConfig =
let
defs = map (def: {
file = def.file;
value = setAttrByPath def.prefix def.value;
}) merged.unmatchedDefns;
in if defs == [] then {}
else declaredConfig._module.freeformType.merge prefix defs;
in if declaredConfig._module.freeformType == null then declaredConfig
# Because all definitions that had an associated option ended in
# declaredConfig, freeformConfig can only contain the non-option
# paths, meaning recursiveUpdate will never override any value
else recursiveUpdate freeformConfig declaredConfig;
checkUnmatched =
if config._module.check && config._module.freeformType == null && merged.unmatchedDefns != [] then
let
firstDef = head merged.unmatchedDefns;
baseMsg = "The option `${showOption (prefix ++ firstDef.prefix)}' defined in `${firstDef.file}' does not exist.";
in
if options._module.check.value && set ? _definedNames then
foldl' (res: m:
foldl' (res: name:
if set ? ${name} then res else throw "The option `${showOption (prefix ++ [name])}' defined in `${m.file}' does not exist.")
res m.names)
res set._definedNames
else
res;
result = {
if attrNames options == [ "_module" ]
then throw ''
${baseMsg}
However there are no options defined in `${showOption prefix}'. Are you sure you've
declared your options properly? This can happen if you e.g. declared your options in `types.submodule'
under `config' rather than `options'.
''
else throw baseMsg
else null;
result = builtins.seq checkUnmatched {
inherit options;
config = removeAttrs config [ "_module" ];
inherit (config) _module;
@ -174,12 +211,16 @@ rec {
/* Massage a module into canonical form, that is, a set consisting
of options, config and imports attributes. */
unifyModuleSyntax = file: key: m:
let addMeta = config: if m ? meta
let
addMeta = config: if m ? meta
then mkMerge [ config { meta = m.meta; } ]
else config;
addFreeformType = config: if m ? freeformType
then mkMerge [ config { _module.freeformType = m.freeformType; } ]
else config;
in
if m ? config || m ? options then
let badAttrs = removeAttrs m ["_file" "key" "disabledModules" "imports" "options" "config" "meta"]; in
let badAttrs = removeAttrs m ["_file" "key" "disabledModules" "imports" "options" "config" "meta" "freeformType"]; in
if badAttrs != {} then
throw "Module `${key}' has an unsupported attribute `${head (attrNames badAttrs)}'. This is caused by introducing a top-level `config' or `options' attribute. Add configuration attributes immediately on the top level instead, or move all of them (namely: ${toString (attrNames badAttrs)}) into the explicit `config' attribute."
else
@ -188,7 +229,7 @@ rec {
disabledModules = m.disabledModules or [];
imports = m.imports or [];
options = m.options or {};
config = addMeta (m.config or {});
config = addFreeformType (addMeta (m.config or {}));
}
else
{ _file = m._file or file;
@ -196,7 +237,7 @@ rec {
disabledModules = m.disabledModules or [];
imports = m.require or [] ++ m.imports or [];
options = {};
config = addMeta (removeAttrs m ["_file" "key" "disabledModules" "require" "imports"]);
config = addFreeformType (addMeta (removeAttrs m ["_file" "key" "disabledModules" "require" "imports" "freeformType"]));
};
applyIfFunction = key: f: args@{ config, options, lib, ... }: if isFunction f then
@ -233,7 +274,23 @@ rec {
declarations in all modules, combining them into a single set.
At the same time, for each option declaration, it will merge the
corresponding option definitions in all machines, returning them
in the value attribute of each option. */
in the value attribute of each option.
This returns a set like
{
# A recursive set of options along with their final values
matchedOptions = {
foo = { _type = "option"; value = "option value of foo"; ... };
bar.baz = { _type = "option"; value = "option value of bar.baz"; ... };
...
};
# A list of definitions that weren't matched by any option
unmatchedDefns = [
{ file = "file.nix"; prefix = [ "qux" ]; value = "qux"; }
...
];
}
*/
mergeModules = prefix: modules:
mergeModules' prefix modules
(concatMap (m: map (config: { file = m._file; inherit config; }) (pushDownProperties m.config)) modules);
@ -280,8 +337,8 @@ rec {
defnsByName' = byName "config" (module: value:
[{ inherit (module) file; inherit value; }]
) configs;
in
(flip mapAttrs declsByName (name: decls:
resultsByName = flip mapAttrs declsByName (name: decls:
# We're descending into attribute name.
let
loc = prefix ++ [name];
@ -291,7 +348,10 @@ rec {
in
if nrOptions == length decls then
let opt = fixupOptionType loc (mergeOptionDecls loc decls);
in evalOptionValue loc opt defns'
in {
matchedOptions = evalOptionValue loc opt defns';
unmatchedDefns = [];
}
else if nrOptions != 0 then
let
firstOption = findFirst (m: isOption m.options) "" decls;
@ -299,9 +359,27 @@ rec {
in
throw "The option `${showOption loc}' in `${firstOption._file}' is a prefix of options in `${firstNonOption._file}'."
else
mergeModules' loc decls defns
))
// { _definedNames = map (m: { inherit (m) file; names = attrNames m.config; }) configs; };
mergeModules' loc decls defns);
matchedOptions = mapAttrs (n: v: v.matchedOptions) resultsByName;
# an attrset 'name' => list of unmatched definitions for 'name'
unmatchedDefnsByName =
# Propagate all unmatched definitions from nested option sets
mapAttrs (n: v: v.unmatchedDefns) resultsByName
# Plus the definitions for the current prefix that don't have a matching option
// removeAttrs defnsByName' (attrNames matchedOptions);
in {
inherit matchedOptions;
# Transforms unmatchedDefnsByName into a list of definitions
unmatchedDefns = concatLists (mapAttrsToList (name: defs:
map (def: def // {
# Set this so we know when the definition first left unmatched territory
prefix = [name] ++ (def.prefix or []);
}) defs
) unmatchedDefnsByName);
};
/* Merge multiple option declarations into a single declaration. In
general, there should be only one declaration of each option.

View file

@ -612,6 +612,22 @@ rec {
*/
fixedWidthNumber = width: n: fixedWidthString width "0" (toString n);
/* Convert a float to a string, but emit a warning when precision is lost
during the conversion
Example:
floatToString 0.000001
=> "0.000001"
floatToString 0.0000001
=> trace: warning: Imprecise conversion from float to string 0.000000
"0.000000"
*/
floatToString = float: let
result = toString float;
precise = float == builtins.fromJSON result;
in if precise then result
else lib.warn "Imprecise conversion from float to string ${result}" result;
/* Check whether a value can be coerced to a string */
isCoercibleToString = x:
builtins.elem (builtins.typeOf x) [ "path" "string" "null" "int" "float" "bool" ] ||

View file

@ -32,6 +32,7 @@ rec {
/**/ if final.isDarwin then "libSystem"
else if final.isMinGW then "msvcrt"
else if final.isWasi then "wasilibc"
else if final.isRedox then "relibc"
else if final.isMusl then "musl"
else if final.isUClibc then "uclibc"
else if final.isAndroid then "bionic"
@ -65,6 +66,7 @@ rec {
freebsd = "FreeBSD";
openbsd = "OpenBSD";
wasi = "Wasi";
redox = "Redox";
genode = "Genode";
}.${final.parsed.kernel.name} or null;

View file

@ -22,6 +22,8 @@ let
"wasm64-wasi" "wasm32-wasi"
"x86_64-redox"
"powerpc64le-linux"
"riscv32-linux" "riscv64-linux"
@ -36,7 +38,7 @@ let
"js-ghcjs"
"aarch64-genode" "x86_64-genode"
"aarch64-genode" "i686-genode" "x86_64-genode"
];
allParsed = map parse.mkSystemFromString all;
@ -69,6 +71,7 @@ in {
openbsd = filterDoubles predicates.isOpenBSD;
unix = filterDoubles predicates.isUnix;
wasi = filterDoubles predicates.isWasi;
redox = filterDoubles predicates.isRedox;
windows = filterDoubles predicates.isWindows;
genode = filterDoubles predicates.isGenode;

View file

@ -163,6 +163,15 @@ rec {
libc = "newlib";
};
#
# Redox
#
x86_64-unknown-redox = {
config = "x86_64-unknown-redox";
libc = "relibc";
};
#
# Darwin
#

View file

@ -33,7 +33,7 @@ rec {
isBSD = { kernel = { families = { inherit (kernelFamilies) bsd; }; }; };
isDarwin = { kernel = { families = { inherit (kernelFamilies) darwin; }; }; };
isUnix = [ isBSD isDarwin isLinux isSunOS isCygwin ];
isUnix = [ isBSD isDarwin isLinux isSunOS isCygwin isRedox ];
isMacOS = { kernel = kernels.macos; };
isiOS = { kernel = kernels.ios; };
@ -46,6 +46,7 @@ rec {
isCygwin = { kernel = kernels.windows; abi = abis.cygnus; };
isMinGW = { kernel = kernels.windows; abi = abis.gnu; };
isWasi = { kernel = kernels.wasi; };
isRedox = { kernel = kernels.redox; };
isGhcjs = { kernel = kernels.ghcjs; };
isGenode = { kernel = kernels.genode; };
isNone = { kernel = kernels.none; };

View file

@ -277,6 +277,7 @@ rec {
openbsd = { execFormat = elf; families = { inherit bsd; }; };
solaris = { execFormat = elf; families = { }; };
wasi = { execFormat = wasm; families = { }; };
redox = { execFormat = elf; families = { }; };
windows = { execFormat = pe; families = { }; };
ghcjs = { execFormat = unknown; families = { }; };
genode = { execFormat = elf; families = { }; };
@ -390,6 +391,8 @@ rec {
then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = "windows"; }
else if (elemAt l 2 == "wasi")
then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = "wasi"; }
else if (elemAt l 2 == "redox")
then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = "redox"; }
else if hasPrefix "netbsd" (elemAt l 2)
then { cpu = elemAt l 0; vendor = elemAt l 1; kernel = elemAt l 2; }
else if (elem (elemAt l 2) ["eabi" "eabihf" "elf"])

View file

@ -102,6 +102,16 @@ runTests {
expected = 9;
};
testToHexString = {
expr = toHexString 250;
expected = "FA";
};
testToBaseDigits = {
expr = toBaseDigits 2 6;
expected = [ 1 1 0 ];
};
# STRINGS
testConcatMapStrings = {

View file

@ -210,6 +210,29 @@ checkConfigOutput "empty" config.value.foo ./declare-lazyAttrsOf.nix ./attrsOf-c
checkConfigError 'The option value .* in .* is not of type .*' \
config.value ./declare-int-unsigned-value.nix ./define-value-list.nix ./define-value-int-positive.nix
## Freeform modules
# Assigning without a declared option should work
checkConfigOutput 24 config.value ./freeform-attrsOf.nix ./define-value-string.nix
# Not making any freeform assignments shouldn't cause an error
checkConfigOutput '{ }' config ./freeform-attrsOf.nix
# but only if the type matches
checkConfigError 'The option value .* in .* is not of type .*' config.value ./freeform-attrsOf.nix ./define-value-list.nix
# and properties should be applied
checkConfigOutput yes config.value ./freeform-attrsOf.nix ./define-value-string-properties.nix
# Options should still be declarable, and be able to have a type that doesn't match the freeform type
checkConfigOutput false config.enable ./freeform-attrsOf.nix ./define-value-string.nix ./declare-enable.nix
checkConfigOutput 24 config.value ./freeform-attrsOf.nix ./define-value-string.nix ./declare-enable.nix
# and this should work too with nested values
checkConfigOutput false config.nest.foo ./freeform-attrsOf.nix ./freeform-nested.nix
checkConfigOutput bar config.nest.bar ./freeform-attrsOf.nix ./freeform-nested.nix
# Check whether a declared option can depend on a freeform-typed one
checkConfigOutput null config.foo ./freeform-attrsOf.nix ./freeform-str-dep-unstr.nix
checkConfigOutput 24 config.foo ./freeform-attrsOf.nix ./freeform-str-dep-unstr.nix ./define-value-string.nix
# Check whether a freeform-typed value can depend on a declared option; this can only work with lazyAttrsOf
checkConfigError 'infinite recursion encountered' config.foo ./freeform-attrsOf.nix ./freeform-unstr-dep-str.nix
checkConfigError 'The option .* is used but not defined' config.foo ./freeform-lazyAttrsOf.nix ./freeform-unstr-dep-str.nix
checkConfigOutput 24 config.foo ./freeform-lazyAttrsOf.nix ./freeform-unstr-dep-str.nix ./define-value-string.nix
cat <<EOF
====== module tests ======
$pass Pass

View file

@ -0,0 +1,12 @@
{ lib, ... }: {
imports = [{
value = lib.mkDefault "def";
}];
value = lib.mkMerge [
(lib.mkIf false "nope")
"yes"
];
}

View file

@ -0,0 +1,3 @@
{ lib, ... }: {
freeformType = with lib.types; attrsOf (either str (attrsOf str));
}

View file

@ -0,0 +1,3 @@
{ lib, ... }: {
freeformType = with lib.types; lazyAttrsOf (either str (lazyAttrsOf str));
}

View file

@ -0,0 +1,7 @@
{ lib, ... }: {
options.nest.foo = lib.mkOption {
type = lib.types.bool;
default = false;
};
config.nest.bar = "bar";
}

View file

@ -0,0 +1,8 @@
{ lib, config, ... }: {
options.foo = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
};
config.foo = lib.mkIf (config ? value) config.value;
}

View file

@ -0,0 +1,8 @@
{ lib, config, ... }: {
options.value = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
};
config.foo = lib.mkIf (config.value != null) config.value;
}

View file

@ -12,22 +12,23 @@ let
expected = lib.sort lib.lessThan y;
};
in with lib.systems.doubles; lib.runTests {
testall = mseteq all (linux ++ darwin ++ freebsd ++ openbsd ++ netbsd ++ illumos ++ wasi ++ windows ++ embedded ++ js ++ genode);
testall = mseteq all (linux ++ darwin ++ freebsd ++ openbsd ++ netbsd ++ illumos ++ wasi ++ windows ++ embedded ++ js ++ genode ++ redox);
testarm = mseteq arm [ "armv5tel-linux" "armv6l-linux" "armv6l-none" "armv7a-linux" "armv7l-linux" "arm-none" "armv7a-darwin" ];
testi686 = mseteq i686 [ "i686-linux" "i686-freebsd" "i686-netbsd" "i686-openbsd" "i686-cygwin" "i686-windows" "i686-none" "i686-darwin" ];
testi686 = mseteq i686 [ "i686-linux" "i686-freebsd" "i686-genode" "i686-netbsd" "i686-openbsd" "i686-cygwin" "i686-windows" "i686-none" "i686-darwin" ];
testmips = mseteq mips [ "mipsel-linux" ];
testx86_64 = mseteq x86_64 [ "x86_64-linux" "x86_64-darwin" "x86_64-freebsd" "x86_64-genode" "x86_64-openbsd" "x86_64-netbsd" "x86_64-cygwin" "x86_64-solaris" "x86_64-windows" "x86_64-none" ];
testx86_64 = mseteq x86_64 [ "x86_64-linux" "x86_64-darwin" "x86_64-freebsd" "x86_64-genode" "x86_64-redox" "x86_64-openbsd" "x86_64-netbsd" "x86_64-cygwin" "x86_64-solaris" "x86_64-windows" "x86_64-none" ];
testcygwin = mseteq cygwin [ "i686-cygwin" "x86_64-cygwin" ];
testdarwin = mseteq darwin [ "x86_64-darwin" "i686-darwin" "aarch64-darwin" "armv7a-darwin" ];
testfreebsd = mseteq freebsd [ "i686-freebsd" "x86_64-freebsd" ];
testgenode = mseteq genode [ "aarch64-genode" "x86_64-genode" ];
testgenode = mseteq genode [ "aarch64-genode" "i686-genode" "x86_64-genode" ];
testredox = mseteq redox [ "x86_64-redox" ];
testgnu = mseteq gnu (linux /* ++ kfreebsd ++ ... */);
testillumos = mseteq illumos [ "x86_64-solaris" ];
testlinux = mseteq linux [ "aarch64-linux" "armv5tel-linux" "armv6l-linux" "armv7a-linux" "armv7l-linux" "i686-linux" "mipsel-linux" "riscv32-linux" "riscv64-linux" "x86_64-linux" "powerpc64le-linux" ];
testnetbsd = mseteq netbsd [ "i686-netbsd" "x86_64-netbsd" ];
testopenbsd = mseteq openbsd [ "i686-openbsd" "x86_64-openbsd" ];
testwindows = mseteq windows [ "i686-cygwin" "x86_64-cygwin" "i686-windows" "x86_64-windows" ];
testunix = mseteq unix (linux ++ darwin ++ freebsd ++ openbsd ++ netbsd ++ illumos ++ cygwin);
testunix = mseteq unix (linux ++ darwin ++ freebsd ++ openbsd ++ netbsd ++ illumos ++ cygwin ++ redox);
}

View file

@ -332,4 +332,55 @@ rec {
*/
isFunction = f: builtins.isFunction f ||
(f ? __functor && isFunction (f.__functor f));
/* Convert the given positive integer to a string of its hexadecimal
representation. For example:
toHexString 0 => "0"
toHexString 16 => "10"
toHexString 250 => "FA"
*/
toHexString = i:
let
toHexDigit = d:
if d < 10
then toString d
else
{
"10" = "A";
"11" = "B";
"12" = "C";
"13" = "D";
"14" = "E";
"15" = "F";
}.${toString d};
in
lib.concatMapStrings toHexDigit (toBaseDigits 16 i);
/* `toBaseDigits base i` converts the positive integer i to a list of its
digits in the given base. For example:
toBaseDigits 10 123 => [ 1 2 3 ]
toBaseDigits 2 6 => [ 1 1 0 ]
toBaseDigits 16 250 => [ 15 10 ]
*/
toBaseDigits = base: i:
let
go = i:
if i < base
then [i]
else
let
r = i - ((i / base) * base);
q = (i - r) / base;
in
[r] ++ go q;
in
assert (base >= 2);
assert (i >= 0);
lib.reverseList (go i);
}
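For reference, here is a small usage sketch (not part of the diff) of the two new helpers; `lib` is assumed to be the usual nixpkgs library attrset, and the results follow directly from the definitions above:

```nix
# Illustrative evaluations of the new helpers.
{
  hex0    = lib.toHexString 0;        # "0"
  hex250  = lib.toHexString 250;      # "FA"
  bits    = lib.toBaseDigits 2 6;     # [ 1 1 0 ]
  nibbles = lib.toBaseDigits 16 250;  # [ 15 10 ]
}
```

The QEMU networking helpers later in this diff build on `toHexString` to derive MAC address octets.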

View file

@ -486,9 +486,15 @@ rec {
else value
) defs;
freeformType = (evalModules {
inherit modules specialArgs;
args.name = "name";
})._module.freeformType;
in
mkOptionType rec {
name = "submodule";
description = freeformType.description or name;
check = x: isAttrs x || isFunction x || path.check x;
merge = loc: defs:
(evalModules {

View file

@ -26,6 +26,13 @@
`handle == github` is strongly preferred whenever `github` is an acceptable attribute name and is short and convenient.
If `github` begins with a numeral, `handle` should be prefixed with an underscore.
```nix
_1example = {
github = "1example";
};
```
Add PGP/GPG keys only if you actually use them to sign commits and/or mail.
To get the required PGP/GPG values for a key run
@ -35,14 +42,14 @@
!!! Note that PGP/GPG values stored here are for informational purposes only, don't use this file as a source of truth.
More fields may be added in the future.
More fields may be added in the future; however, in order to comply with the GDPR, this file should stay as minimal as possible.
Please keep the list alphabetically sorted.
See `./scripts/check-maintainer-github-handles.sh` for an example on how to work with this data.
*/
{
"0x4A6F" = {
email = "0x4A6F@shackspace.de";
_0x4A6F = {
email = "mail-maintainer@0x4A6F.dev";
name = "Joachim Ernst";
github = "0x4A6F";
githubId = 9675338;
@ -51,7 +58,7 @@
fingerprint = "F466 A548 AD3F C1F1 8C88 4576 8702 7528 B006 D66D";
}];
};
"1000101" = {
_1000101 = {
email = "b1000101@pm.me";
github = "1000101";
githubId = 791309;
@ -313,6 +320,12 @@
githubId = 43479487;
name = "Titouan Biteau";
};
alerque = {
email = "caleb@alerque.com";
github = "alerque";
githubId = 173595;
name = "Caleb Maclennan";
};
alexarice = {
email = "alexrice999@hotmail.co.uk";
github = "alexarice";
@ -543,6 +556,12 @@
githubId = 750786;
name = "Justin Wood";
};
anna328p = {
email = "anna328p@gmail.com";
github = "anna328p";
githubId = 9790772;
name = "Anna";
};
anmonteiro = {
email = "anmonteiro@gmail.com";
github = "anmonteiro";
@ -673,6 +692,12 @@
githubId = 3965744;
name = "Arthur Lee";
};
arturcygan = {
email = "arczicygan@gmail.com";
github = "arcz";
githubId = 4679721;
name = "Artur Cygan";
};
artuuge = {
email = "artuuge@gmail.com";
github = "artuuge";
@ -815,6 +840,12 @@
githubId = 135230;
name = "Aycan iRiCAN";
};
artturin = {
email = "artturin@artturin.com";
github = "artturin";
githubId = 56650223;
name = "Artturi N";
};
b4dm4n = {
email = "fabianm88@gmail.com";
github = "B4dM4n";
@ -1109,6 +1140,12 @@
githubId = 3465841;
name = "Boris Sukholitko";
};
bouk = {
name = "Bouke van der Bijl";
email = "i@bou.ke";
github = "bouk";
githubId = 97820;
};
bradediger = {
email = "brad@bradediger.com";
github = "bradediger";
@ -1406,6 +1443,22 @@
githubId = 30435868;
name = "Okina Matara";
};
Chili-Man = {
email = "dr.elhombrechile@gmail.com";
name = "Diego Rodriguez";
github = "Chili-Man";
githubId = 631802;
keys = [{
longkeyid = "rsa4096/0xE0EBAD78F0190BD9";
fingerprint = "099E 3F97 FA08 3D47 8C75 EBEC E0EB AD78 F019 0BD9";
}];
};
chiroptical = {
email = "chiroptical@gmail.com";
github = "chiroptical";
githubId = 3086255;
name = "Barry Moore II";
};
chkno = {
email = "chuck@intelligence.org";
github = "chkno";
@ -1720,6 +1773,16 @@
githubId = 490965;
name = "Craig Swank";
};
cust0dian = {
email = "serg@effectful.software";
github = "cust0dian";
githubId = 389387;
name = "Serg Nesterov";
keys = [{
longkeyid = "rsa4096/0x1512F6EB84AECC8C";
fingerprint = "6E7D BA30 DB5D BA60 693C 3BE3 1512 F6EB 84AE CC8C";
}];
};
cwoac = {
email = "oliver@codersoffortune.net";
github = "cwoac";
@ -1828,7 +1891,7 @@
githubId = 4971975;
name = "Janne Heß";
};
"dasj19" = {
dasj19 = {
email = "daniel@serbanescu.dk";
github = "dasj19";
githubId = 7589338;
@ -1896,6 +1959,12 @@
githubId = 14032;
name = "Daniel Brockman";
};
ddelabru = {
email = "ddelabru@redhat.com";
github = "ddelabru";
githubId = 39909293;
name = "Dominic Delabruere";
};
dduan = {
email = "daniel@duan.ca";
github = "dduan";
@ -2052,12 +2121,6 @@
githubId = 1316469;
name = "Naomi Morse";
};
dkudriavtsev = {
email = "dkudriavtsev@gmail.com";
github = "dkudriavtsev";
githubId = 9790772;
name = "Dmitry Kudriavtsev";
};
dmalikov = {
email = "malikov.d.y@gmail.com";
github = "dmalikov";
@ -2130,6 +2193,16 @@
githubId = 974130;
name = "David Pätzel";
};
dpausp = {
email = "dpausp@posteo.de";
github = "dpausp";
githubId = 1965950;
name = "Tobias Stenzel";
keys = [{
longkeyid = "rsa2048/0x78C7DD40DF23FB16";
fingerprint = "4749 0887 CF3B 85A1 6355 C671 78C7 DD40 DF23 FB16";
}];
};
dpflug = {
email = "david@pflug.email";
github = "dpflug";
@ -2364,6 +2437,12 @@
githubId = 1753498;
name = "Dejan Lukan";
};
elliottvillars = {
email = "elliottvillars@gmail.com";
github = "elliottvillars";
githubId = 48104179;
name = "Elliott Villars";
};
eliasp = {
email = "mail@eliasprobst.eu";
github = "eliasp";
@ -2388,6 +2467,12 @@
githubId = 97852;
name = "Ellis Whitehead";
};
elkowar = {
email = "thereal.elkowar@gmail.com";
github = "elkowar";
githubId = 5300871;
name = "Leon Kowarschick";
};
elohmeier = {
email = "elo-nixos@nerdworks.de";
github = "elohmeier";
@ -2928,6 +3013,12 @@
githubId = 7047019;
name = "Florent Becker";
};
galagora = {
email = "lightningstrikeiv@gmail.com";
github = "galagora";
githubId = 45048741;
name = "Alwanga Oyango";
};
gamb = {
email = "adam.gamble@pm.me";
github = "gamb";
@ -2964,12 +3055,6 @@
githubId = 313929;
name = "Gabriel Ebner";
};
geistesk = {
email = "post@0x21.biz";
github = "geistesk";
githubId = 8402811;
name = "Alvar Penning";
};
genesis = {
email = "ronan@aimao.org";
github = "bignaux";
@ -3058,6 +3143,12 @@
githubId = 25820499;
name = "Roman Kretschmer";
};
goertzenator = {
email = "daniel.goertzen@gmail.com";
github = "goertzenator";
githubId = 605072;
name = "Daniel Goertzen";
};
goibhniu = {
email = "cillian.deroiste@gmail.com";
github = "cillianderoiste";
@ -3146,6 +3237,12 @@
githubId = 6768842;
name = "Joris Guyonvarch";
};
gvolpe = {
email = "volpegabriel@gmail.com";
github = "gvolpe";
githubId = 443978;
name = "Gabriel Volpe";
};
hakuch = {
email = "hakuch@gmail.com";
github = "hakuch";
@ -3169,6 +3266,10 @@
github = "haozeke";
githubId = 4336207;
name = "Rohit Goswami";
keys = [{
longkeyid = "rsa4096/0x9CCCE36402CB49A6";
fingerprint = "74B1 F67D 8E43 A94A 7554 0768 9CCC E364 02CB 49A6";
}];
};
haslersn = {
email = "haslersn@fius.informatik.uni-stuttgart.de";
@ -3864,6 +3965,12 @@
githubId = 8735102;
name = "John Ramsden";
};
johntitor = {
email = "huyuumi.dev@gmail.com";
github = "JohnTitor";
githubId = 25030997;
name = "Yuki Okushi";
};
jojosch = {
name = "Johannes Schleifenbaum";
email = "johannes@js-webcoding.de";
@ -4083,6 +4190,12 @@
githubId = 87115;
name = "Wael Nasreddine";
};
kalekseev = {
email = "mail@kalekseev.com";
github = "kalekseev";
githubId = 367259;
name = "Konstantin Alekseev";
};
kamadorueda = {
name = "Kevin Amado";
email = "kamadorueda@gmail.com";
@ -4111,6 +4224,12 @@
github = "karantan";
githubId = 7062631;
};
KarlJoad = {
email = "karl@hallsby.com";
github = "KarlJoad";
githubId = 34152449;
name = "Karl Hallsby";
};
karolchmist = {
email = "info+nix@chmist.com";
name = "karolchmist";
@ -4314,6 +4433,12 @@
githubId = 524268;
name = "Koral";
};
koslambrou = {
email = "koslambrou@gmail.com";
github = "koslambrou";
githubId = 2037002;
name = "Konstantinos";
};
kovirobi = {
email = "kovirobi@gmail.com";
github = "kovirobi";
@ -4565,6 +4690,12 @@
fingerprint = "7FE2 113A A08B 695A C8B8 DDE6 AE53 B4C2 E58E DD45";
}];
};
lf- = {
email = "nix-maint@lfcode.ca";
github = "lf-";
githubId = 6652840;
name = "Jade";
};
lheckemann = {
email = "git@sphalerite.org";
github = "lheckemann";
@ -4705,6 +4836,12 @@
githubId = 1202012;
name = "Ignat Loskutov";
};
louisdk1 = {
email = "louis@louis.dk";
github = "louisdk1";
githubId = 4969294;
name = "Louis Tim Larsen";
};
lovek323 = {
email = "jason@oconal.id.au";
github = "lovek323";
@ -4757,6 +4894,12 @@
githubId = 59375051;
name = "Lucas Ransan";
};
lucperkins = {
email = "lucperkins@gmail.com";
github = "lucperkins";
githubId = 1523104;
name = "Luc Perkins";
};
lucus16 = {
email = "lars.jellema@gmail.com";
github = "Lucus16";
@ -5123,6 +5266,12 @@
githubId = 13689192;
name = "Nguyn Gia Phong";
};
mcwitt = {
email = "mcwitt@gmail.com";
github = "mcwitt";
githubId = 319411;
name = "Matt Wittmann";
};
mdaiter = {
email = "mdaiter8121@gmail.com";
github = "mdaiter";
@ -5197,6 +5346,12 @@
github = "metadark";
githubId = 382041;
};
meutraa = {
email = "paul+nixpkgs@lost.host";
name = "Paul Meredith";
github = "meutraa";
githubId = 68550871;
};
mfossen = {
email = "msfossen@gmail.com";
github = "mfossen";
@ -5814,6 +5969,12 @@
githubId = 1224006;
name = "Roberto Abdelkader Martínez Pérez";
};
nilsirl = {
email = "nils@nilsand.re";
github = "NilsIrl";
githubId = 26231126;
name = "Nils ANDRÉ-CHANG";
};
ninjatrappeur = {
email = "felix@alternativebit.fr";
github = "ninjatrappeur";
@ -6070,6 +6231,16 @@
fingerprint = "514B B966 B46E 3565 0508 86E8 0E6C A66E 5C55 7AA8";
}];
};
oxzi = {
email = "post@0x21.biz";
github = "oxzi";
githubId = 8402811;
name = "Alvar Penning";
keys = [{
longkeyid = "rsa4096/0xF32A45637FA25E31";
fingerprint = "EB14 4E67 E57D 27E2 B5A4 CD8C F32A 4563 7FA2 5E31";
}];
};
oyren = {
email = "m.scheuren@oyra.eu";
github = "oyren";
@ -6142,6 +6313,12 @@
githubId = 20792;
name = "Sebastian Galkin";
};
parasrah = {
email = "nixos@parasrah.com";
github = "parasrah";
githubId = 14935550;
name = "Brad Pfannmuller";
};
pashashocky = {
email = "pashashocky@gmail.com";
github = "pashashocky";
@ -6316,6 +6493,12 @@
githubId = 119460;
name = "Perry Barnoy";
};
pjjw = {
email = "peter@shortbus.org";
github = "pjjw";
githubId = 638;
name = "Peter Woodman";
};
pjones = {
email = "pjones@devalot.com";
github = "pjones";
@ -6466,6 +6649,12 @@
fingerprint = "86E6 792F C27B FD47 8860 C110 91F3 B339 B9A0 2A3D";
}];
};
psanford = {
email = "psanford@sanford.io";
github = "psanford";
githubId = 33375;
name = "Peter Sanford";
};
pshendry = {
email = "paul@pshendry.com";
github = "pshendry";
@ -6804,6 +6993,12 @@
githubId = 2507744;
name = "Roland Koebler";
};
rizary = {
email = "andika@numtide.com";
github = "Rizary";
githubId = 7221768;
name = "Andika Demas Riyandi";
};
rkrzr = {
email = "ops+nixpkgs@channable.com";
github = "rkrzr";
@ -7074,6 +7269,16 @@
githubId = 132835;
name = "Samuel Dionne-Riel";
};
samuelgrf = {
email = "git@samuelgrf.com";
github = "samuelgrf";
githubId = 67663538;
name = "Samuel Gräfenstein";
keys = [{
longkeyid = "rsa4096/0xEF76A063F15C63C8";
fingerprint = "FF24 5832 8FAF 4660 18C6 186E EF76 A063 F15C 63C8";
}];
};
samuelrivas = {
email = "samuelrivas@gmail.com";
github = "samuelrivas";
@ -7398,6 +7603,12 @@
githubId = 2770647;
name = "Simon Vandel Sillesen";
};
siraben = {
email = "bensiraphob@gmail.com";
github = "siraben";
githubId = 8219659;
name = "Siraphob Phipathananunth";
};
siriobalmelli = {
email = "sirio@b-ad.ch";
github = "siriobalmelli";
@ -7768,6 +7979,12 @@
githubId = 332289;
name = "Rafał Łasocha";
};
syberant = {
email = "sybrand@neuralcoding.com";
github = "syberant";
githubId = 20063502;
name = "Sybrand Aarnoutse";
};
symphorien = {
email = "symphorien_nixpkgs@xlumurb.eu";
github = "symphorien";
@ -7940,6 +8157,12 @@
githubId = 26417242;
name = "Mikolaj Galkowski";
};
TethysSvensson = {
email = "freaken@freaken.dk";
github = "TethysSvensson";
githubId = 4294434;
name = "Tethys Svensson";
};
teto = {
email = "mcoudron@hotmail.com";
github = "teto";
@ -8006,7 +8229,7 @@
githubId = 8547242;
name = "Stefan Rohrbacher";
};
"thelegy" = {
thelegy = {
email = "mail+nixos@0jb.de";
github = "thelegy";
githubId = 3105057;
@ -8224,6 +8447,12 @@
githubId = 207457;
name = "Matthieu Chevrier";
};
trepetti = {
email = "trepetti@cs.columbia.edu";
github = "trepetti";
githubId = 25440339;
name = "Tom Repetti";
};
trevorj = {
email = "nix@trevor.joynson.io";
github = "akatrevorjay";
@ -8551,6 +8780,14 @@
githubId = 13259982;
name = "Vanessa McHale";
};
voidless = {
email = "julius.schmitt@yahoo.de";
github = "voidIess";
githubId = 45292658;
name = "Julius Schmitt";
};
volhovm = {
email = "volhovm.cs@gmail.com";
github = "volhovm";
@ -8947,6 +9184,16 @@
email = "zef@zef.me";
name = "Zef Hemel";
};
zeratax = {
email = "mail@zera.tax";
github = "ZerataX";
githubId = 5024958;
name = "Jona Abdinghoff";
keys = [{
longkeyid = "rsa4096/0x8333735E784DF9D4";
fingerprint = "44F7 B797 9D3A 27B1 89E0 841E 8333 735E 784D F9D4";
}];
};
zfnmxt = {
name = "zfnmxt";
email = "zfnmxt@zfnmxt.com";
@ -9139,4 +9386,10 @@
github = "deifactor";
githubId = 30192992;
};
fzakaria = {
name = "Farid Zakaria";
email = "farid.m.zakaria@gmail.com";
github = "fzakaria";
githubId = 605070;
};
}

View file

@ -79,5 +79,4 @@ say,,,,,
std__debug,std._debug,,,,
std_normalize,std.normalize,,,,
stdlib,,,,,vyp
pulseaudio,,,,,doronbehar
vstruct,,,,,


View file

@ -124,4 +124,3 @@ if [ -n "$optPrint" ]; then
echo
cat "$newlist"
fi

View file

@ -59,6 +59,16 @@ with lib.maintainers; {
scope = "Maintain GNOME desktop environment and platform.";
};
jitsi = {
members = [
mmilata
petabyteboy
prusnak
ryantm
];
scope = "Maintain Jitsi.";
};
matrix = {
members = [
ma27

View file

@ -52,7 +52,7 @@
<para>
The proper installation of OpenCL drivers can be verified through
the <command>clinfo</command> command of the <package>clinfo</package>
package. This command will report the number of hardware devides
package. This command will report the number of hardware devices
that is found and give detailed information for each device:
</para>
@ -100,5 +100,182 @@ ROCR_EXT_DIR=`nix-build '&lt;nixpkgs&gt;' --no-out-link -A rocm-runtime-ext`/lib
Image support Yes</screen>
</para>
</section>
<section xml:id="sec-gpu-accel-opencl-intel">
<title>Intel</title>
<para>
<link
xlink:href="https://en.wikipedia.org/wiki/List_of_Intel_graphics_processing_units#Gen8">Intel
Gen8 and later GPUs</link> are supported by the Intel NEO OpenCL
runtime that is provided by the
<package>intel-compute-runtime</package> package. For Gen7 GPUs,
the deprecated Beignet runtime can be used, which is provided
by the <package>beignet</package> package. The proprietary Intel
OpenCL runtime, in the <package>intel-ocl</package> package, is
an alternative for Gen7 GPUs.
</para>
<para>
The <package>intel-compute-runtime</package>, <package>beignet</package>,
or <package>intel-ocl</package> package can be added to
<xref linkend="opt-hardware.opengl.extraPackages"/> to enable OpenCL
support. For example, for Gen8 and later GPUs, the following
configuration can be used:
<programlisting><xref linkend="opt-hardware.opengl.extraPackages"/> = [
intel-compute-runtime
];</programlisting>
</para>
</section>
</section>
<section xml:id="sec-gpu-accel-vulkan">
<title>Vulkan</title>
<para>
<link xlink:href="https://en.wikipedia.org/wiki/Vulkan_(API)">Vulkan</link> is a
graphics and compute API for GPUs. It is used directly by games or indirectly through
compatibility layers like <link xlink:href="https://github.com/doitsujin/dxvk/wiki">DXVK</link>.
</para>
<para>
By default, if <xref linkend="opt-hardware.opengl.driSupport"/> is enabled,
<package>mesa</package> is installed and provides Vulkan for supported hardware.
</para>
<para>
Similar to OpenCL, Vulkan drivers are loaded through the <emphasis>Installable Client
Driver</emphasis> (ICD) mechanism. ICD files for Vulkan are JSON files that specify
the path to the driver library and the supported Vulkan version. All successfully
loaded drivers are exposed to the application as different GPUs.
In NixOS, there are two ways to make ICD files visible to Vulkan applications: an
environment variable and a module option.
</para>
<para>
The first option is through the <varname>VK_ICD_FILENAMES</varname>
environment variable. This variable can contain multiple JSON files, separated by
<literal>:</literal>. For example:
<screen><prompt>$</prompt> export \
VK_ICD_FILENAMES=`nix-build '&lt;nixpkgs&gt;' --no-out-link -A amdvlk`/share/vulkan/icd.d/amd_icd64.json</screen>
</para>
<para>
The second mechanism is to add the Vulkan driver package to
<xref linkend="opt-hardware.opengl.extraPackages"/>. This links the
ICD file under <filename>/run/opengl-driver</filename>, where it will
be visible to the ICD loader.
</para>
<para>
The proper installation of Vulkan drivers can be verified through
the <command>vulkaninfo</command> command of the <package>vulkan-tools</package>
package. This command will report the hardware devices and drivers found,
in this example output, amdvlk and radv:
</para>
<screen><prompt>$</prompt> vulkaninfo | grep GPU
GPU id : 0 (Unknown AMD GPU)
GPU id : 1 (AMD RADV NAVI10 (LLVM 9.0.1))
...
GPU0:
deviceType = PHYSICAL_DEVICE_TYPE_DISCRETE_GPU
deviceName = Unknown AMD GPU
GPU1:
deviceType = PHYSICAL_DEVICE_TYPE_DISCRETE_GPU</screen>
<para>
A simple graphical application that uses Vulkan is <command>vkcube</command>
from the <package>vulkan-tools</package> package.
</para>
<section xml:id="sec-gpu-accel-vulkan-amd">
<title>AMD</title>
<para>
Modern AMD <link
xlink:href="https://en.wikipedia.org/wiki/Graphics_Core_Next">Graphics
Core Next</link> (GCN) GPUs are supported through either radv, which is
part of <package>mesa</package>, or the <package>amdvlk</package> package.
Adding the <package>amdvlk</package> package to
<xref linkend="opt-hardware.opengl.extraPackages"/> makes both drivers
available for applications and lets them choose. A specific driver can
be forced as follows:
<programlisting><xref linkend="opt-hardware.opengl.extraPackages"/> = [
<package>amdvlk</package>
];
# For amdvlk
<xref linkend="opt-environment.variables"/>.VK_ICD_FILENAMES =
"/run/opengl-driver/share/vulkan/icd.d/amd_icd64.json";
# For radv
<xref linkend="opt-environment.variables"/>.VK_ICD_FILENAMES =
"/run/opengl-driver/share/vulkan/icd.d/radeon_icd.x86_64.json";
</programlisting>
</para>
</section>
</section>
<section xml:id="sec-gpu-accel-common-issues">
<title>Common issues</title>
<section xml:id="sec-gpu-accel-common-issues-permissions">
<title>User permissions</title>
<para>
Except where noted explicitly, it should not be necessary to
adjust user permissions to use these acceleration APIs. In the default
configuration, GPU devices have world-read/write permissions
(<filename>/dev/dri/renderD*</filename>) or are tagged as
<code>uaccess</code> (<filename>/dev/dri/card*</filename>). The
access control lists of devices with the <varname>uaccess</varname>
tag will be updated automatically when a user logs in through
<command>systemd-logind</command>. For example, if the user
<emphasis>jane</emphasis> is logged in, the access control list
should look as follows:
<screen><prompt>$</prompt> getfacl /dev/dri/card0
# file: dev/dri/card0
# owner: root
# group: video
user::rw-
user:jane:rw-
group::rw-
mask::rw-
other::---</screen>
If you disabled (this functionality of) <command>systemd-logind</command>,
you may need to add the user to the <code>video</code> group and
log in again.
</para>
</section>
<section xml:id="sec-gpu-accel-common-issues-mixing-nixpkgs">
<title>Mixing different versions of nixpkgs</title>
<para>
The <emphasis>Installable Client Driver</emphasis> (ICD)
mechanism used by OpenCL and Vulkan loads runtimes into its address
space using <code>dlopen</code>. Mixing an ICD loader mechanism and
runtimes from different versions of nixpkgs may not work. For example,
if the ICD loader uses an older version of <package>glibc</package>
than the runtime, the runtime may not be loadable due to
missing symbols. Unfortunately, the loader will generally be quiet
about such issues.
</para>
<para>
If you suspect that you are running into library version mismatches
between an ICD loader and a runtime, you could run an application with
the <code>LD_DEBUG</code> variable set to get more diagnostic
information. For example, OpenCL can be tested with
<code>LD_DEBUG=files clinfo</code>, which should report missing
symbols.
</para>
</section>
</section>
</chapter>

View file

@ -0,0 +1,68 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-freeform-modules">
<title>Freeform modules</title>
<para>
Freeform modules allow you to define values for option paths that have not been declared explicitly. This can be used to add attribute-specific types to what would otherwise have to be <literal>attrsOf</literal> options in order to accept all attribute names.
</para>
<para>
This feature can be enabled by using the attribute <literal>freeformType</literal> to define a freeform type. By doing this, all assignments without an associated option will be merged using the freeform type and combined into the resulting <literal>config</literal> set. Since this feature nullifies name checking for entire option trees, it is only recommended for use in submodules.
</para>
<example xml:id="ex-freeform-module">
<title>Freeform submodule</title>
<para>
The following shows a submodule assigning a freeform type that allows arbitrary attributes with <literal>str</literal> values below <literal>settings</literal>, but also declares an option for the <literal>settings.port</literal> attribute to have it type-checked and assign a default value. See <xref linkend="ex-settings-typed-attrs"/> for a more complete example.
</para>
<programlisting>
{ lib, config, ... }: {
options.settings = lib.mkOption {
type = lib.types.submodule {
freeformType = with lib.types; attrsOf str;
# We want this attribute to be checked for the correct type
options.port = lib.mkOption {
type = lib.types.port;
# Declaring the option also allows defining a default value
default = 8080;
};
};
};
}
</programlisting>
<para>
And the following shows what such a module then allows
</para>
<programlisting>
{
# Not a declared option, but the freeform type allows this
settings.logLevel = "debug";
# Not allowed because the freeform type only allows strings
# settings.enable = true;
# Allowed because there is a port option declared
settings.port = 80;
# Not allowed because the port option doesn't allow strings
# settings.port = "443";
}
</programlisting>
</example>
<note>
<para>
Freeform attributes cannot depend on other attributes of the same set without infinite recursion:
<programlisting>
{
# This throws infinite recursion encountered
settings.logLevel = lib.mkIf (config.settings.port == 80) "debug";
}
</programlisting>
To prevent this, declare options for all attributes that need to depend on others. For the above example, this means declaring <literal>logLevel</literal> as an option.
</para>
</note>
</section>

View file

@ -0,0 +1,216 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-settings-options">
<title>Options for Program Settings</title>
<para>
Many programs have configuration files where program-specific settings can be declared. File formats can be separated into two categories:
<itemizedlist>
<listitem>
<para>
Nix-representable ones: These can trivially be mapped to a subset of Nix syntax. JSON is one example, since its values like <literal>{"foo":{"bar":10}}</literal> can be mapped directly to Nix: <literal>{ foo = { bar = 10; }; }</literal>. Other examples are INI, YAML and TOML. The following section explains the convention for these settings.
</para>
</listitem>
<listitem>
<para>
Non-nix-representable ones: These can't be trivially mapped to a subset of Nix syntax. Most generic programming languages are in this group, e.g. bash, since the statement <literal>if true; then echo hi; fi</literal> doesn't have a trivial representation in Nix.
</para>
<para>
Currently there are no fixed conventions for these, but it is common to have a <literal>configFile</literal> option for setting the configuration file path directly. The default value of <literal>configFile</literal> can be an auto-generated file, with convenient options for controlling the contents. For example, an option of type <literal>attrsOf str</literal> can be used to represent environment variables, generating a section like <literal>export FOO="foo"</literal>. Often it is also useful to include an <literal>extraConfig</literal> option of type <literal>lines</literal> to allow arbitrary text after the autogenerated part of the file. A sketch of this pattern follows the list.
</para>
</listitem>
</itemizedlist>
</para>
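As a rough illustration of the `configFile`/`extraConfig` convention described above, here is a minimal sketch; the `services.foo` namespace and all option names are hypothetical, and only the types (`attrsOf str`, `lines`) and the generated `export FOO="foo"` shape come from the text:

```nix
# Hypothetical module following the configFile/extraConfig convention.
{ config, lib, pkgs, ... }:
let
  cfg = config.services.foo;
  # Render the attrsOf str option as `export FOO="foo"` lines.
  envSection = lib.concatStringsSep "\n"
    (lib.mapAttrsToList (name: value: ''export ${name}="${value}"'') cfg.environment);
in {
  options.services.foo = {
    environment = lib.mkOption {
      type = with lib.types; attrsOf str;
      default = {};
      description = "Environment variables written to the generated file.";
    };
    extraConfig = lib.mkOption {
      type = lib.types.lines;
      default = "";
      description = "Arbitrary text appended after the autogenerated part.";
    };
    configFile = lib.mkOption {
      type = lib.types.path;
      default = pkgs.writeText "foo.conf" ''
        ${envSection}
        ${cfg.extraConfig}
      '';
      description = "Configuration file path; defaults to an auto-generated file.";
    };
  };
}
```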
<section xml:id="sec-settings-nix-representable">
<title>Nix-representable Formats (JSON, YAML, TOML, INI, ...)</title>
<para>
By convention, formats like this are handled with a generic <literal>settings</literal> option, representing the full program configuration as a Nix value. The type of this option should represent the format. The most common formats have a predefined type and string generator already declared under <literal>pkgs.formats</literal>:
<variablelist>
<varlistentry>
<term>
<varname>pkgs.formats.json</varname> { }
</term>
<listitem>
<para>
A function taking an empty attribute set (for future extensibility) and returning a set with JSON-specific attributes <varname>type</varname> and <varname>generate</varname> as specified <link linkend='pkgs-formats-result'>below</link>.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>pkgs.formats.yaml</varname> { }
</term>
<listitem>
<para>
A function taking an empty attribute set (for future extensibility) and returning a set with YAML-specific attributes <varname>type</varname> and <varname>generate</varname> as specified <link linkend='pkgs-formats-result'>below</link>.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>pkgs.formats.ini</varname> { <replaceable>listsAsDuplicateKeys</replaceable> ? false, ... }
</term>
<listitem>
<para>
A function taking an attribute set with values
<variablelist>
<varlistentry>
<term>
<varname>listsAsDuplicateKeys</varname>
</term>
<listitem>
<para>
A boolean for controlling whether list values can be used to represent duplicate INI keys
</para>
</listitem>
</varlistentry>
</variablelist>
It returns a set with INI-specific attributes <varname>type</varname> and <varname>generate</varname> as specified <link linkend='pkgs-formats-result'>below</link>.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>pkgs.formats.toml</varname> { }
</term>
<listitem>
<para>
A function taking an empty attribute set (for future extensibility) and returning a set with TOML-specific attributes <varname>type</varname> and <varname>generate</varname> as specified <link linkend='pkgs-formats-result'>below</link>.
</para>
</listitem>
</varlistentry>
</variablelist>
</para>
<para xml:id="pkgs-formats-result">
These functions all return an attribute set with these values:
<variablelist>
<varlistentry>
<term>
<varname>type</varname>
</term>
<listitem>
<para>
A module system type representing a value of the format
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>
<varname>generate</varname> <replaceable>filename</replaceable> <replaceable>jsonValue</replaceable>
</term>
<listitem>
<para>
A function that can render a value of the format to a file. Returns a file path.
<note>
<para>
This function puts the value contents in the Nix store. So this should be avoided for secrets.
</para>
</note>
</para>
</listitem>
</varlistentry>
</variablelist>
</para>
<example xml:id="ex-settings-nix-representable">
<title>Module with conventional <literal>settings</literal> option</title>
<para>
The following shows a module for an example program that uses a JSON configuration file. It demonstrates how the above values can be used, along with some other related best practices. See the comments for explanations.
</para>
<programlisting>
{ options, config, lib, pkgs, ... }:
let
cfg = config.services.foo;
# Define the settings format used for this program
settingsFormat = pkgs.formats.json {};
in {
options.services.foo = {
enable = lib.mkEnableOption "foo service";
settings = lib.mkOption {
# Setting this type allows for correct merging behavior
type = settingsFormat.type;
default = {};
description = ''
Configuration for foo, see
&lt;link xlink:href="https://example.com/docs/foo"/&gt;
for supported settings.
'';
};
};
config = lib.mkIf cfg.enable {
# We can assign some default settings here to make the service work by just
# enabling it. We use `mkDefault` for values that can be changed without
# problems
services.foo.settings = {
# Fails at runtime without any value set
log_level = lib.mkDefault "WARN";
# We assume systemd's `StateDirectory` is used, so we require this value,
# therefore no mkDefault
data_path = "/var/lib/foo";
# Since we use this to create a user we need to know the default value at
# eval time
user = lib.mkDefault "foo";
};
environment.etc."foo.json".source =
# The formats generator function takes a filename and the Nix value
# representing the format value and produces a filepath with that value
# rendered in the format
settingsFormat.generate "foo-config.json" cfg.settings;
# We know that the `user` attribute exists because we set a default value
# for it above, allowing us to use it without worries here
users.users.${cfg.settings.user} = {};
# ...
};
}
</programlisting>
</example>
<section xml:id="sec-settings-attrs-options">
<title>Option declarations for attributes</title>
<para>
Some <literal>settings</literal> attributes may deserve extra care. They may need a different type, default or merging behavior, or they may be essential options whose documentation should show up in the manual. This can be done using <xref linkend='sec-freeform-modules'/>.
<example xml:id="ex-settings-typed-attrs">
<title>Declaring a type-checked <literal>settings</literal> attribute</title>
<para>
We extend the above example using freeform modules to declare an option for the port, which will enforce it to be a valid integer and make it show up in the manual.
</para>
<programlisting>
settings = lib.mkOption {
type = lib.types.submodule {
freeformType = settingsFormat.type;
# Declare an option for the port such that the type is checked and this option
# is shown in the manual.
options.port = lib.mkOption {
type = lib.types.port;
default = 8080;
description = ''
Which port this service should listen on.
'';
};
};
default = {};
description = ''
Configuration for Foo, see
&lt;link xlink:href="https://example.com/docs/foo"/&gt;
for supported values.
'';
};
</programlisting>
</example>
</para>
</section>
</section>
</section>

View file

@ -183,4 +183,6 @@ in {
<xi:include href="meta-attributes.xml" />
<xi:include href="importing-modules.xml" />
<xi:include href="replace-modules.xml" />
<xi:include href="freeform-modules.xml" />
<xi:include href="settings-options.xml" />
</chapter>

View file

@ -216,12 +216,12 @@ start_all()
</varlistentry>
<varlistentry>
<term>
<methodname>send_keys</methodname>
<methodname>send_key</methodname>
</term>
<listitem>
<para>
Simulate pressing keys on the virtual keyboard, e.g.,
<literal>send_keys("ctrl-alt-delete")</literal>.
<literal>send_key("ctrl-alt-delete")</literal>.
</para>
</listitem>
</varlistentry>
@ -232,7 +232,7 @@ start_all()
<listitem>
<para>
Simulate typing a sequence of characters on the virtual keyboard, e.g.,
<literal>send_keys("foobar\n")</literal> will type the string
<literal>send_chars("foobar\n")</literal> will type the string
<literal>foobar</literal> followed by the Enter key.
</para>
</listitem>

View file

@ -16,7 +16,7 @@
</para>
<programlisting>
nix-build -A netboot nixos/release.nix
nix-build -A netboot.x86_64-linux nixos/release.nix
</programlisting>
<para>

View file

@ -42,7 +42,7 @@
</para>
<para>
If the text is too small to be legible, try <command>setfont ter-132n</command>
If the text is too small to be legible, try <command>setfont ter-v32n</command>
to increase the font size.
</para>

View file

@ -42,6 +42,11 @@
PHP now defaults to PHP 7.4, updated from 7.3.
</para>
</listitem>
<listitem>
<para>
PHP 7.2 is no longer supported due to upstream not supporting this version for the entire lifecycle of the 20.09 release.
</para>
</listitem>
<listitem>
<para>
Python 3 now defaults to Python 3.8 instead of 3.7.
@ -109,6 +114,17 @@ systemd.services.mysql.serviceConfig.ProtectHome = lib.mkForce "read-only";
systemd.services.mysql.serviceConfig.ReadWritePaths = [ "/var/data" ];
</programlisting>
</para>
<para>
The MySQL service no longer runs its <literal>systemd</literal> service startup script as <literal>root</literal>. A dedicated non-<literal>root</literal>
super user account is required for operation. This means users with an existing MySQL or MariaDB database server are required to run the following SQL statements
as a super admin user before upgrading:
<programlisting>
CREATE USER IF NOT EXISTS 'mysql'@'localhost' identified with unix_socket;
GRANT ALL PRIVILEGES ON *.* TO 'mysql'@'localhost' WITH GRANT OPTION;
</programlisting>
If you use MySQL instead of MariaDB please replace <literal>unix_socket</literal> with <literal>auth_socket</literal>. If you have changed the value of <xref linkend="opt-services.mysql.user"/>
from the default of <literal>mysql</literal> to a different user please change <literal>'mysql'@'localhost'</literal> to the corresponding user instead.
</para>
</listitem>
<listitem>
<para>
@ -130,6 +146,16 @@ systemd.services.mysql.serviceConfig.ReadWritePaths = [ "/var/data" ];
This will make container tools like Podman work as non-root users out of the box.
</para>
</listitem>
<listitem>
<para>
The various documented workarounds for running Steam have been converted to a module. <varname>programs.steam.enable</varname> enables Steam, controller support and the workarounds; a short configuration sketch follows this section.
</para>
</listitem>
<listitem>
<para>
Support was added for the built-in LCDs in various pieces of Logitech hardware (keyboards and USB speakers). <varname>hardware.logitech.lcd.enable</varname> enables support for all hardware supported by the g15daemon project.
</para>
</listitem>
</itemizedlist>
</section>
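As a hedged sketch of the two new options mentioned in the list above (`programs.steam.enable` and `hardware.logitech.lcd.enable`); everything else in the snippet is illustrative boilerplate:

```nix
# Hypothetical configuration.nix fragment; only the two option names are
# taken from the release notes above.
{ ... }: {
  programs.steam.enable = true;         # Steam, controller support and the workarounds
  hardware.logitech.lcd.enable = true;  # LCD support via the g15daemon project
}
```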
@ -176,9 +202,7 @@ systemd.services.mysql.serviceConfig.ReadWritePaths = [ "/var/data" ];
likely to break with future versions of go. As a result,
<literal>buildGoModule</literal> switched from
<literal>modSha256</literal> to the <literal>vendorSha256</literal>
attribute to pin fetched version data. <literal>buildGoModule</literal>
still accepts <literal>modSha256</literal> with a warning, but support will
be removed in the next release.
attribute to pin fetched version data.
</para>
</listitem>
<listitem>
@ -515,6 +539,46 @@ systemd.services.nginx.serviceConfig.ReadWritePaths = [ "/var/www" ];
In the <literal>resilio</literal> module, <xref linkend="opt-services.resilio.httpListenAddr"/> has been changed to listen to <literal>[::1]</literal> instead of <literal>0.0.0.0</literal>.
</para>
</listitem>
<listitem>
<para>
Users of <link xlink:href="http://openafs.org">OpenAFS 1.6</link> must
upgrade their services to OpenAFS 1.8! In this release, the OpenAFS package
version 1.6.24 is marked broken but can be used during transition to
OpenAFS 1.8.x. Use the options
<option>services.openafsClient.packages.module</option>,
<option>services.openafsClient.packages.programs</option> and
<option>services.openafsServer.package</option> to select a different
OpenAFS package. OpenAFS 1.6 will be removed in the next release. The
package <literal>openafs</literal> and the service options will then
silently point to the OpenAFS 1.8 release.
</para>
<para>
See also the OpenAFS <link
xlink:href="http://docs.openafs.org/AdminGuide/index.html">Administrator
Guide</link> for instructions. Beware of the following when updating
servers:
<itemizedlist>
<listitem>
<para>
The storage format of the server key has changed and the key must be converted before running the new release.
</para>
</listitem>
<listitem>
<para>
When updating multiple database servers, turn off the database servers
from the highest IP down to the lowest with resting periods in
between. Start up in reverse order. Do not concurrently run database
servers working with different OpenAFS releases!
</para>
</listitem>
<listitem>
<para>
Update servers first, then clients.
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>
Radicale's default package has changed from 2.x to 3.x. An upgrade
@ -525,12 +589,106 @@ systemd.services.nginx.serviceConfig.ReadWritePaths = [ "/var/www" ];
automatically if <literal>stateVersion</literal> is 20.09 or higher.
</para>
</listitem>
<listitem>
<para>
<literal>udpt</literal> has been completely rewritten from C++ to Rust. The configuration format changed from INI to TOML.
The new configuration documentation can be found at
<link xlink:href="https://naim94a.github.io/udpt/config.html">the official website</link> and example
configuration is packaged in <literal>${udpt}/share/udpt/udpt.toml</literal>.
</para>
</listitem>
<listitem>
<para>
We now have a unified <xref linkend="opt-services.xserver.displayManager.autoLogin"/> option interface
to be used for every display-manager in NixOS.
</para>
</listitem>
<listitem>
<para>
The <literal>bitcoind</literal> module has changed to multi-instance, using submodules.
Therefore, it is now mandatory to name each instance.
To use this new multi-instance config with an existing bitcoind data directory and user,
you have to adjust the original config, e.g.:
<programlisting>
services.bitcoind = {
enable = true;
extraConfig = "...";
...
};
</programlisting>
To something similar:
<programlisting>
services.bitcoind.mainnet = {
enable = true;
dataDir = "/var/lib/bitcoind";
user = "bitcoin";
extraConfig = "...";
...
};
</programlisting>
The key settings are:
<itemizedlist>
<listitem>
<para>
<literal>dataDir</literal> - to continue using the same data directory.
</para>
</listitem>
<listitem>
<para>
<literal>user</literal> - to continue using the same user so that bitcoind maintains access to its files.
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>
Graylog introduced a change in the LDAP server certificate validation behaviour for version 3.3.3 which might break existing setups.
When updating Graylog from a version before 3.3.3 make sure to check the Graylog <link xlink:href="https://www.graylog.org/post/announcing-graylog-v3-3-3">release info</link> for information on how to avoid the issue.
</para>
</listitem>
<listitem>
<para>
The <literal>dokuwiki</literal> module has changed to multi-instance, using submodules.
Therefore, it is now mandatory to name each instance. Moreover, forcing SSL by default has been dropped, so
<literal>nginx.forceSSL</literal> and <literal>nginx.enableACME</literal> are no longer set to <literal>true</literal>.
To continue using your service with the original SSL settings, you have to adjust the original config, e.g.:
<programlisting>
services.dokuwiki = {
enable = true;
...
};
</programlisting>
To something similar:
<programlisting>
services.dokuwiki."mywiki" = {
enable = true;
nginx = {
forceSSL = true;
enableACME = true;
};
...
};
</programlisting>
The base package has also been upgraded to the 2020-07-29 "Hogfather" release. Plugins might be incompatible or require upgrading.
</para>
</listitem>
<listitem>
<para>
The <xref linkend="opt-services.postgresql.dataDir"/> option is now set to <literal>"/var/lib/postgresql/${cfg.package.psqlSchema}"</literal> regardless of your
<xref linkend="opt-system.stateVersion"/>. Users with an existing postgresql install that have a <xref linkend="opt-system.stateVersion"/> of <literal>17.09</literal> or below
should double check what the value of their <xref linkend="opt-services.postgresql.dataDir"/> option is (<literal>/var/db/postgresql</literal>) and then explicitly
set this value to maintain compatibility:
<programlisting>
services.postgresql.dataDir = "/var/db/postgresql";
</programlisting>
</para>
</listitem>
<listitem>
<para>
The USBGuard module now removes options and instead hardcodes values for <literal>IPCAccessControlFiles</literal>, <literal>ruleFiles</literal>, and <literal>auditFilePath</literal>. Audit logs can be found in the journal.
</para>
</listitem>
</itemizedlist>
</section>
@ -566,6 +724,11 @@ systemd.services.nginx.serviceConfig.ReadWritePaths = [ "/var/www" ];
The default output of <literal>buildGoPackage</literal> is now <literal>$out</literal> instead of <literal>$bin</literal>.
</para>
</listitem>
<listitem>
<para>
<literal>buildGoModule</literal> <literal>doCheck</literal> now defaults to <literal>true</literal>.
</para>
</listitem>
<listitem>
<para>
Packages built using <literal>buildRustPackage</literal> now use <literal>release</literal>
@ -623,6 +786,37 @@ systemd.services.nginx.serviceConfig.ReadWritePaths = [ "/var/www" ];
was removed, as udev gained native support to handle FIDO security tokens.
</para>
</listitem>
<listitem>
<para>
The <literal>services.transmission</literal> module
was enhanced with the new options:
<xref linkend="opt-services.transmission.credentialsFile"/>,
<xref linkend="opt-services.transmission.openFirewall"/>,
and <xref linkend="opt-services.transmission.performanceNetParameters"/>.
</para>
<para>
<literal>transmission-daemon</literal> is now started with additional systemd sandbox/hardening options for better security.
Please <link xlink:href="https://github.com/NixOS/nixpkgs/issues">report</link>
any use case where this is not working well.
In particular, the newly set <literal>RootDirectory</literal> option
forbids uploading or downloading a torrent outside of the default directory
configured at <link linkend="opt-services.transmission.settings">settings.download-dir</link>.
If you really need Transmission to access other directories,
you must include those directories into the <literal>BindPaths</literal> of the service:
<programlisting>
systemd.services.transmission.serviceConfig.BindPaths = [ "/path/to/alternative/download-dir" ];
</programlisting>
</para>
<para>
Also, connection to the RPC (Remote Procedure Call) of <literal>transmission-daemon</literal>
is now only available on the local network interface by default.
Use:
<programlisting>
services.transmission.settings.rpc-bind-address = "0.0.0.0";
</programlisting>
to get the previous behavior of listening on all network interfaces.
</para>
</listitem>
<listitem>
<para>
With this release <literal>systemd-networkd</literal> (when enabled through <xref linkend="opt-networking.useNetworkd"/>)
@ -705,6 +899,17 @@ systemd.services.nginx.serviceConfig.ReadWritePaths = [ "/var/www" ];
There are no functional changes, however this may require updating some configurations to use correct types for all attributes.
</para>
</listitem>
<listitem>
<para>
The <literal>fontconfig</literal> module stopped generating fontconfig 2.10.x config and cache.
Fontconfig 2.10.x was removed from Nixpkgs; it is no longer used by any Nixpkgs package.
</para>
</listitem>
<listitem>
<para>
The packages <package>perl</package>, <package>rsync</package> and <package>strace</package> were removed from <option>systemPackages</option>. If you need them, install them again with <code><xref linkend="opt-environment.systemPackages"/> = with pkgs; [ perl rsync strace ];</code> in your <filename>configuration.nix</filename>.
</para>
</listitem>
</itemizedlist>
</section>
</section>

View file

@ -5,21 +5,32 @@
config
, # The size of the disk, in megabytes.
diskSize
# if "auto" size is calculated based on the contents copied to it and
# additionalSpace is taken into account.
diskSize ? "auto"
# The files and directories to be placed in the target file system.
, # additional disk space to be added to the image if diskSize "auto"
# is used
additionalSpace ? "512M"
, # size of the boot partition; only used if partitionTableType is
# either "efi" or "hybrid"
bootSize ? "256M"
, # The files and directories to be placed in the target file system.
# This is a list of attribute sets {source, target} where `source'
# is the file system object (regular file or directory) to be
# grafted in the file system at path `target'.
, contents ? []
contents ? []
, # Type of partition table to use; either "legacy", "efi", or "none".
# For "efi" images, the GPT partition table is used and a mandatory ESP
# partition of reasonable size is created in addition to the root partition.
# If `installBootLoader` is true, GRUB will be installed in EFI mode.
# For "legacy", the msdos partition table is used and a single large root
# partition is created. If `installBootLoader` is true, GRUB will be
# installed in legacy mode.
# partition is created.
# For "hybrid", the GPT partition table is used and a mandatory ESP
# partition of reasonable size is created in addition to the root partition.
# Also a legacy MBR will be present.
# For "none", no partition table is created. Enabling `installBootLoader`
# most likely fails as GRUB will probably refuse to install.
partitionTableType ? "legacy"
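A rough sketch of how the extended interface might be invoked; the import path and the `pkgs`/`lib`/`config` plumbing are assumptions for illustration, and only the parameters documented in this hunk come from the diff:

```nix
# Hypothetical call site for the image builder with the new parameters.
import <nixpkgs/nixos/lib/make-disk-image.nix> {
  inherit pkgs lib config;
  diskSize = "auto";              # size the image from its contents...
  additionalSpace = "512M";       # ...plus this much headroom
  bootSize = "256M";              # ESP size, used for "efi" and "hybrid"
  partitionTableType = "hybrid";  # GPT with an ESP plus a legacy MBR
}
```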
@ -43,7 +54,7 @@
format ? "raw"
}:
assert partitionTableType == "legacy" || partitionTableType == "efi" || partitionTableType == "none";
assert partitionTableType == "legacy" || partitionTableType == "efi" || partitionTableType == "hybrid" || partitionTableType == "none";
# We use -E offset=X below, which is only supported by e2fsprogs
assert partitionTableType != "none" -> fsType == "ext4";
@ -60,11 +71,12 @@ let format' = format; in let
vdi = "vdi";
vpc = "vhd";
raw = "img";
}.${format};
}.${format} or format;
rootPartition = { # switch-case
legacy = "1";
efi = "2";
hybrid = "3";
}.${partitionTableType};
partitionDiskScript = { # switch-case
@ -76,9 +88,18 @@ let format' = format; in let
efi = ''
parted --script $diskImage -- \
mklabel gpt \
mkpart ESP fat32 8MiB 256MiB \
mkpart ESP fat32 8MiB ${bootSize} \
set 1 boot on \
mkpart primary ext4 256MiB -1
mkpart primary ext4 ${bootSize} -1
'';
hybrid = ''
parted --script $diskImage -- \
mklabel gpt \
mkpart ESP fat32 8MiB ${bootSize} \
set 1 boot on \
mkpart no-fs 0 1024KiB \
set 2 bios_grub on \
mkpart primary ext4 ${bootSize} -1
'';
none = "";
}.${partitionTableType};
@ -129,19 +150,6 @@ let format' = format; in let
}
mkdir $out
diskImage=nixos.raw
truncate -s ${toString diskSize}M $diskImage
${partitionDiskScript}
${if partitionTableType != "none" then ''
# Get start & length of the root partition in sectors to $START and $SECTORS.
eval $(partx $diskImage -o START,SECTORS --nr ${rootPartition} --pairs)
mkfs.${fsType} -F -L ${label} $diskImage -E offset=$(sectorsToBytes $START) $(sectorsToKilobytes $SECTORS)K
'' else ''
mkfs.${fsType} -F -L ${label} $diskImage
''}
root="$PWD/root"
mkdir -p $root
@ -186,6 +194,31 @@ let format' = format; in let
nixos-install --root $root --no-bootloader --no-root-passwd \
--system ${config.system.build.toplevel} --channel ${channelSources} --substituters ""
diskImage=nixos.raw
${if diskSize == "auto" then ''
${if partitionTableType == "efi" || partitionTableType == "hybrid" then ''
additionalSpace=$(( ($(numfmt --from=iec '${additionalSpace}') + $(numfmt --from=iec '${bootSize}')) / 1000 ))
'' else ''
additionalSpace=$(( $(numfmt --from=iec '${additionalSpace}') / 1000 ))
''}
diskSize=$(( $(set -- $(du -d0 $root); echo "$1") + $additionalSpace ))
truncate -s "$diskSize"K $diskImage
'' else ''
truncate -s ${toString diskSize}M $diskImage
''}
${partitionDiskScript}
${if partitionTableType != "none" then ''
# Get start & length of the root partition in sectors to $START and $SECTORS.
eval $(partx $diskImage -o START,SECTORS --nr ${rootPartition} --pairs)
mkfs.${fsType} -F -L ${label} $diskImage -E offset=$(sectorsToBytes $START) $(sectorsToKilobytes $SECTORS)K
'' else ''
mkfs.${fsType} -F -L ${label} $diskImage
''}
echo "copying staging root to image..."
cptofs -p ${optionalString (partitionTableType != "none") "-P ${rootPartition}"} -t ${fsType} -i $diskImage $root/* /
'';
@ -219,7 +252,7 @@ in pkgs.vmTools.runInLinuxVM (
# Create the ESP and mount it. Unlike e2fsprogs, mkfs.vfat doesn't support an
# '-E offset=X' option, so we can't do this outside the VM.
${optionalString (partitionTableType == "efi") ''
${optionalString (partitionTableType == "efi" || partitionTableType == "hybrid") ''
mkdir -p /mnt/boot
mkfs.vfat -n ESP /dev/vda1
mount /dev/vda1 /mnt/boot

View file

@ -46,7 +46,10 @@ pkgs.stdenv.mkDerivation {
(
GLOBIGNORE=".:.."
shopt -u dotglob
cp -a --reflink=auto ./files/* -t ./rootImage/
for f in ./files/*; do
cp -a --reflink=auto -t ./rootImage/ "$f"
done
)
# Also include a manifest of the closures in a format suitable for nix-store --load-db

View file

@ -1,4 +1,4 @@
{ stdenv, closureInfo, xorriso, syslinux
{ stdenv, closureInfo, xorriso, syslinux, libossp_uuid
, # The file name of the resulting ISO image.
isoName ? "cd.iso"
@ -48,7 +48,7 @@ assert usbBootable -> isohybridMbrImage != "";
stdenv.mkDerivation {
name = isoName;
builder = ./make-iso9660-image.sh;
buildInputs = [ xorriso syslinux zstd ];
buildInputs = [ xorriso syslinux zstd libossp_uuid ];
inherit isoName bootable bootImage compressImage volumeID efiBootImage efiBootable isohybridMbrImage usbBootable;

View file

@ -99,7 +99,12 @@ done
mkdir -p $out/iso
# daed2280-b91e-42c0-aed6-82c825ca41f3 is an arbitrary namespace, to prevent
# independent applications from generating the same UUID for the same value.
# (the chance of that being problematic seems pretty slim here, but that's how
# version-5 UUIDs work)
xorriso="xorriso
-boot_image any gpt_disk_guid=$(uuid -v 5 daed2280-b91e-42c0-aed6-82c825ca41f3 $out | tr -d -)
-as mkisofs
-iso-level 3
-volid ${volumeID}
@ -118,15 +123,6 @@ xorriso="xorriso
$xorriso -output $out/iso/$isoName
if test -n "$usbBootable"; then
echo "Making image hybrid..."
if test -n "$efiBootable"; then
isohybrid --uefi $out/iso/$isoName
else
isohybrid $out/iso/$isoName
fi
fi
if test -n "$compressImage"; then
echo "Compressing image..."
zstd -T$NIX_BUILD_CORES --rm $out/iso/$isoName

View file

@ -36,7 +36,7 @@ let
// lib.optionalAttrs (opt ? example) { example = substFunction opt.example; }
// lib.optionalAttrs (opt ? default) { default = substFunction opt.default; }
// lib.optionalAttrs (opt ? type) { type = substFunction opt.type; }
// lib.optionalAttrs (opt ? relatedPackages && opt.relatedPackages != []) { relatedPackages = genRelatedPackages opt.relatedPackages; }
// lib.optionalAttrs (opt ? relatedPackages && opt.relatedPackages != []) { relatedPackages = genRelatedPackages opt.relatedPackages opt.name; }
);
# Generate DocBook documentation for a list of packages. This is
@ -48,7 +48,7 @@ let
# - a list: that will be interpreted as an attribute path from `pkgs`,
# - an attrset: that can specify `name`, `path`, `package`, `comment`
# (either of `name`, `path` is required, the rest are optional).
genRelatedPackages = packages:
genRelatedPackages = packages: optName:
let
unpack = p: if lib.isString p then { name = p; }
else if lib.isList p then { path = p; }
@ -58,7 +58,7 @@ let
title = args.title or null;
name = args.name or (lib.concatStringsSep "." args.path);
path = args.path or [ args.name ];
package = args.package or (lib.attrByPath path (throw "Invalid package attribute path `${toString path}'") pkgs);
package = args.package or (lib.attrByPath path (throw "Invalid package attribute path `${toString path}' found while evaluating `relatedPackages' of option `${optName}'") pkgs);
in "<listitem>"
+ "<para><literal>${lib.optionalString (title != null) "${title} aka "}pkgs.${name} (${package.meta.name})</literal>"
+ lib.optionalString (!package.meta.available) " <emphasis>[UNAVAILABLE]</emphasis>"

View file

@ -2,13 +2,18 @@
{ pkgs }:
let
zeroPad = n: if n < 10 then "0${toString n}" else toString n;
zeroPad = n:
pkgs.lib.optionalString (n < 16) "0" +
(if n > 255
then throw "Can't have more than 255 nets or nodes!"
else pkgs.lib.toHexString n);
in
{
rec {
qemuNicMac = net: machine: "52:54:00:12:${zeroPad net}:${zeroPad machine}";
qemuNICFlags = nic: net: machine:
[ "-device virtio-net-pci,netdev=vlan${toString nic},mac=52:54:00:12:${zeroPad net}:${zeroPad machine}"
[ "-device virtio-net-pci,netdev=vlan${toString nic},mac=${qemuNicMac net machine}"
"-netdev vde,id=vlan${toString nic},sock=$QEMU_VDE_SOCKET_${toString net}"
];
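For reference, a small sketch of what the refactored helpers evaluate to; the results follow from `zeroPad`/`toHexString` above, and the surrounding attrset is only for illustration:

```nix
# zeroPad left-pads the hex representation to two digits and rejects values
# above 255, so generated MAC addresses stay well formed.
{
  mac1 = qemuNicMac 1 2;     # "52:54:00:12:01:02"
  mac2 = qemuNicMac 16 255;  # "52:54:00:12:10:FF"
  # qemuNicMac 1 256 would throw "Can't have more than 255 nets or nodes!"
}
```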

View file

@ -2,9 +2,11 @@ pkgs: with pkgs.lib;
rec {
# Check whenever fileSystem is needed for boot
fsNeededForBoot = fs: fs.neededForBoot
|| elem fs.mountPoint [ "/" "/nix" "/nix/store" "/var" "/var/log" "/var/lib" "/etc" ];
# Check whether fileSystem is needed for boot. NOTE: Make sure
# pathsNeededForBoot is closed under the parent relationship, i.e. if /a/b/c
# is in the list, put /a and /a/b in as well.
pathsNeededForBoot = [ "/" "/nix" "/nix/store" "/var" "/var/log" "/var/lib" "/etc" ];
fsNeededForBoot = fs: fs.neededForBoot || elem fs.mountPoint pathsNeededForBoot;
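A minimal sketch of how the reworked predicate behaves; the filesystem attrsets are hypothetical and trimmed to the two attributes the function actually inspects:

```nix
# Both evaluate to true: the first via the explicit flag, the second because
# /var/log is listed in pathsNeededForBoot.
{
  explicit = fsNeededForBoot { mountPoint = "/data";    neededForBoot = true;  };
  implicit = fsNeededForBoot { mountPoint = "/var/log"; neededForBoot = false; };
}
```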
# Check whenever `b` depends on `a` as a fileSystem
fsBefore = a: b: a.mountPoint == b.device

View file

@ -63,8 +63,8 @@ in {
fsType = "ext4";
configFile = pkgs.writeText "configuration.nix"
''
{
imports = [ <nixpkgs/nixos/modules/virtualisation/amazon-image.nix> ];
{ modulesPath, ... }: {
imports = [ "''${modulesPath}/virtualisation/amazon-image.nix" ];
${optionalString config.ec2.hvm ''
ec2.hvm = true;
''}

View file

@ -1,292 +0,0 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.fonts.fontconfig;
fcBool = x: "<bool>" + (boolToString x) + "</bool>";
# back-supported fontconfig version and package
# version is used for font cache generation
supportVersion = "210";
supportPkg = pkgs."fontconfig_${supportVersion}";
# latest fontconfig version and package
# version is used for configuration folder name, /etc/fonts/VERSION/
# note: format differs from supportVersion and can not be used with makeCacheConf
latestVersion = pkgs.fontconfig.configVersion;
latestPkg = pkgs.fontconfig;
# supported version fonts.conf
supportFontsConf = pkgs.makeFontsConf { fontconfig = supportPkg; fontDirectories = config.fonts.fonts; };
# configuration file to read fontconfig cache
# version dependent
# priority 0
cacheConfSupport = makeCacheConf { version = supportVersion; };
cacheConfLatest = makeCacheConf {};
# generate the font cache setting file for a fontconfig version
# use latest when no version is passed
makeCacheConf = { version ? null }:
let
fcPackage = if version == null
then "fontconfig"
else "fontconfig_${version}";
makeCache = fontconfig: pkgs.makeFontsCache { inherit fontconfig; fontDirectories = config.fonts.fonts; };
cache = makeCache pkgs.${fcPackage};
cache32 = makeCache pkgs.pkgsi686Linux.${fcPackage};
in
pkgs.writeText "fc-00-nixos-cache.conf" ''
<?xml version='1.0'?>
<!DOCTYPE fontconfig SYSTEM 'fonts.dtd'>
<fontconfig>
<!-- Font directories -->
${concatStringsSep "\n" (map (font: "<dir>${font}</dir>") config.fonts.fonts)}
<!-- Pre-generated font caches -->
<cachedir>${cache}</cachedir>
${optionalString (pkgs.stdenv.isx86_64 && cfg.cache32Bit) ''
<cachedir>${cache32}</cachedir>
''}
</fontconfig>
'';
# local configuration file
localConf = pkgs.writeText "fc-local.conf" cfg.localConf;
# rendering settings configuration files
# priority 10
hintingConf = pkgs.writeText "fc-10-hinting.conf" ''
<?xml version='1.0'?>
<!DOCTYPE fontconfig SYSTEM 'fonts.dtd'>
<fontconfig>
<!-- Default rendering settings -->
<match target="pattern">
<edit mode="append" name="hinting">
${fcBool cfg.hinting.enable}
</edit>
<edit mode="append" name="autohint">
${fcBool cfg.hinting.autohint}
</edit>
<edit mode="append" name="hintstyle">
<const>hintslight</const>
</edit>
</match>
</fontconfig>
'';
antialiasConf = pkgs.writeText "fc-10-antialias.conf" ''
<?xml version='1.0'?>
<!DOCTYPE fontconfig SYSTEM 'fonts.dtd'>
<fontconfig>
<!-- Default rendering settings -->
<match target="pattern">
<edit mode="append" name="antialias">
${fcBool cfg.antialias}
</edit>
</match>
</fontconfig>
'';
subpixelConf = pkgs.writeText "fc-10-subpixel.conf" ''
<?xml version='1.0'?>
<!DOCTYPE fontconfig SYSTEM 'fonts.dtd'>
<fontconfig>
<!-- Default rendering settings -->
<match target="pattern">
<edit mode="append" name="rgba">
<const>${cfg.subpixel.rgba}</const>
</edit>
<edit mode="append" name="lcdfilter">
<const>lcd${cfg.subpixel.lcdfilter}</const>
</edit>
</match>
</fontconfig>
'';
dpiConf = pkgs.writeText "fc-11-dpi.conf" ''
<?xml version='1.0'?>
<!DOCTYPE fontconfig SYSTEM 'fonts.dtd'>
<fontconfig>
<match target="pattern">
<edit name="dpi" mode="assign">
<double>${toString cfg.dpi}</double>
</edit>
</match>
</fontconfig>
'';
# default fonts configuration file
# priority 52
defaultFontsConf =
let genDefault = fonts: name:
optionalString (fonts != []) ''
<alias>
<family>${name}</family>
<prefer>
${concatStringsSep ""
(map (font: ''
<family>${font}</family>
'') fonts)}
</prefer>
</alias>
'';
in
pkgs.writeText "fc-52-nixos-default-fonts.conf" ''
<?xml version='1.0'?>
<!DOCTYPE fontconfig SYSTEM 'fonts.dtd'>
<fontconfig>
<!-- Default fonts -->
${genDefault cfg.defaultFonts.sansSerif "sans-serif"}
${genDefault cfg.defaultFonts.serif "serif"}
${genDefault cfg.defaultFonts.monospace "monospace"}
</fontconfig>
'';
# reject Type 1 fonts
# priority 53
rejectType1 = pkgs.writeText "fc-53-nixos-reject-type1.conf" ''
<?xml version="1.0"?>
<!DOCTYPE fontconfig SYSTEM "fonts.dtd">
<fontconfig>
<!-- Reject Type 1 fonts -->
<selectfont>
<rejectfont>
<pattern>
<patelt name="fontformat"><string>Type 1</string></patelt>
</pattern>
</rejectfont>
</selectfont>
</fontconfig>
'';
# The configuration to be included in /etc/font/
penultimateConf = pkgs.runCommand "fontconfig-penultimate-conf" {
preferLocalBuild = true;
} ''
support_folder=$out/etc/fonts/conf.d
latest_folder=$out/etc/fonts/${latestVersion}/conf.d
mkdir -p $support_folder
mkdir -p $latest_folder
# fonts.conf
ln -s ${supportFontsConf} $support_folder/../fonts.conf
ln -s ${latestPkg.out}/etc/fonts/fonts.conf \
$latest_folder/../fonts.conf
# fontconfig-penultimate various configuration files
ln -s ${pkgs.fontconfig-penultimate}/etc/fonts/conf.d/*.conf \
$support_folder
ln -s ${pkgs.fontconfig-penultimate}/etc/fonts/conf.d/*.conf \
$latest_folder
ln -s ${cacheConfSupport} $support_folder/00-nixos-cache.conf
ln -s ${cacheConfLatest} $latest_folder/00-nixos-cache.conf
rm $support_folder/10-antialias.conf $latest_folder/10-antialias.conf
ln -s ${antialiasConf} $support_folder/10-antialias.conf
ln -s ${antialiasConf} $latest_folder/10-antialias.conf
rm $support_folder/10-hinting.conf $latest_folder/10-hinting.conf
ln -s ${hintingConf} $support_folder/10-hinting.conf
ln -s ${hintingConf} $latest_folder/10-hinting.conf
${optionalString cfg.useEmbeddedBitmaps ''
rm $support_folder/10-no-embedded-bitmaps.conf
rm $latest_folder/10-no-embedded-bitmaps.conf
''}
rm $support_folder/10-subpixel.conf $latest_folder/10-subpixel.conf
ln -s ${subpixelConf} $support_folder/10-subpixel.conf
ln -s ${subpixelConf} $latest_folder/10-subpixel.conf
${optionalString (cfg.dpi != 0) ''
ln -s ${dpiConf} $support_folder/11-dpi.conf
ln -s ${dpiConf} $latest_folder/11-dpi.conf
''}
# 50-user.conf
${optionalString (!cfg.includeUserConf) ''
rm $support_folder/50-user.conf
rm $latest_folder/50-user.conf
''}
# 51-local.conf
rm $latest_folder/51-local.conf
substitute \
${pkgs.fontconfig-penultimate}/etc/fonts/conf.d/51-local.conf \
$latest_folder/51-local.conf \
--replace local.conf /etc/fonts/${latestVersion}/local.conf
# local.conf (indirect priority 51)
${optionalString (cfg.localConf != "") ''
ln -s ${localConf} $support_folder/../local.conf
ln -s ${localConf} $latest_folder/../local.conf
''}
# 52-nixos-default-fonts.conf
ln -s ${defaultFontsConf} $support_folder/52-nixos-default-fonts.conf
ln -s ${defaultFontsConf} $latest_folder/52-nixos-default-fonts.conf
# 53-no-bitmaps.conf
${optionalString cfg.allowBitmaps ''
rm $support_folder/53-no-bitmaps.conf
rm $latest_folder/53-no-bitmaps.conf
''}
${optionalString (!cfg.allowType1) ''
# 53-nixos-reject-type1.conf
ln -s ${rejectType1} $support_folder/53-nixos-reject-type1.conf
ln -s ${rejectType1} $latest_folder/53-nixos-reject-type1.conf
''}
'';
in
{
options = {
fonts = {
fontconfig = {
penultimate = {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Enable fontconfig-penultimate settings to supplement the
NixOS defaults by providing per-font rendering defaults and
metric aliases.
'';
};
};
};
};
};
config = mkIf (config.fonts.fontconfig.enable && config.fonts.fontconfig.penultimate.enable) {
fonts.fontconfig.confPackages = [ penultimateConf ];
};
}

View file

@ -1,11 +1,6 @@
/*
NixOS support 2 fontconfig versions, "support" and "latest".
- "latest" refers to default fontconfig package (pkgs.fontconfig).
configuration files are linked to /etc/fonts/VERSION/conf.d/
- "support" refers to supportPkg (pkgs."fontconfig_${supportVersion}").
configuration files are linked to /etc/fonts/conf.d/
Configuration files are linked to /etc/fonts/${pkgs.fontconfig.configVersion}/conf.d/
This module generates a package containing configuration files and links it in /etc/fonts.
@ -22,40 +17,21 @@ let
cfg = config.fonts.fontconfig;
fcBool = x: "<bool>" + (boolToString x) + "</bool>";
# back-supported fontconfig version and package
# version is used for font cache generation
supportVersion = "210";
supportPkg = pkgs."fontconfig_${supportVersion}";
# latest fontconfig version and package
# version is used for configuration folder name, /etc/fonts/VERSION/
# note: format differs from supportVersion and can not be used with makeCacheConf
latestVersion = pkgs.fontconfig.configVersion;
latestPkg = pkgs.fontconfig;
# supported version fonts.conf
supportFontsConf = pkgs.makeFontsConf { fontconfig = supportPkg; fontDirectories = config.fonts.fonts; };
pkg = pkgs.fontconfig;
# configuration file to read fontconfig cache
# version dependent
# priority 0
cacheConfSupport = makeCacheConf { version = supportVersion; };
cacheConfLatest = makeCacheConf {};
cacheConf = makeCacheConf {};
# generate the font cache setting file for a fontconfig version
# use latest when no version is passed
# generate the font cache setting file
# When cross-compiling, we cant generate the cache, so we skip the
# <cachedir> part. fontconfig still works but is a little slower in
# looking things up.
makeCacheConf = { version ? null }:
makeCacheConf = { }:
let
fcPackage = if version == null
then "fontconfig"
else "fontconfig_${version}";
makeCache = fontconfig: pkgs.makeFontsCache { inherit fontconfig; fontDirectories = config.fonts.fonts; };
cache = makeCache pkgs.${fcPackage};
cache32 = makeCache pkgs.pkgsi686Linux.${fcPackage};
cache = makeCache pkgs.fontconfig;
cache32 = makeCache pkgs.pkgsi686Linux.fontconfig;
in
pkgs.writeText "fc-00-nixos-cache.conf" ''
<?xml version='1.0'?>
@ -200,63 +176,54 @@ let
confPkg = pkgs.runCommand "fontconfig-conf" {
preferLocalBuild = true;
} ''
support_folder=$out/etc/fonts/conf.d
latest_folder=$out/etc/fonts/${latestVersion}/conf.d
mkdir -p $support_folder
mkdir -p $latest_folder
dst=$out/etc/fonts/${pkg.configVersion}/conf.d
mkdir -p $dst
# fonts.conf
ln -s ${supportFontsConf} $support_folder/../fonts.conf
ln -s ${latestPkg.out}/etc/fonts/fonts.conf \
$latest_folder/../fonts.conf
ln -s ${pkg.out}/etc/fonts/fonts.conf \
$dst/../fonts.conf
# TODO: remove this legacy symlink once people stop using packages built before #95358 was merged
ln -s /etc/fonts/${pkg.configVersion}/fonts.conf \
$out/etc/fonts/fonts.conf
# fontconfig default config files
ln -s ${supportPkg.out}/etc/fonts/conf.d/*.conf \
$support_folder/
ln -s ${latestPkg.out}/etc/fonts/conf.d/*.conf \
$latest_folder/
ln -s ${pkg.out}/etc/fonts/conf.d/*.conf \
$dst/
# update latest 51-local.conf path to look at the latest local.conf
rm $latest_folder/51-local.conf
# update 51-local.conf path to look at local.conf
rm $dst/51-local.conf
substitute ${latestPkg.out}/etc/fonts/conf.d/51-local.conf \
$latest_folder/51-local.conf \
--replace local.conf /etc/fonts/${latestVersion}/local.conf
substitute ${pkg.out}/etc/fonts/conf.d/51-local.conf \
$dst/51-local.conf \
--replace local.conf /etc/fonts/${pkg.configVersion}/local.conf
# 00-nixos-cache.conf
ln -s ${cacheConfSupport} \
$support_folder/00-nixos-cache.conf
ln -s ${cacheConfLatest} $latest_folder/00-nixos-cache.conf
ln -s ${cacheConf} $dst/00-nixos-cache.conf
# 10-nixos-rendering.conf
ln -s ${renderConf} $support_folder/10-nixos-rendering.conf
ln -s ${renderConf} $latest_folder/10-nixos-rendering.conf
ln -s ${renderConf} $dst/10-nixos-rendering.conf
# 50-user.conf
${optionalString (!cfg.includeUserConf) ''
rm $support_folder/50-user.conf
rm $latest_folder/50-user.conf
# Since latest fontconfig looks for default files inside the package,
# we had to move this one elsewhere to be able to exclude it here.
${optionalString cfg.includeUserConf ''
ln -s ${pkg.out}/etc/fonts/conf.d.bak/50-user.conf $dst/50-user.conf
''}
# local.conf (indirect priority 51)
${optionalString (cfg.localConf != "") ''
ln -s ${localConf} $support_folder/../local.conf
ln -s ${localConf} $latest_folder/../local.conf
ln -s ${localConf} $dst/../local.conf
''}
# 52-nixos-default-fonts.conf
ln -s ${defaultFontsConf} $support_folder/52-nixos-default-fonts.conf
ln -s ${defaultFontsConf} $latest_folder/52-nixos-default-fonts.conf
ln -s ${defaultFontsConf} $dst/52-nixos-default-fonts.conf
# 53-no-bitmaps.conf
ln -s ${rejectBitmaps} $support_folder/53-no-bitmaps.conf
ln -s ${rejectBitmaps} $latest_folder/53-no-bitmaps.conf
ln -s ${rejectBitmaps} $dst/53-no-bitmaps.conf
${optionalString (!cfg.allowType1) ''
# 53-nixos-reject-type1.conf
ln -s ${rejectType1} $support_folder/53-nixos-reject-type1.conf
ln -s ${rejectType1} $latest_folder/53-nixos-reject-type1.conf
ln -s ${rejectType1} $dst/53-nixos-reject-type1.conf
''}
'';
@ -490,7 +457,7 @@ in
environment.systemPackages = [ pkgs.fontconfig ];
environment.etc.fonts.source = "${fontconfigEtc}/etc/fonts/";
})
(mkIf (cfg.enable && !cfg.penultimate.enable) {
(mkIf cfg.enable {
fonts.fontconfig.confPackages = [ confPkg ];
})
];

View file

@ -27,6 +27,7 @@ with lib;
fonts.fontconfig.enable = false;
nixpkgs.overlays = singleton (const (super: {
cairo = super.cairo.override { x11Support = false; };
dbus = super.dbus.override { x11Support = false; };
networkmanager-fortisslvpn = super.networkmanager-fortisslvpn.override { withGnome = false; };
networkmanager-l2tp = super.networkmanager-l2tp.override { withGnome = false; };
@ -35,6 +36,7 @@ with lib;
networkmanager-vpnc = super.networkmanager-vpnc.override { withGnome = false; };
networkmanager-iodine = super.networkmanager-iodine.override { withGnome = false; };
gobject-introspection = super.gobject-introspection.override { x11Support = false; };
qemu = super.qemu.override { gtkSupport = false; spiceSupport = false; sdlSupport = false; };
}));
};
}

View file

@ -33,14 +33,11 @@ let
pkgs.ncurses
pkgs.netcat
config.programs.ssh.package
pkgs.perl
pkgs.procps
pkgs.rsync
pkgs.strace
pkgs.su
pkgs.time
pkgs.utillinux
pkgs.which # 88K size
pkgs.which
pkgs.zstd
];

View file

@ -581,7 +581,7 @@ in {
# password or an SSH authorized key. Privileged accounts are
# root and users in the wheel group.
assertion = !cfg.mutableUsers ->
any id (mapAttrsToList (name: cfg:
any id ((mapAttrsToList (name: cfg:
(name == "root"
|| cfg.group == "wheel"
|| elem "wheel" cfg.extraGroups)
@ -591,7 +591,9 @@ in {
|| cfg.passwordFile != null
|| cfg.openssh.authorizedKeys.keys != []
|| cfg.openssh.authorizedKeys.keyFiles != [])
) cfg.users);
) cfg.users) ++ [
config.security.googleOsLogin.enable
]);
message = ''
Neither the root account nor any wheel user has a password or SSH authorized key.
You must set one to prevent being locked out of your system.'';

View file

@ -43,7 +43,6 @@ in
serviceConfig = {
ExecStart = "${cfg.package}/bin/ckb-next-daemon ${optionalString (cfg.gid != null) "--gid=${builtins.toString cfg.gid}"}";
Restart = "on-failure";
StandardOutput = "syslog";
};
};
};

View file

@ -5,9 +5,44 @@ with lib;
let
cfg = config.hardware.logitech;
in {
vendor = "046d";
daemon = "g15daemon";
in
{
imports = [
(mkRenamedOptionModule [ "hardware" "logitech" "enable" ] [ "hardware" "logitech" "wireless" "enable" ])
(mkRenamedOptionModule [ "hardware" "logitech" "enableGraphical" ] [ "hardware" "logitech" "wireless" "enableGraphical" ])
];
options.hardware.logitech = {
enable = mkEnableOption "Logitech Devices";
lcd = {
enable = mkEnableOption "Logitech LCD Devices";
startWhenNeeded = mkOption {
type = types.bool;
default = true;
description = ''
Only run the service when a supported device is plugged in.
'';
};
devices = mkOption {
type = types.listOf types.str;
default = [ "0a07" "c222" "c225" "c227" "c251" ];
description = ''
List of USB device ids supported by g15daemon.
</para>
<para>
You most likely do not need to change this.
'';
};
};
wireless = {
enable = mkEnableOption "Logitech Wireless Devices";
enableGraphical = mkOption {
type = types.bool;
@ -15,14 +50,47 @@ in {
description = "Enable graphical support applications.";
};
};
};
config = lib.mkIf cfg.enable {
environment.systemPackages = [
pkgs.ltunify
] ++ lib.optional cfg.enableGraphical pkgs.solaar;
config = lib.mkIf (cfg.wireless.enable || cfg.lcd.enable) {
environment.systemPackages = []
++ lib.optional cfg.wireless.enable pkgs.ltunify
++ lib.optional cfg.wireless.enableGraphical pkgs.solaar;
services.udev = {
# ltunifi and solaar both provide udev rules but the most up-to-date have been split
# out into a dedicated derivation
services.udev.packages = with pkgs; [ logitech-udev-rules ];
packages = []
++ lib.optional cfg.wireless.enable pkgs.logitech-udev-rules
++ lib.optional cfg.lcd.enable pkgs.g15daemon;
extraRules = ''
# nixos: hardware.logitech.lcd
'' + lib.concatMapStringsSep "\n" (
dev:
''ACTION=="add", SUBSYSTEMS=="usb", ATTRS{idVendor}=="${vendor}", ATTRS{idProduct}=="${dev}", TAG+="systemd", ENV{SYSTEMD_WANTS}+="${daemon}.service"''
) cfg.lcd.devices;
};
systemd.services."${daemon}" = lib.mkIf cfg.lcd.enable {
description = "Logitech LCD Support Daemon";
documentation = [ "man:g15daemon(1)" ];
wantedBy = lib.mkIf (! cfg.lcd.startWhenNeeded) [ "multi-user.target" ];
serviceConfig = {
Type = "forking";
ExecStart = "${pkgs.g15daemon}/bin/g15daemon";
# we patch it to write to /run/g15daemon/g15daemon.pid instead of
# /run/g15daemon.pid so systemd will do the cleanup for us.
PIDFile = "/run/${daemon}/g15daemon.pid";
PrivateTmp = true;
PrivateNetwork = true;
ProtectHome = "tmpfs";
ProtectSystem = "full"; # strict doesn't work
RuntimeDirectory = daemon;
Restart = "on-failure";
};
};
};
}
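
A hypothetical `configuration.nix` snippet using the reworked options above (option names come from this hunk; whether you want both wireless and LCD support obviously depends on your hardware):

```nix
{
  hardware.logitech.wireless.enable = true;
  hardware.logitech.wireless.enableGraphical = true;  # also installs solaar
  hardware.logitech.lcd.enable = true;                # starts g15daemon on demand via udev
}
```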

View file

@ -0,0 +1,16 @@
{ lib, pkgs, config, ...}:
with lib;
{
options.hardware.video.hidpi.enable = mkEnableOption "Font/DPI configuration optimized for HiDPI displays";
config = mkIf config.hardware.video.hidpi.enable {
console.font = lib.mkDefault "${pkgs.terminus_font}/share/consolefonts/ter-v32n.psf.gz";
# Needed when typing in passwords for full disk encryption
console.earlySetup = mkDefault true;
boot.loader.systemd-boot.consoleMode = mkDefault "1";
# TODO Find reasonable defaults X11 & wayland
};
}
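
A minimal sketch of opting into the new HiDPI defaults from `configuration.nix`:

```nix
{
  # Pulls in the larger console font, early console setup and
  # the systemd-boot console mode set by the module above.
  hardware.video.hidpi.enable = true;
}
```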

View file

@ -26,7 +26,7 @@ in
Whether to enable <command>uvcvideo</command> dynamic controls.
Note that enabling this brings the <command>uvcdynctrl</command> tool
into your environement and register all dynamic controls from
into your environment and register all dynamic controls from
specified <command>packages</command> to the <command>uvcvideo</command> driver.
'';
};

View file

@ -0,0 +1,29 @@
{ config, lib, ... }:
with lib;
let
cfg = config.hardware.xpadneo;
in
{
options.hardware.xpadneo = {
enable = mkEnableOption "the xpadneo driver for Xbox One wireless controllers";
};
config = mkIf cfg.enable {
boot = {
# Must disable Enhanced Retransmission Mode to support bluetooth pairing
# https://wiki.archlinux.org/index.php/Gamepad#Connect_Xbox_Wireless_Controller_with_Bluetooth
extraModprobeConfig =
mkIf
config.hardware.bluetooth.enable
"options bluetooth disable_ertm=1";
extraModulePackages = with config.boot.kernelPackages; [ xpadneo ];
kernelModules = [ "hid_xpadneo" ];
};
};
meta = {
maintainers = with maintainers; [ metadark ];
};
}
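
A minimal sketch of enabling the new driver together with Bluetooth, so the `disable_ertm` modprobe option above actually takes effect:

```nix
{
  hardware.bluetooth.enable = true;
  hardware.xpadneo.enable = true;
}
```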

View file

@ -417,6 +417,14 @@ in
'';
};
isoImage.squashfsCompression = mkOption {
default = "xz -Xdict-size 100%";
description = ''
Compression settings to use for the squashfs nix store.
'';
example = "zstd -Xcompression-level 6";
};
isoImage.edition = mkOption {
default = "";
description = ''
@ -614,6 +622,7 @@ in
# Create the squashfs image that contains the Nix store.
system.build.squashfsStore = pkgs.callPackage ../../../lib/make-squashfs.nix {
storeContents = config.isoImage.storeContents;
comp = config.isoImage.squashfsCompression;
};
# Individual files to be included on the CD, outside of the Nix
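
A hypothetical ISO configuration overriding the new compression knob (the zstd level shown is just the example value from the option, trading compression ratio for faster builds):

```nix
{
  isoImage.squashfsCompression = "zstd -Xcompression-level 6";
}
```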

View file

@ -1,4 +1,4 @@
#! @shell@ -e
#! @runtimeShell@ -e
# Shows the usage of this command to the user

View file

@ -1,4 +1,4 @@
#! @shell@
#! @runtimeShell@
set -e

View file

@ -497,8 +497,8 @@ if (-f $fb_modes_file && -r $fb_modes_file) {
$modes =~ m/([0-9]+)x([0-9]+)/;
my $console_width = $1, my $console_height = $2;
if ($console_width > 1920) {
push @attrs, "# High-DPI console";
push @attrs, 'console.font = lib.mkDefault "${pkgs.terminus_font}/share/consolefonts/ter-u28n.psf.gz";';
push @attrs, "# high-resolution display";
push @attrs, 'hardware.video.hidpi.enable = lib.mkDefault true;';
}
}

View file

@ -1,4 +1,4 @@
#! @shell@
#! @runtimeShell@
set -e
shopt -s nullglob

View file

@ -1,6 +1,6 @@
#! @shell@
#! @runtimeShell@
if [ -x "@shell@" ]; then export SHELL="@shell@"; fi;
if [ -x "@runtimeShell@" ]; then export SHELL="@runtimeShell@"; fi;
set -e
set -o pipefail

View file

@ -1,4 +1,4 @@
#! @shell@
#! @runtimeShell@
case "$1" in
-h|--help)

View file

@ -14,11 +14,13 @@ let
nixos-build-vms = makeProg {
name = "nixos-build-vms";
src = ./nixos-build-vms/nixos-build-vms.sh;
inherit (pkgs) runtimeShell;
};
nixos-install = makeProg {
name = "nixos-install";
src = ./nixos-install.sh;
inherit (pkgs) runtimeShell;
nix = config.nix.package.out;
path = makeBinPath [ nixos-enter ];
};
@ -28,6 +30,7 @@ let
makeProg {
name = "nixos-rebuild";
src = ./nixos-rebuild.sh;
inherit (pkgs) runtimeShell;
nix = config.nix.package.out;
nix_x86_64_linux = fallback.x86_64-linux;
nix_i686_linux = fallback.i686-linux;
@ -50,6 +53,7 @@ let
nixos-version = makeProg {
name = "nixos-version";
src = ./nixos-version.sh;
inherit (pkgs) runtimeShell;
inherit (config.system.nixos) version codeName revision;
inherit (config.system) configurationRevision;
json = builtins.toJSON ({
@ -64,6 +68,7 @@ let
nixos-enter = makeProg {
name = "nixos-enter";
src = ./nixos-enter.sh;
inherit (pkgs) runtimeShell;
};
in

View file

@ -198,7 +198,7 @@ in
bosun = 161;
kubernetes = 162;
peerflix = 163;
chronos = 164;
#chronos = 164; # removed 2020-08-15
gitlab = 165;
tox-bootstrapd = 166;
cadvisor = 167;
@ -247,7 +247,7 @@ in
bepasty = 215;
# pumpio = 216; # unused, removed 2018-02-24
nm-openvpn = 217;
mathics = 218;
# mathics = 218; # unused, removed 2020-08-15
ejabberd = 219;
postsrsd = 220;
opendkim = 221;
@ -345,6 +345,7 @@ in
zoneminder = 314;
paperless = 315;
#mailman = 316; # removed 2019-08-30
zigbee2mqtt = 317;
# When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
@ -645,6 +646,7 @@ in
zoneminder = 314;
paperless = 315;
#mailman = 316; # removed 2019-08-30
zigbee2mqtt = 317;
# When adding a gid, make sure it doesn't match an existing
# uid. Users and groups with the same name should have equal

View file

@ -1,7 +1,6 @@
[
./config/debug-info.nix
./config/fonts/fontconfig.nix
./config/fonts/fontconfig-penultimate.nix
./config/fonts/fontdir.nix
./config/fonts/fonts.nix
./config/fonts/ghostscript.nix
@ -72,9 +71,11 @@
./hardware/video/capture/mwprocapture.nix
./hardware/video/bumblebee.nix
./hardware/video/displaylink.nix
./hardware/video/hidpi.nix
./hardware/video/nvidia.nix
./hardware/video/uvcvideo/default.nix
./hardware/video/webcam/facetimehd.nix
./hardware/xpadneo.nix
./i18n/input-method/default.nix
./i18n/input-method/fcitx.nix
./i18n/input-method/ibus.nix
@ -154,6 +155,7 @@
./programs/ssmtp.nix
./programs/sysdig.nix
./programs/systemtap.nix
./programs/steam.nix
./programs/sway.nix
./programs/system-config-printer.nix
./programs/thefuck.nix
@ -328,6 +330,7 @@
./services/development/bloop.nix
./services/development/hoogle.nix
./services/development/jupyter/default.nix
./services/development/jupyterhub/default.nix
./services/development/lorri.nix
./services/editors/emacs.nix
./services/editors/infinoted.nix
@ -462,14 +465,11 @@
./services/misc/leaps.nix
./services/misc/lidarr.nix
./services/misc/mame.nix
./services/misc/mathics.nix
./services/misc/matrix-appservice-discord.nix
./services/misc/matrix-synapse.nix
./services/misc/mautrix-telegram.nix
./services/misc/mbpfan.nix
./services/misc/mediatomb.nix
./services/misc/mesos-master.nix
./services/misc/mesos-slave.nix
./services/misc/metabase.nix
./services/misc/mwlib.nix
./services/misc/nix-daemon.nix
@ -485,6 +485,7 @@
./services/misc/parsoid.nix
./services/misc/plex.nix
./services/misc/tautulli.nix
./services/misc/pinnwand.nix
./services/misc/pykms.nix
./services/misc/radarr.nix
./services/misc/redmine.nix
@ -510,6 +511,7 @@
./services/misc/uhub.nix
./services/misc/weechat.nix
./services/misc/xmr-stak.nix
./services/misc/zigbee2mqtt.nix
./services/misc/zoneminder.nix
./services/misc/zookeeper.nix
./services/monitoring/alerta.nix
@ -642,6 +644,8 @@
./services/networking/iperf3.nix
./services/networking/ircd-hybrid/default.nix
./services/networking/iwd.nix
./services/networking/jicofo.nix
./services/networking/jitsi-videobridge.nix
./services/networking/keepalived/default.nix
./services/networking/keybase.nix
./services/networking/kippo.nix
@ -670,6 +674,7 @@
./services/networking/nat.nix
./services/networking/ndppd.nix
./services/networking/networkmanager.nix
./services/networking/nextdns.nix
./services/networking/nftables.nix
./services/networking/ngircd.nix
./services/networking/nghttpx/default.nix
@ -777,10 +782,8 @@
./services/networking/znc/default.nix
./services/printing/cupsd.nix
./services/scheduling/atd.nix
./services/scheduling/chronos.nix
./services/scheduling/cron.nix
./services/scheduling/fcron.nix
./services/scheduling/marathon.nix
./services/search/elasticsearch.nix
./services/search/elasticsearch-curator.nix
./services/search/hound.nix
@ -811,6 +814,7 @@
./services/security/torsocks.nix
./services/security/usbguard.nix
./services/security/vault.nix
./services/security/yubikey-agent.nix
./services/system/cloud-init.nix
./services/system/dbus.nix
./services/system/earlyoom.nix
@ -830,6 +834,7 @@
./services/ttys/gpm.nix
./services/ttys/kmscon.nix
./services/wayland/cage.nix
./services/video/mirakurun.nix
./services/web-apps/atlassian/confluence.nix
./services/web-apps/atlassian/crowd.nix
./services/web-apps/atlassian/jira.nix
@ -847,6 +852,7 @@
./services/web-apps/icingaweb2/module-monitoring.nix
./services/web-apps/ihatemoney
./services/web-apps/jirafeau.nix
./services/web-apps/jitsi-meet.nix
./services/web-apps/limesurvey.nix
./services/web-apps/mattermost.nix
./services/web-apps/mediawiki.nix
@ -859,6 +865,7 @@
./services/web-apps/moinmoin.nix
./services/web-apps/restya-board.nix
./services/web-apps/sogo.nix
./services/web-apps/rss-bridge.nix
./services/web-apps/tt-rss.nix
./services/web-apps/trac.nix
./services/web-apps/trilium.nix
@ -882,6 +889,7 @@
./services/web-servers/meguca.nix
./services/web-servers/mighttpd2.nix
./services/web-servers/minio.nix
./services/web-servers/molly-brown.nix
./services/web-servers/nginx/default.nix
./services/web-servers/nginx/gitweb.nix
./services/web-servers/phpfpm/default.nix
@ -916,6 +924,7 @@
./services/x11/gdk-pixbuf.nix
./services/x11/imwheel.nix
./services/x11/redshift.nix
./services/x11/urserver.nix
./services/x11/urxvtd.nix
./services/x11/window-managers/awesome.nix
./services/x11/window-managers/default.nix

View file

@ -26,6 +26,7 @@
pkgs.fuse
pkgs.fuse3
pkgs.sshfs-fuse
pkgs.rsync
pkgs.socat
pkgs.screen

View file

@ -70,6 +70,7 @@ in
agent.pinentryFlavor = mkOption {
type = types.nullOr (types.enum pkgs.pinentry.flavors);
example = "gnome3";
default = defaultPinentryFlavor;
description = ''
Which pinentry interface to use. If not null, the path to the
pinentry binary will be passed to gpg-agent via commandline and
@ -91,8 +92,6 @@ in
};
config = mkIf cfg.agent.enable {
programs.gnupg.agent.pinentryFlavor = mkDefault defaultPinentryFlavor;
# This overrides the systemd user unit shipped with the gnupg package
systemd.user.services.gpg-agent = mkIf (cfg.agent.pinentryFlavor != null) {
serviceConfig.ExecStart = [ "" ''

View file

@ -0,0 +1,25 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.programs.steam;
in {
options.programs.steam.enable = mkEnableOption "steam";
config = mkIf cfg.enable {
hardware.opengl = { # this fixes the "glXChooseVisual failed" bug, context: https://github.com/NixOS/nixpkgs/issues/47932
enable = true;
driSupport32Bit = true;
};
# optionally enable 32bit pulseaudio support if pulseaudio is enabled
hardware.pulseaudio.support32Bit = config.hardware.pulseaudio.enable;
hardware.steam-hardware.enable = true;
environment.systemPackages = [ pkgs.steam ];
};
meta.maintainers = with maintainers; [ mkg20001 ];
}
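
With the new module, a Steam setup reduces to a single option; a minimal sketch:

```nix
{
  # Enables 32-bit OpenGL/PulseAudio support and steam-hardware as wired up above.
  programs.steam.enable = true;
}
```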

View file

@ -17,8 +17,12 @@ with lib;
(mkAliasOptionModule [ "environment" "checkConfigurationOptions" ] [ "_module" "check" ])
# Completely removed modules
(mkRemovedOptionModule [ "fonts" "fontconfig" "penultimate" ] "The corresponding package has removed from nixpkgs.")
(mkRemovedOptionModule [ "services" "chronos" ] "The corresponding package was removed from nixpkgs.")
(mkRemovedOptionModule [ "services" "firefox" "syncserver" "user" ] "")
(mkRemovedOptionModule [ "services" "firefox" "syncserver" "group" ] "")
(mkRemovedOptionModule [ "services" "marathon" ] "The corresponding package was removed from nixpkgs.")
(mkRemovedOptionModule [ "services" "mesos" ] "The corresponding package was removed from nixpkgs.")
(mkRemovedOptionModule [ "services" "winstone" ] "The corresponding package was removed from nixpkgs.")
(mkRemovedOptionModule [ "networking" "vpnc" ] "Use environment.etc.\"vpnc/service.conf\" instead.")
(mkRemovedOptionModule [ "environment" "blcr" "enable" ] "The BLCR module has been removed")
@ -28,6 +32,7 @@ with lib;
(mkRemovedOptionModule [ "services" "osquery" ] "The osquery module has been removed")
(mkRemovedOptionModule [ "services" "fourStore" ] "The fourStore module has been removed")
(mkRemovedOptionModule [ "services" "fourStoreEndpoint" ] "The fourStoreEndpoint module has been removed")
(mkRemovedOptionModule [ "services" "mathics" ] "The Mathics module has been removed")
(mkRemovedOptionModule [ "programs" "way-cooler" ] ("way-cooler is abandoned by its author: " +
"https://way-cooler.org/blog/2020/01/09/way-cooler-post-mortem.html"))
(mkRemovedOptionModule [ "services" "xserver" "multitouch" ] ''

View file

@ -23,16 +23,16 @@ let
type = types.nullOr types.str;
default = null;
description = ''
ACME Directory Resource URI. Defaults to let's encrypt
ACME Directory Resource URI. Defaults to Let's Encrypt's
production endpoint,
https://acme-v02.api.letsencrypt.org/directory, if unset.
<link xlink:href="https://acme-v02.api.letsencrypt.org/directory"/>, if unset.
'';
};
domain = mkOption {
type = types.str;
default = name;
description = "Domain to fetch certificate for (defaults to the entry name)";
description = "Domain to fetch certificate for (defaults to the entry name).";
};
email = mkOption {
@ -103,7 +103,7 @@ let
description = ''
Key type to use for private keys.
For an up to date list of supported values check the --key-type option
at https://go-acme.github.io/lego/usage/cli/#usage.
at <link xlink:href="https://go-acme.github.io/lego/usage/cli/#usage"/>.
'';
};
@ -113,7 +113,7 @@ let
example = "route53";
description = ''
DNS Challenge provider. For a list of supported providers, see the "code"
field of the DNS providers listed at https://go-acme.github.io/lego/dns/.
field of the DNS providers listed at <link xlink:href="https://go-acme.github.io/lego/dns/"/>.
'';
};
@ -123,7 +123,7 @@ let
Path to an EnvironmentFile for the cert's service containing any required and
optional environment variables for your selected dnsProvider.
To find out what values you need to set, consult the documentation at
https://go-acme.github.io/lego/dns/ for the corresponding dnsProvider.
<link xlink:href="https://go-acme.github.io/lego/dns/"/> for the corresponding dnsProvider.
'';
example = "/var/src/secrets/example.org-route53-api-token";
};
@ -169,7 +169,7 @@ in
(mkRemovedOptionModule [ "security" "acme" "production" ] ''
Use security.acme.server to define your staging ACME server URL instead.
To use the let's encrypt staging server, use security.acme.server =
To use Let's Encrypt's staging server, use security.acme.server =
"https://acme-staging-v02.api.letsencrypt.org/directory".
''
)
@ -207,9 +207,9 @@ in
type = types.nullOr types.str;
default = null;
description = ''
ACME Directory Resource URI. Defaults to let's encrypt
ACME Directory Resource URI. Defaults to Let's Encrypt's
production endpoint,
<literal>https://acme-v02.api.letsencrypt.org/directory</literal>, if unset.
<link xlink:href="https://acme-v02.api.letsencrypt.org/directory"/>, if unset.
'';
};
@ -230,8 +230,8 @@ in
type = types.bool;
default = false;
description = ''
Accept the CA's terms of service. The default provier is Let's Encrypt,
you can find their ToS at https://letsencrypt.org/repository/
Accept the CA's terms of service. The default provider is Let's Encrypt,
you can find their ToS at <link xlink:href="https://letsencrypt.org/repository/"/>.
'';
};

View file

@ -36,6 +36,17 @@ let
'';
};
p11Auth = mkOption {
default = config.security.pam.p11.enable;
type = types.bool;
description = ''
If set, keys listed in
<filename>~/.ssh/authorized_keys</filename> and
<filename>~/.eid/authorized_certificates</filename>
can be used to log in with the associated PKCS#11 tokens.
'';
};
u2fAuth = mkOption {
default = config.security.pam.u2f.enable;
type = types.bool;
@ -352,6 +363,8 @@ let
"auth sufficient ${pkgs.pam_ssh_agent_auth}/libexec/pam_ssh_agent_auth.so file=~/.ssh/authorized_keys:~/.ssh/authorized_keys2:/etc/ssh/authorized_keys.d/%u"}
${optionalString cfg.fprintAuth
"auth sufficient ${pkgs.fprintd}/lib/security/pam_fprintd.so"}
${let p11 = config.security.pam.p11; in optionalString cfg.p11Auth
"auth ${p11.control} ${pkgs.pam_p11}/lib/security/pam_p11.so ${pkgs.opensc}/lib/opensc-pkcs11.so"}
${let u2f = config.security.pam.u2f; in optionalString cfg.u2fAuth
"auth ${u2f.control} ${pkgs.pam_u2f}/lib/security/pam_u2f.so ${optionalString u2f.debug "debug"} ${optionalString (u2f.authFile != null) "authfile=${u2f.authFile}"} ${optionalString u2f.interactive "interactive"} ${optionalString u2f.cue "cue"}"}
${optionalString cfg.usbAuth
@ -566,6 +579,39 @@ in
security.pam.enableOTPW = mkEnableOption "the OTPW (one-time password) PAM module";
security.pam.p11 = {
enable = mkOption {
default = false;
type = types.bool;
description = ''
Enables P11 PAM (<literal>pam_p11</literal>) module.
If set, users can log in with SSH keys and PKCS#11 tokens.
More information can be found <link
xlink:href="https://github.com/OpenSC/pam_p11">here</link>.
'';
};
control = mkOption {
default = "sufficient";
type = types.enum [ "required" "requisite" "sufficient" "optional" ];
description = ''
This option sets pam "control".
If you want to have multi-factor authentication, use "required".
If you want to use the PKCS#11 device instead of the regular password,
use "sufficient".
Read
<citerefentry>
<refentrytitle>pam.conf</refentrytitle>
<manvolnum>5</manvolnum>
</citerefentry>
for better understanding of this option.
'';
};
};
security.pam.u2f = {
enable = mkOption {
default = false;
@ -747,6 +793,7 @@ in
++ optionals config.krb5.enable [pam_krb5 pam_ccreds]
++ optionals config.security.pam.enableOTPW [ pkgs.otpw ]
++ optionals config.security.pam.oath.enable [ pkgs.oathToolkit ]
++ optionals config.security.pam.p11.enable [ pkgs.pam_p11 ]
++ optionals config.security.pam.u2f.enable [ pkgs.pam_u2f ];
boot.supportedFilesystems = optionals config.security.pam.enableEcryptfs [ "ecryptfs" ];
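
A hedged sketch of using the new pam_p11 support, here as a second factor on top of the regular password (values are illustrative):

```nix
{
  security.pam.p11 = {
    enable = true;
    control = "required";  # "sufficient" would let the token replace the password
  };
}
```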

View file

@ -170,7 +170,6 @@ in {
Restart = "always";
RestartSec = 30;
BusName = "com.intel.tss2.Tabrmd";
StandardOutput = "syslog";
ExecStart = "${cfg.abrmd.package}/bin/tpm2-abrmd";
User = "tss";
Group = "nogroup";

View file

@ -160,8 +160,11 @@ in
config = {
security.wrappers = {
# These are mount related wrappers that require the +s permission.
fusermount.source = "${pkgs.fuse}/bin/fusermount";
fusermount3.source = "${pkgs.fuse3}/bin/fusermount3";
mount.source = "${lib.getBin pkgs.utillinux}/bin/mount";
umount.source = "${lib.getBin pkgs.utillinux}/bin/umount";
};
boot.specialFileSystems.${parentWrapperDir} = {

View file

@ -45,7 +45,7 @@ in {
environment.ROON_DATAROOT = "/var/lib/${name}";
serviceConfig = {
ExecStart = "${pkgs.roon-server}/opt/start.sh";
ExecStart = "${pkgs.roon-server}/start.sh";
LimitNOFILE = 8192;
User = cfg.user;
Group = cfg.group;

View file

@ -31,27 +31,42 @@ let
let
os = val:
optionalString (val != null) "${val}";
os' = prefixx: val:
optionalString (val != null) (prefixx + "${val}");
os' = prefix: val:
optionalString (val != null) (prefix + "${val}");
flatten = key: value:
"&${key}=${value}";
in
"-s ${opt.type}://" + os opt.location + "?" + os' "name=" name
+ concatStrings (mapAttrsToList flatten opt.query);
"--stream.stream=\"${opt.type}://" + os opt.location + "?" + os' "name=" name
+ concatStrings (mapAttrsToList flatten opt.query) + "\"";
optionalNull = val: ret:
optional (val != null) ret;
optionString = concatStringsSep " " (mapAttrsToList streamToOption cfg.streams
++ ["-p ${toString cfg.port}"]
++ ["--controlPort ${toString cfg.controlPort}"]
++ optionalNull cfg.sampleFormat "--sampleFormat ${cfg.sampleFormat}"
++ optionalNull cfg.codec "-c ${cfg.codec}"
++ optionalNull cfg.streamBuffer "--streamBuffer ${cfg.streamBuffer}"
++ optionalNull cfg.buffer "-b ${cfg.buffer}"
++ optional cfg.sendToMuted "--sendToMuted");
# global options
++ [ "--stream.bind_to_address ${cfg.listenAddress}" ]
++ [ "--stream.port ${toString cfg.port}" ]
++ optionalNull cfg.sampleFormat "--stream.sampleformat ${cfg.sampleFormat}"
++ optionalNull cfg.codec "--stream.codec ${cfg.codec}"
++ optionalNull cfg.streamBuffer "--stream.stream_buffer ${cfg.streamBuffer}"
++ optionalNull cfg.buffer "--stream.buffer ${cfg.buffer}"
++ optional cfg.sendToMuted "--stream.send_to_muted"
# tcp json rpc
++ [ "--tcp.enabled ${toString cfg.tcp.enable}" ]
++ optionals cfg.tcp.enable [
"--tcp.address ${cfg.tcp.listenAddress}"
"--tcp.port ${toString cfg.tcp.port}" ]
# http json rpc
++ [ "--http.enabled ${toString cfg.http.enable}" ]
++ optionals cfg.http.enable [
"--http.address ${cfg.http.listenAddress}"
"--http.port ${toString cfg.http.port}"
] ++ optional (cfg.http.docRoot != null) "--http.doc_root \"${toString cfg.http.docRoot}\"");
in {
imports = [
(mkRenamedOptionModule [ "services" "snapserver" "controlPort"] [ "services" "snapserver" "tcp" "port" ])
];
###### interface
@ -67,6 +82,15 @@ in {
'';
};
listenAddress = mkOption {
type = types.str;
default = "::";
example = "0.0.0.0";
description = ''
The address where snapclients can connect.
'';
};
port = mkOption {
type = types.port;
default = 1704;
@ -75,14 +99,6 @@ in {
'';
};
controlPort = mkOption {
type = types.port;
default = 1705;
description = ''
The port for control connections (JSON-RPC).
'';
};
openFirewall = mkOption {
type = types.bool;
default = true;
@ -94,6 +110,90 @@ in {
inherit sampleFormat;
inherit codec;
streamBuffer = mkOption {
type = with types; nullOr int;
default = null;
description = ''
Stream read (input) buffer in ms.
'';
example = 20;
};
buffer = mkOption {
type = with types; nullOr int;
default = null;
description = ''
Network buffer in ms.
'';
example = 1000;
};
sendToMuted = mkOption {
type = types.bool;
default = false;
description = ''
Send audio to muted clients.
'';
};
tcp.enable = mkOption {
type = types.bool;
default = true;
description = ''
Whether to enable the JSON-RPC via TCP.
'';
};
tcp.listenAddress = mkOption {
type = types.str;
default = "::";
example = "0.0.0.0";
description = ''
The address the TCP JSON-RPC listens on.
'';
};
tcp.port = mkOption {
type = types.port;
default = 1705;
description = ''
The port the TCP JSON-RPC listens on.
'';
};
http.enable = mkOption {
type = types.bool;
default = true;
description = ''
Whether to enable the JSON-RPC via HTTP.
'';
};
http.listenAddress = mkOption {
type = types.str;
default = "::";
example = "0.0.0.0";
description = ''
The address the HTTP JSON-RPC listens on.
'';
};
http.port = mkOption {
type = types.port;
default = 1780;
description = ''
The port the HTTP JSON-RPC listens on.
'';
};
http.docRoot = mkOption {
type = with types; nullOr path;
default = null;
description = ''
Path to serve from the HTTP server's root.
'';
};
streams = mkOption {
type = with types; attrsOf (submodule {
options = {
@ -147,34 +247,7 @@ in {
};
'';
};
streamBuffer = mkOption {
type = with types; nullOr int;
default = null;
description = ''
Stream read (input) buffer in ms.
'';
example = 20;
};
buffer = mkOption {
type = with types; nullOr int;
default = null;
description = ''
Network buffer in ms.
'';
example = 1000;
};
sendToMuted = mkOption {
type = types.bool;
default = false;
description = ''
Send audio to muted clients.
'';
};
};
};
@ -206,7 +279,10 @@ in {
};
};
networking.firewall.allowedTCPPorts = optionals cfg.openFirewall [ cfg.port cfg.controlPort ];
networking.firewall.allowedTCPPorts =
optionals cfg.openFirewall [ cfg.port ]
++ optional cfg.tcp.enable cfg.tcp.port
++ optional cfg.http.enable cfg.http.port;
};
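
An illustrative `services.snapserver` configuration exercising the renamed/new options from this hunk (the stream name, type and pipe location are made up for the example):

```nix
{
  services.snapserver = {
    enable = true;
    openFirewall = true;
    tcp.enable = true;    # JSON-RPC over TCP, port 1705 by default
    http.enable = true;   # JSON-RPC over HTTP, port 1780 by default
    streams.mpd = {
      type = "pipe";
      location = "/run/snapserver/mpd";
    };
  };
}
```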
meta = {

View file

@ -18,7 +18,7 @@ in {
};
host = mkOption {
description = "Remote host where snapshots should be sent.";
description = "Remote host where snapshots should be sent. <literal>lz4</literal> is expected to be installed on this host.";
example = "example.com";
type = types.str;
};

View file

@ -49,6 +49,8 @@ let
] ++ service.registrationFlags
++ optional (service.buildsDir != null)
"--builds-dir ${service.buildsDir}"
++ optional (service.cloneUrl != null)
"--clone-url ${service.cloneUrl}"
++ optional (service.preCloneScript != null)
"--pre-clone-script ${service.preCloneScript}"
++ optional (service.preBuildScript != null)
@ -377,6 +379,14 @@ in
in context of selected executor (Locally, Docker, SSH).
'';
};
cloneUrl = mkOption {
type = types.nullOr types.str;
default = null;
example = "http://gitlab.example.local";
description = ''
Overwrite the URL for the GitLab instance. Used if the Runner can't connect to GitLab at the URL GitLab advertises for itself.
'';
};
dockerImage = mkOption {
type = types.nullOr types.str;
default = null;

View file

@ -6,12 +6,10 @@ let
cfg = config.services.mysql;
mysql = cfg.package;
isMariaDB = lib.getName mysql == lib.getName pkgs.mariadb;
isMariaDB = lib.getName cfg.package == lib.getName pkgs.mariadb;
mysqldOptions =
"--user=${cfg.user} --datadir=${cfg.dataDir} --basedir=${mysql}";
"--user=${cfg.user} --datadir=${cfg.dataDir} --basedir=${cfg.package}";
settingsFile = pkgs.writeText "my.cnf" (
generators.toINI { listsAsDuplicateKeys = true; } cfg.settings +
@ -22,7 +20,7 @@ in
{
imports = [
(mkRemovedOptionModule [ "services" "mysql" "pidDir" ] "Don't wait for pidfiles, describe dependencies through systemd")
(mkRemovedOptionModule [ "services" "mysql" "pidDir" ] "Don't wait for pidfiles, describe dependencies through systemd.")
(mkRemovedOptionModule [ "services" "mysql" "rootPassword" ] "Use socket authentication or set the password outside of the nix store.")
];
@ -46,25 +44,31 @@ in
type = types.nullOr types.str;
default = null;
example = literalExample "0.0.0.0";
description = "Address to bind to. The default is to bind to all addresses";
description = "Address to bind to. The default is to bind to all addresses.";
};
port = mkOption {
type = types.int;
default = 3306;
description = "Port of MySQL";
description = "Port of MySQL.";
};
user = mkOption {
type = types.str;
default = "mysql";
description = "User account under which MySQL runs";
description = "User account under which MySQL runs.";
};
group = mkOption {
type = types.str;
default = "mysql";
description = "Group under which MySQL runs.";
};
dataDir = mkOption {
type = types.path;
example = "/var/lib/mysql";
description = "Location where MySQL stores its table files";
description = "Location where MySQL stores its table files.";
};
configFile = mkOption {
@ -171,7 +175,7 @@ in
initialScript = mkOption {
type = types.nullOr types.path;
default = null;
description = "A file containing SQL statements to be executed on the first startup. Can be used for granting certain permissions on the database";
description = "A file containing SQL statements to be executed on the first startup. Can be used for granting certain permissions on the database.";
};
ensureDatabases = mkOption {
@ -259,33 +263,33 @@ in
serverId = mkOption {
type = types.int;
default = 1;
description = "Id of the MySQL server instance. This number must be unique for each instance";
description = "Id of the MySQL server instance. This number must be unique for each instance.";
};
masterHost = mkOption {
type = types.str;
description = "Hostname of the MySQL master server";
description = "Hostname of the MySQL master server.";
};
slaveHost = mkOption {
type = types.str;
description = "Hostname of the MySQL slave server";
description = "Hostname of the MySQL slave server.";
};
masterUser = mkOption {
type = types.str;
description = "Username of the MySQL replication user";
description = "Username of the MySQL replication user.";
};
masterPassword = mkOption {
type = types.str;
description = "Password of the MySQL replication user";
description = "Password of the MySQL replication user.";
};
masterPort = mkOption {
type = types.int;
default = 3306;
description = "Port number on which the MySQL master server runs";
description = "Port number on which the MySQL master server runs.";
};
};
};
@ -317,29 +321,33 @@ in
binlog-ignore-db = [ "information_schema" "performance_schema" "mysql" ];
})
(mkIf (!isMariaDB) {
plugin-load-add = optional (cfg.ensureUsers != []) "auth_socket.so";
plugin-load-add = "auth_socket.so";
})
];
users.users.mysql = {
users.users = optionalAttrs (cfg.user == "mysql") {
mysql = {
description = "MySQL server user";
group = "mysql";
group = cfg.group;
uid = config.ids.uids.mysql;
};
};
users.groups.mysql.gid = config.ids.gids.mysql;
users.groups = optionalAttrs (cfg.group == "mysql") {
mysql.gid = config.ids.gids.mysql;
};
environment.systemPackages = [mysql];
environment.systemPackages = [ cfg.package ];
environment.etc."my.cnf".source = cfg.configFile;
systemd.tmpfiles.rules = [
"d '${cfg.dataDir}' 0700 ${cfg.user} mysql - -"
"z '${cfg.dataDir}' 0700 ${cfg.user} mysql - -"
"d '${cfg.dataDir}' 0700 '${cfg.user}' '${cfg.group}' - -"
"z '${cfg.dataDir}' 0700 '${cfg.user}' '${cfg.group}' - -"
];
systemd.services.mysql = let
hasNotify = (cfg.package == pkgs.mariadb);
hasNotify = isMariaDB;
in {
description = "MySQL Server";
@ -357,27 +365,20 @@ in
preStart = if isMariaDB then ''
if ! test -e ${cfg.dataDir}/mysql; then
${mysql}/bin/mysql_install_db --defaults-file=/etc/my.cnf ${mysqldOptions}
${cfg.package}/bin/mysql_install_db --defaults-file=/etc/my.cnf ${mysqldOptions}
touch ${cfg.dataDir}/mysql_init
fi
'' else ''
if ! test -e ${cfg.dataDir}/mysql; then
${mysql}/bin/mysqld --defaults-file=/etc/my.cnf ${mysqldOptions} --initialize-insecure
${cfg.package}/bin/mysqld --defaults-file=/etc/my.cnf ${mysqldOptions} --initialize-insecure
touch ${cfg.dataDir}/mysql_init
fi
'';
serviceConfig = {
Type = if hasNotify then "notify" else "simple";
Restart = "on-abort";
RestartSec = "5s";
# The last two environment variables are used for starting Galera clusters
ExecStart = "${mysql}/bin/mysqld --defaults-file=/etc/my.cnf ${mysqldOptions} $_WSREP_NEW_CLUSTER $_WSREP_START_POSITION";
ExecStartPost =
let
setupScript = pkgs.writeScript "mysql-setup" ''
#!${pkgs.runtimeShell} -e
postStart = let
# The super user account to use on *first* run of MySQL server
superUser = if isMariaDB then cfg.user else "root";
in ''
${optionalString (!hasNotify) ''
# Wait until the MySQL server is available for use
count=0
@ -397,6 +398,12 @@ in
if [ -f ${cfg.dataDir}/mysql_init ]
then
# While MariaDB comes with a 'mysql' super user account since 10.4.x, MySQL does not
# Since we don't want to run this service as 'root' we need to ensure the account exists on first run
( echo "CREATE USER IF NOT EXISTS '${cfg.user}'@'localhost' IDENTIFIED WITH ${if isMariaDB then "unix_socket" else "auth_socket"};"
echo "GRANT ALL PRIVILEGES ON *.* TO '${cfg.user}'@'localhost' WITH GRANT OPTION;"
) | ${cfg.package}/bin/mysql -u ${superUser} -N
${concatMapStrings (database: ''
# Create initial databases
if ! test -e "${cfg.dataDir}/${database.name}"; then
@ -416,7 +423,7 @@ in
cat ${database.schema}/mysql-databases/*.sql
fi
''}
) | ${mysql}/bin/mysql -u root -N
) | ${cfg.package}/bin/mysql -u ${superUser} -N
fi
'') cfg.initialDatabases}
@ -428,7 +435,7 @@ in
echo "CREATE USER '${cfg.replication.masterUser}'@'${cfg.replication.slaveHost}' IDENTIFIED WITH mysql_native_password;"
echo "SET PASSWORD FOR '${cfg.replication.masterUser}'@'${cfg.replication.slaveHost}' = PASSWORD('${cfg.replication.masterPassword}');"
echo "GRANT REPLICATION SLAVE ON *.* TO '${cfg.replication.masterUser}'@'${cfg.replication.slaveHost}';"
) | ${mysql}/bin/mysql -u root -N
) | ${cfg.package}/bin/mysql -u ${superUser} -N
''}
${optionalString (cfg.replication.role == "slave")
@ -438,7 +445,7 @@ in
( echo "stop slave;"
echo "change master to master_host='${cfg.replication.masterHost}', master_user='${cfg.replication.masterUser}', master_password='${cfg.replication.masterPassword}';"
echo "start slave;"
) | ${mysql}/bin/mysql -u root -N
) | ${cfg.package}/bin/mysql -u ${superUser} -N
''}
${optionalString (cfg.initialScript != null)
@ -446,7 +453,7 @@ in
# Execute initial script
# using toString to avoid copying the file to nix store if given as path instead of string,
# as it might contain credentials
cat ${toString cfg.initialScript} | ${mysql}/bin/mysql -u root -N
cat ${toString cfg.initialScript} | ${cfg.package}/bin/mysql -u ${superUser} -N
''}
rm ${cfg.dataDir}/mysql_init
@ -457,7 +464,7 @@ in
${concatMapStrings (database: ''
echo "CREATE DATABASE IF NOT EXISTS \`${database}\`;"
'') cfg.ensureDatabases}
) | ${mysql}/bin/mysql -u root -N
) | ${cfg.package}/bin/mysql -N
''}
${concatMapStrings (user:
@ -466,16 +473,19 @@ in
${concatStringsSep "\n" (mapAttrsToList (database: permission: ''
echo "GRANT ${permission} ON ${database} TO '${user.name}'@'localhost';"
'') user.ensurePermissions)}
) | ${mysql}/bin/mysql -u root -N
) | ${cfg.package}/bin/mysql -N
'') cfg.ensureUsers}
'';
in
# ensureDatabases & ensureUsers depend on this script being run as root
# when the user has secured their mysql install
"+${setupScript}";
serviceConfig = {
Type = if hasNotify then "notify" else "simple";
Restart = "on-abort";
RestartSec = "5s";
# The last two environment variables are used for starting Galera clusters
ExecStart = "${cfg.package}/bin/mysqld --defaults-file=/etc/my.cnf ${mysqldOptions} $_WSREP_NEW_CLUSTER $_WSREP_START_POSITION";
# User and group
User = cfg.user;
Group = "mysql";
Group = cfg.group;
# Runtime directory and mode
RuntimeDirectory = "mysqld";
RuntimeDirectoryMode = "0755";
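
A hypothetical `services.mysql` setup exercising the new `group` option and the socket-authenticated administration wired up above (database and user names are made up):

```nix
{
  services.mysql = {
    enable = true;
    package = pkgs.mariadb;
    group = "mysql";
    ensureDatabases = [ "wiki" ];
    ensureUsers = [{
      name = "wiki";
      ensurePermissions = { "wiki.*" = "ALL PRIVILEGES"; };
    }];
  };
}
```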

View file

@ -55,9 +55,13 @@ in
dataDir = mkOption {
type = types.path;
defaultText = "/var/lib/postgresql/\${config.services.postgresql.package.psqlSchema}";
example = "/var/lib/postgresql/11";
description = ''
Data directory for PostgreSQL.
The data directory for PostgreSQL. If left as the default value,
this directory will automatically be created before the PostgreSQL server starts; otherwise,
the sysadmin is responsible for ensuring the directory exists with appropriate ownership
and permissions.
'';
};
@ -249,10 +253,7 @@ in
else if versionAtLeast config.system.stateVersion "16.03" then pkgs.postgresql_9_5
else throw "postgresql_9_4 was removed, please upgrade your postgresql version.");
services.postgresql.dataDir =
mkDefault (if versionAtLeast config.system.stateVersion "17.09"
then "/var/lib/postgresql/${cfg.package.psqlSchema}"
else "/var/db/postgresql");
services.postgresql.dataDir = mkDefault "/var/lib/postgresql/${cfg.package.psqlSchema}";
services.postgresql.authentication = mkAfter
''
@ -291,40 +292,28 @@ in
preStart =
''
# Create data directory.
if ! test -e ${cfg.dataDir}/PG_VERSION; then
mkdir -m 0700 -p ${cfg.dataDir}
# Cleanup the data directory.
rm -f ${cfg.dataDir}/*.conf
chown -R postgres:postgres ${cfg.dataDir}
fi
''; # */
script =
''
# Initialise the database.
if ! test -e ${cfg.dataDir}/PG_VERSION; then
initdb -U ${cfg.superUser} ${concatStringsSep " " cfg.initdbArgs}
# See postStart!
touch "${cfg.dataDir}/.first_startup"
fi
ln -sfn "${configFile}" "${cfg.dataDir}/postgresql.conf"
${optionalString (cfg.recoveryConfig != null) ''
ln -sfn "${pkgs.writeText "recovery.conf" cfg.recoveryConfig}" \
"${cfg.dataDir}/recovery.conf"
''}
${optionalString (!groupAccessAvailable) ''
# postgresql pre 11.0 doesn't start if state directory mode is group accessible
chmod 0700 "${cfg.dataDir}"
''}
exec postgres
'';
serviceConfig =
serviceConfig = mkMerge [
{ ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
User = "postgres";
Group = "postgres";
PermissionsStartOnly = true;
RuntimeDirectory = "postgresql";
Type = if versionAtLeast cfg.package.version "9.6"
then "notify"
@ -338,11 +327,15 @@ in
# Give Postgres a decent amount of time to clean up after
# receiving systemd's SIGINT.
TimeoutSec = 120;
};
ExecStart = "${postgresql}/bin/postgres";
# Wait for PostgreSQL to be ready to accept connections.
postStart =
''
ExecStartPost =
let
setupScript = pkgs.writeScript "postgresql-setup" (''
#!${pkgs.runtimeShell} -e
PSQL="${pkgs.utillinux}/bin/runuser -u ${cfg.superUser} -- psql --port=${toString cfg.port}"
while ! $PSQL -d postgres -c "" 2> /dev/null; do
@ -367,7 +360,15 @@ in
$PSQL -tAc 'GRANT ${permission} ON ${database} TO "${user.name}"'
'') user.ensurePermissions)}
'') cfg.ensureUsers}
'';
'');
in
"+${setupScript}";
}
(mkIf (cfg.dataDir == "/var/lib/postgresql/${cfg.package.psqlSchema}") {
StateDirectory = "postgresql postgresql/${cfg.package.psqlSchema}";
StateDirectoryMode = if groupAccessAvailable then "0750" else "0700";
})
];
unitConfig.RequiresMountsFor = "${cfg.dataDir}";
};
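
A sketch of the resulting usage: with the default `dataDir`, the state directory is now managed by systemd via `StateDirectory`, while a custom location stays the administrator's responsibility (package choice is illustrative):

```nix
{
  services.postgresql = {
    enable = true;
    package = pkgs.postgresql_11;
    # dataDir = "/var/lib/postgresql/11";  # the default; created automatically
  };
}
```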

View file

@ -42,6 +42,7 @@ in {
# It has been possible since https://github.com/flatpak/flatpak/releases/tag/1.3.2
# to build a SELinux policy module.
# TODO: use sysusers.d
users.users.flatpak = {
description = "Flatpak system helper";
group = "flatpak";

View file

@ -28,7 +28,10 @@ with lib;
malcontent-ui
];
services.dbus.packages = [ pkgs.malcontent ];
services.dbus.packages = [
# D-Bus services are in `out`, not the default `bin` output that would be picked up by `makeDbusConf`.
pkgs.malcontent.out
];
services.accounts-daemon.enable = true;

View file

@ -0,0 +1,190 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.services.jupyterhub;
kernels = (pkgs.jupyter-kernel.create {
definitions = if cfg.kernels != null
then cfg.kernels
else pkgs.jupyter-kernel.default;
});
jupyterhubConfig = pkgs.writeText "jupyterhub_config.py" ''
c.JupyterHub.bind_url = "http://${cfg.host}:${toString cfg.port}"
c.JupyterHub.authentication_class = "${cfg.authentication}"
c.JupyterHub.spawner_class = "${cfg.spawner}"
c.SystemdSpawner.default_url = '/lab'
c.SystemdSpawner.cmd = "${cfg.jupyterlabEnv}/bin/jupyterhub-singleuser"
c.SystemdSpawner.environment = {
'JUPYTER_PATH': '${kernels}'
}
${cfg.extraConfig}
'';
in {
meta.maintainers = with maintainers; [ costrouc ];
options.services.jupyterhub = {
enable = mkEnableOption "Jupyterhub development server";
authentication = mkOption {
type = types.str;
default = "jupyterhub.auth.PAMAuthenticator";
description = ''
JupyterHub authentication class to use.
There are many authenticators available, including OAuth, PAM,
LDAP, Kerberos, etc.
'';
};
spawner = mkOption {
type = types.str;
default = "systemdspawner.SystemdSpawner";
description = ''
JupyterHub spawner class to use.
There are many spawners available, including local process,
systemd, Docker, Kubernetes, YARN, batch, etc.
'';
};
extraConfig = mkOption {
type = types.lines;
default = "";
description = ''
Extra contents appended to the JupyterHub configuration.
JupyterHub configuration is a normal Python file using
Traitlets; see https://jupyterhub.readthedocs.io/en/stable/getting-started/config-basics.html.
The base configuration of this module was designed to have sane
defaults, but you can override anything since
this is a Python file.
'';
example = literalExample ''
c.SystemdSpawner.mem_limit = '8G'
c.SystemdSpawner.cpu_limit = 2.0
'';
};
jupyterhubEnv = mkOption {
type = types.package;
default = (pkgs.python3.withPackages (p: with p; [
jupyterhub
jupyterhub-systemdspawner
]));
description = ''
Python environment to run jupyterhub.
Customizing will affect the packages available in the hub and
proxy. This will allow packages to be available for the
extraConfig that you may need. This will not normally need to
be changed.
'';
};
jupyterlabEnv = mkOption {
type = types.package;
default = (pkgs.python3.withPackages (p: with p; [
jupyterhub
jupyterlab
]));
description = ''
Python environment to run jupyterlab.
Customizing will affect the packages available in the
jupyterlab server and the default kernel provided. This is the
way to customize the jupyterlab extensions and jupyter
notebook extensions. This will not normally need to
be changed.
'';
};
kernels = mkOption {
type = types.nullOr (types.attrsOf(types.submodule (import ../jupyter/kernel-options.nix {
inherit lib;
})));
default = null;
example = literalExample ''
{
python3 = let
env = (pkgs.python3.withPackages (pythonPackages: with pythonPackages; [
ipykernel
pandas
scikitlearn
]));
in {
displayName = "Python 3 for machine learning";
argv = [
"''${env.interpreter}"
"-m"
"ipykernel_launcher"
"-f"
"{connection_file}"
];
language = "python";
logo32 = "''${env}/''${env.sitePackages}/ipykernel/resources/logo-32x32.png";
logo64 = "''${env}/''${env.sitePackages}/ipykernel/resources/logo-64x64.png";
};
}
'';
description = ''
Declarative kernel config.
Kernels can be declared in any language that has
the dependencies required to communicate with a Jupyter server.
In Python's case, this means that the ipykernel package must always be
included in the list of packages of the targeted environment.
'';
};
port = mkOption {
type = types.port;
default = 8000;
description = ''
Port number Jupyterhub will be listening on
'';
};
host = mkOption {
type = types.str;
default = "0.0.0.0";
description = ''
Bind IP JupyterHub will be listening on
'';
};
stateDirectory = mkOption {
type = types.str;
default = "jupyterhub";
description = ''
Directory for jupyterhub state (token + database)
'';
};
};
config = mkMerge [
(mkIf cfg.enable {
systemd.services.jupyterhub = {
description = "Jupyterhub development server";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Restart = "always";
ExecStart = "${cfg.jupyterhubEnv}/bin/jupyterhub --config ${jupyterhubConfig}";
User = "root";
StateDirectory = cfg.stateDirectory;
WorkingDirectory = "/var/lib/${cfg.stateDirectory}";
};
};
})
];
}
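
A hypothetical `configuration.nix` snippet for the new JupyterHub module, adding one declarative Python kernel (the kernel name, display name and package set are made up):

```nix
{
  services.jupyterhub = {
    enable = true;
    port = 8000;
    kernels.python3 = let
      env = pkgs.python3.withPackages (ps: with ps; [ ipykernel numpy ]);
    in {
      displayName = "Python 3 with NumPy";
      argv = [ "${env.interpreter}" "-m" "ipykernel_launcher" "-f" "{connection_file}" ];
      language = "python";
    };
  };
}
```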

View file

@ -34,7 +34,8 @@ Keywords=Text;Editor;
'';
};
in {
in
{
options.services.emacs = {
enable = mkOption {

View file

@ -8,12 +8,8 @@ let
mkTlpConfig = tlpConfig: generators.toKeyValue {
mkKeyValue = generators.mkKeyValueDefault {
mkValueString = val:
if isInt val then toString val
else if isString val then val
else if true == val then "1"
else if false == val then "0"
else if isList val then "\"" + (concatStringsSep " " val) + "\""
else err "invalid value provided to mkTlpConfig:" (toString val);
if isList val then "\"" + (toString val) + "\""
else toString val;
} "=";
} tlpConfig;
in
@@ -27,10 +23,24 @@ in
description = "Whether to enable the TLP power management daemon.";
};
settings = mkOption {
type = with types; attrsOf (oneOf [bool int float str (listOf str)]);
default = {};
example = {
SATA_LINKPWR_ON_BAT = "med_power_with_dipm";
USB_BLACKLIST_PHONE = 1;
};
description = ''
Options passed to TLP. See https://linrunner.de/tlp for all supported options.
'';
};
extraConfig = mkOption {
type = types.lines;
default = "";
description = "Additional configuration variables for TLP";
description = ''
Verbatim additional configuration variables for TLP.
DEPRECATED: use services.tlp.settings instead.
'';
};
};
};
@@ -39,8 +49,20 @@ in
config = mkIf cfg.enable {
boot.kernelModules = [ "msr" ];
warnings = optional (cfg.extraConfig != "") ''
Using config.services.tlp.extraConfig is deprecated and will become unsupported in a future release. Use config.services.tlp.settings instead.
'';
assertions = [{
assertion = cfg.enable -> config.powerManagement.scsiLinkPolicy == null;
message = ''
`services.tlp.enable` and `config.powerManagement.scsiLinkPolicy` cannot both be set.
Set `services.tlp.settings.SATA_LINKPWR_ON_AC` and `services.tlp.settings.SATA_LINKPWR_ON_BAT` instead.
'';
}];
environment.etc = {
"tlp.conf".text = cfg.extraConfig;
"tlp.conf".text = (mkTlpConfig cfg.settings) + cfg.extraConfig;
} // optionalAttrs enableRDW {
"NetworkManager/dispatcher.d/99tlp-rdw-nm".source =
"${tlp}/etc/NetworkManager/dispatcher.d/99tlp-rdw-nm";
@@ -48,18 +70,25 @@ in
environment.systemPackages = [ tlp ];
# FIXME: When the config is parametrized we need to move these into a
# conditional on the relevant options being enabled.
powerManagement = {
scsiLinkPolicy = null;
cpuFreqGovernor = null;
cpufreq.max = null;
cpufreq.min = null;
services.tlp.settings = let
cfg = config.powerManagement;
maybeDefault = val: lib.mkIf (val != null) (lib.mkDefault val);
in {
CPU_SCALING_GOVERNOR_ON_AC = maybeDefault cfg.cpuFreqGovernor;
CPU_SCALING_GOVERNOR_ON_BAT = maybeDefault cfg.cpuFreqGovernor;
CPU_SCALING_MIN_FREQ_ON_AC = maybeDefault cfg.cpufreq.min;
CPU_SCALING_MAX_FREQ_ON_AC = maybeDefault cfg.cpufreq.max;
CPU_SCALING_MIN_FREQ_ON_BAT = maybeDefault cfg.cpufreq.min;
CPU_SCALING_MAX_FREQ_ON_BAT = maybeDefault cfg.cpufreq.max;
};
services.udev.packages = [ tlp ];
systemd = {
# use native tlp instead because it can also differentiate between AC/BAT
services.cpufreq.enable = false;
packages = [ tlp ];
# XXX: These must always be disabled/masked according to [1].
#

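As a rough usage sketch of the new `services.tlp.settings` option: values may be booleans, integers, floats, strings, or lists of strings, and `mkTlpConfig` above renders them as KEY=value lines, with list values quoted and space-separated. The specific TLP keys below are illustrative.

```nix
{
  services.tlp = {
    enable = true;
    settings = {
      # Rendered as CPU_SCALING_GOVERNOR_ON_AC=performance, etc.
      CPU_SCALING_GOVERNOR_ON_AC = "performance";
      CPU_SCALING_GOVERNOR_ON_BAT = "powersave";
      USB_BLACKLIST_PHONE = 1;
      # Lists become a quoted, space-separated value:
      # DEVICES_TO_DISABLE_ON_STARTUP="bluetooth wwan"
      DEVICES_TO_DISABLE_ON_STARTUP = [ "bluetooth" "wwan" ];
    };
  };
}
```

Since `/etc/tlp.conf` is built as `mkTlpConfig cfg.settings` followed by `cfg.extraConfig`, existing verbatim configuration keeps working while it is migrated to `settings`.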
View file

@@ -25,6 +25,8 @@ let
clientRestrictions = concatStringsSep ", " (clientAccess ++ dnsBl);
smtpTlsSecurityLevel = if cfg.useDane then "dane" else "may";
mainCf = let
escape = replaceStrings ["$"] ["$$"];
mkList = items: "\n " + concatStringsSep ",\n " items;
@@ -508,6 +510,14 @@ in
'';
};
useDane = mkOption {
type = types.bool;
default = false;
description = ''
Sets smtp_tls_security_level to "dane" rather than "may". See postconf(5) for details.
'';
};
sslCert = mkOption {
type = types.str;
default = "";
@@ -809,13 +819,13 @@ in
// optionalAttrs cfg.enableHeaderChecks { header_checks = [ "regexp:/etc/postfix/header_checks" ]; }
// optionalAttrs (cfg.tlsTrustedAuthorities != "") {
smtp_tls_CAfile = cfg.tlsTrustedAuthorities;
smtp_tls_security_level = "may";
smtp_tls_security_level = smtpTlsSecurityLevel;
}
// optionalAttrs (cfg.sslCert != "") {
smtp_tls_cert_file = cfg.sslCert;
smtp_tls_key_file = cfg.sslKey;
smtp_tls_security_level = "may";
smtp_tls_security_level = smtpTlsSecurityLevel;
smtpd_tls_cert_file = cfg.sslCert;
smtpd_tls_key_file = cfg.sslKey;

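A minimal sketch of the new `useDane` option: wherever the module previously forced `smtp_tls_security_level = "may"`, it now substitutes `dane` when the option is set. The certificate paths are illustrative, and DANE additionally relies on a DNSSEC-validating resolver, which this option does not configure.

```nix
{
  services.postfix = {
    enable = true;
    sslCert = "/var/lib/acme/mail.example.org/fullchain.pem"; # illustrative path
    sslKey = "/var/lib/acme/mail.example.org/key.pem";        # illustrative path
    # Sets smtp_tls_security_level = "dane" instead of "may".
    useDane = true;
  };
}
```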
View file

@@ -162,6 +162,45 @@ in
<manvolnum>7</manvolnum></citerefentry>.
'';
};
backupDir = mkOption {
type = types.str;
default = "${cfg.stateDir}/dump";
description = "Path to the dump files.";
};
};
ssh = {
enable = mkOption {
type = types.bool;
default = true;
description = "Enable external SSH feature.";
};
clonePort = mkOption {
type = types.int;
default = 22;
example = 2222;
description = ''
SSH port displayed in the clone URL.
This option is required when the externally visible port
differs from the local listening port, e.g. when port forwarding is used.
'';
};
};
lfs = {
enable = mkOption {
type = types.bool;
default = false;
description = "Enables git-lfs support.";
};
contentDir = mkOption {
type = types.str;
default = "${cfg.stateDir}/data/lfs";
description = "Where to store LFS files.";
};
};
appName = mkOption {
@@ -200,6 +239,12 @@ in
description = "HTTP listen port.";
};
enableUnixSocket = mkOption {
type = types.bool;
default = false;
description = "Configure Gitea to listen on a unix socket instead of the default TCP port.";
};
cookieSecure = mkOption {
type = types.bool;
default = false;
@@ -300,14 +345,34 @@ in
ROOT = cfg.repositoryRoot;
};
server = {
server = mkMerge [
{
DOMAIN = cfg.domain;
HTTP_ADDR = cfg.httpAddress;
HTTP_PORT = cfg.httpPort;
ROOT_URL = cfg.rootUrl;
STATIC_ROOT_PATH = cfg.staticRootPath;
LFS_JWT_SECRET = "#jwtsecret#";
};
ROOT_URL = cfg.rootUrl;
}
(mkIf cfg.enableUnixSocket {
PROTOCOL = "unix";
HTTP_ADDR = "/run/gitea/gitea.sock";
})
(mkIf (!cfg.enableUnixSocket) {
HTTP_ADDR = cfg.httpAddress;
HTTP_PORT = cfg.httpPort;
})
(mkIf cfg.ssh.enable {
DISABLE_SSH = false;
SSH_PORT = cfg.ssh.clonePort;
})
(mkIf (!cfg.ssh.enable) {
DISABLE_SSH = true;
})
(mkIf cfg.lfs.enable {
LFS_START_SERVER = true;
LFS_CONTENT_PATH = cfg.lfs.contentDir;
})
];
session = {
COOKIE_NAME = "session";
@@ -357,12 +422,26 @@ in
};
systemd.tmpfiles.rules = [
"d '${cfg.stateDir}' - ${cfg.user} gitea - -"
"d '${cfg.stateDir}/conf' - ${cfg.user} gitea - -"
"d '${cfg.stateDir}/custom' - ${cfg.user} gitea - -"
"d '${cfg.stateDir}/custom/conf' - ${cfg.user} gitea - -"
"d '${cfg.stateDir}/log' - ${cfg.user} gitea - -"
"d '${cfg.repositoryRoot}' - ${cfg.user} gitea - -"
"d '${cfg.dump.backupDir}' 0750 ${cfg.user} gitea - -"
"z '${cfg.dump.backupDir}' 0750 ${cfg.user} gitea - -"
"Z '${cfg.dump.backupDir}' - ${cfg.user} gitea - -"
"d '${cfg.lfs.contentDir}' 0750 ${cfg.user} gitea - -"
"z '${cfg.lfs.contentDir}' 0750 ${cfg.user} gitea - -"
"Z '${cfg.lfs.contentDir}' - ${cfg.user} gitea - -"
"d '${cfg.repositoryRoot}' 0750 ${cfg.user} gitea - -"
"z '${cfg.repositoryRoot}' 0750 ${cfg.user} gitea - -"
"Z '${cfg.repositoryRoot}' - ${cfg.user} gitea - -"
"d '${cfg.stateDir}' 0750 ${cfg.user} gitea - -"
"d '${cfg.stateDir}/conf' 0750 ${cfg.user} gitea - -"
"d '${cfg.stateDir}/custom' 0750 ${cfg.user} gitea - -"
"d '${cfg.stateDir}/custom/conf' 0750 ${cfg.user} gitea - -"
"d '${cfg.stateDir}/log' 0750 ${cfg.user} gitea - -"
"z '${cfg.stateDir}' 0750 ${cfg.user} gitea - -"
"z '${cfg.stateDir}/.ssh' 0700 ${cfg.user} gitea - -"
"z '${cfg.stateDir}/conf' 0750 ${cfg.user} gitea - -"
"z '${cfg.stateDir}/custom' 0750 ${cfg.user} gitea - -"
"z '${cfg.stateDir}/custom/conf' 0750 ${cfg.user} gitea - -"
"z '${cfg.stateDir}/log' 0750 ${cfg.user} gitea - -"
"Z '${cfg.stateDir}' - ${cfg.user} gitea - -"
# If we have a folder or symlink with gitea locales, remove it
@@ -431,28 +510,39 @@ in
User = cfg.user;
Group = "gitea";
WorkingDirectory = cfg.stateDir;
ExecStart = "${gitea}/bin/gitea web";
ExecStart = "${gitea}/bin/gitea web --pid /run/gitea/gitea.pid";
Restart = "always";
# Filesystem
# Runtime directory and mode
RuntimeDirectory = "gitea";
RuntimeDirectoryMode = "0755";
# Access write directories
ReadWritePaths = [ cfg.dump.backupDir cfg.repositoryRoot cfg.stateDir cfg.lfs.contentDir ];
UMask = "0027";
# Capabilities
CapabilityBoundingSet = "";
# Security
NoNewPrivileges = true;
# Sandboxing
ProtectSystem = "strict";
ProtectHome = true;
PrivateTmp = true;
PrivateDevices = true;
PrivateUsers = true;
ProtectHostname = true;
ProtectClock = true;
ProtectKernelTunables = true;
ProtectKernelModules = true;
ProtectKernelLogs = true;
ProtectControlGroups = true;
ReadWritePaths = cfg.stateDir;
# Caps
CapabilityBoundingSet = "";
NoNewPrivileges = true;
# Misc.
RestrictAddressFamilies = [ "AF_UNIX AF_INET AF_INET6" ];
LockPersonality = true;
RestrictRealtime = true;
PrivateMounts = true;
PrivateUsers = true;
MemoryDenyWriteExecute = true;
SystemCallFilter = "~@clock @cpu-emulation @debug @keyring @memlock @module @mount @obsolete @raw-io @reboot @resources @setuid @swap";
RestrictRealtime = true;
RestrictSUIDSGID = true;
PrivateMounts = true;
# System Call Filtering
SystemCallArchitectures = "native";
RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
SystemCallFilter = "~@clock @cpu-emulation @debug @keyring @memlock @module @mount @obsolete @raw-io @reboot @resources @setuid @swap";
};
environment = {
@@ -504,7 +594,7 @@ in
Type = "oneshot";
User = cfg.user;
ExecStart = "${gitea}/bin/gitea dump";
WorkingDirectory = cfg.stateDir;
WorkingDirectory = cfg.dump.backupDir;
};
};

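A sketch combining several of the Gitea options added in this hunk: a Unix socket listener, an advertised SSH clone port that differs from the local sshd port, and Git LFS support. It assumes the module is exposed as `services.gitea` and that a reverse proxy for the socket is configured separately.

```nix
{
  services.gitea = {
    enable = true;
    # HTTP is served on /run/gitea/gitea.sock; put nginx or similar in front.
    enableUnixSocket = true;
    # Clone URLs advertise port 2222, e.g. when forwarding 2222 -> 22.
    ssh.clonePort = 2222;
    # Enables LFS_START_SERVER; content defaults to <stateDir>/data/lfs.
    lfs.enable = true;
  };
}
```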
View file

@@ -71,7 +71,7 @@ let
};
};
redisConfig.production.url = "redis://localhost:6379/";
redisConfig.production.url = cfg.redisUrl;
gitlabConfig = {
# These are the default settings from config/gitlab.example.yml
@@ -311,6 +311,12 @@ in {
description = "Extra configuration in config/database.yml.";
};
redisUrl = mkOption {
type = types.str;
default = "redis://localhost:6379/";
description = "Redis URL for all GitLab services except gitlab-shell";
};
extraGitlabRb = mkOption {
type = types.str;
default = "";
@@ -612,12 +618,19 @@ in {
enable = true;
ensureUsers = singleton { name = cfg.databaseUsername; };
};
# The postgresql module doesn't currently support concepts like
# object owners and extensions; for now we tack on what's needed
# here.
systemd.services.postgresql.postStart = mkAfter (optionalString databaseActuallyCreateLocally ''
systemd.services.gitlab-postgresql = let pgsql = config.services.postgresql; in mkIf databaseActuallyCreateLocally {
after = [ "postgresql.service" ];
wantedBy = [ "multi-user.target" ];
path = [ pgsql.package ];
script = ''
set -eu
PSQL="${pkgs.utillinux}/bin/runuser -u ${pgsql.superUser} -- psql --port=${toString pgsql.port}"
$PSQL -tAc "SELECT 1 FROM pg_database WHERE datname = '${cfg.databaseName}'" | grep -q 1 || $PSQL -tAc 'CREATE DATABASE "${cfg.databaseName}" OWNER "${cfg.databaseUsername}"'
current_owner=$($PSQL -tAc "SELECT pg_catalog.pg_get_userbyid(datdba) FROM pg_catalog.pg_database WHERE datname = '${cfg.databaseName}'")
if [[ "$current_owner" != "${cfg.databaseUsername}" ]]; then
@@ -631,7 +644,12 @@ in {
rm "${config.services.postgresql.dataDir}/.reassigning_${cfg.databaseName}"
fi
$PSQL '${cfg.databaseName}' -tAc "CREATE EXTENSION IF NOT EXISTS pg_trgm"
'');
'';
serviceConfig = {
Type = "oneshot";
};
};
# Use postfix to send out mails.
services.postfix.enable = mkDefault true;
@@ -678,7 +696,6 @@ in {
"L+ /run/gitlab/shell-config.yml - - - - ${pkgs.writeText "config.yml" (builtins.toJSON gitlabShellConfig)}"
"L+ ${cfg.statePath}/config/unicorn.rb - - - - ${./defaultUnicornConfig.rb}"
"L+ ${cfg.statePath}/config/initializers/extra-gitlab.rb - - - - ${extraGitlabRb}"
];
systemd.services.gitlab-sidekiq = {
@@ -704,7 +721,7 @@ in {
TimeoutSec = "infinity";
Restart = "on-failure";
WorkingDirectory = "${cfg.packages.gitlab}/share/gitlab";
ExecStart="${cfg.packages.gitlab.rubyEnv}/bin/sidekiq -C \"${cfg.packages.gitlab}/share/gitlab/config/sidekiq_queues.yml\" -e production -P ${cfg.statePath}/tmp/sidekiq.pid";
ExecStart="${cfg.packages.gitlab.rubyEnv}/bin/sidekiq -C \"${cfg.packages.gitlab}/share/gitlab/config/sidekiq_queues.yml\" -e production";
};
};
@@ -761,7 +778,7 @@ in {
};
systemd.services.gitlab = {
after = [ "gitlab-workhorse.service" "gitaly.service" "network.target" "postgresql.service" "redis.service" ];
after = [ "gitlab-workhorse.service" "gitaly.service" "network.target" "gitlab-postgresql.service" "redis.service" ];
requires = [ "gitlab-sidekiq.service" ];
wantedBy = [ "multi-user.target" ];
environment = gitlabEnv;
@@ -798,6 +815,7 @@ in {
rm -f ${cfg.statePath}/lib
cp -rf --no-preserve=mode ${cfg.packages.gitlab}/share/gitlab/config.dist/* ${cfg.statePath}/config
cp -rf --no-preserve=mode ${cfg.packages.gitlab}/share/gitlab/db/* ${cfg.statePath}/db
ln -sf ${extraGitlabRb} ${cfg.statePath}/config/initializers/extra-gitlab.rb
${cfg.packages.gitlab-shell}/bin/install

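The new `redisUrl` option defaults to the local `redis://localhost:6379/`; the sketch below points every GitLab service except gitlab-shell at an external Redis (hostname illustrative, option prefix `services.gitlab` assumed).

```nix
{
  services.gitlab = {
    enable = true;
    # Used by all GitLab services except gitlab-shell.
    redisUrl = "redis://redis.internal.example.org:6379/";
  };
}
```

Note that database setup now happens in the dedicated `gitlab-postgresql` oneshot unit introduced above, and `gitlab.service` orders itself after that unit rather than after `postgresql.service` directly.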
View file

@@ -98,7 +98,7 @@ in
${pkgs.gollum}/bin/gollum \
--port ${toString cfg.port} \
--host ${cfg.address} \
--config ${builtins.toFile "gollum-config.rb" cfg.extraConfig} \
--config ${pkgs.writeText "gollum-config.rb" cfg.extraConfig} \
--ref ${cfg.branch} \
${optionalString cfg.mathjax "--mathjax"} \
${optionalString cfg.emoji "--emoji"} \

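The switch from `builtins.toFile` to `pkgs.writeText` above matters when `extraConfig` references store paths: `builtins.toFile` rejects strings that refer to derivation outputs, while `pkgs.writeText` builds the file as a derivation and handles such references. A small usage sketch, assuming the option prefix `services.gollum`; the Ruby snippet is illustrative.

```nix
{
  services.gollum = {
    enable = true;
    # Written to the store via pkgs.writeText and passed with --config.
    extraConfig = ''
      # illustrative gollum-config.rb content
      Precious::App.set(:wiki_options, { h1_title: true })
    '';
  };
}
```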
View file

@@ -16,6 +16,14 @@ in
description = "User account under which Jellyfin runs.";
};
package = mkOption {
type = types.package;
example = literalExample "pkgs.jellyfin";
description = ''
Jellyfin package to use.
'';
};
group = mkOption {
type = types.str;
default = "jellyfin";
@@ -35,11 +43,16 @@ in
Group = cfg.group;
StateDirectory = "jellyfin";
CacheDirectory = "jellyfin";
ExecStart = "${pkgs.jellyfin}/bin/jellyfin --datadir '/var/lib/${StateDirectory}' --cachedir '/var/cache/${CacheDirectory}'";
ExecStart = "${cfg.package}/bin/jellyfin --datadir '/var/lib/${StateDirectory}' --cachedir '/var/cache/${CacheDirectory}'";
Restart = "on-failure";
};
};
services.jellyfin.package = mkDefault (
if versionAtLeast config.system.stateVersion "20.09" then pkgs.jellyfin
else pkgs.jellyfin_10_5
);
users.users = mkIf (cfg.user == "jellyfin") {
jellyfin = {
group = cfg.group;

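The new `package` option defaults to `pkgs.jellyfin` on state versions 20.09 or newer and to `pkgs.jellyfin_10_5` otherwise. A sketch of pinning the package explicitly instead of relying on `system.stateVersion`:

```nix
{ pkgs, ... }:
{
  services.jellyfin = {
    enable = true;
    # Pin the server version instead of relying on the stateVersion-based default.
    package = pkgs.jellyfin_10_5;
  };
}
```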
Some files were not shown because too many files have changed in this diff.