Project import generated by Copybara.
GitOrigin-RevId: 5bb20f9dc70e9ee16e21cc404b6508654931ce41
Parent: 68d7e71424
Commit: 73a29e0b97
348 changed files with 4042 additions and 4341 deletions
|
@ -224,7 +224,7 @@ There are a few naming guidelines:
|
|||
|
||||
- Dashes in the package name _should_ be preserved in new variable names, rather than converted to underscores or camel cased — e.g., `http-parser` instead of `http_parser` or `httpParser`. Of these three spellings, the hyphenated one is preferred.
|
||||
|
||||
- If there are multiple versions of a package, this _should_ be reflected in the variable names in `all-packages.nix`, e.g. `json-c-0-9` and `json-c-0-11`. If there is an obvious “default” version, make an attribute like `json-c = json-c-0-9;`. See also [](#sec-versioning)
|
||||
- If there are multiple versions of a package, this _should_ be reflected in the variable names in `all-packages.nix`, e.g. `json-c_0_9` and `json-c_0_11`. If there is an obvious “default” version, make an attribute like `json-c = json-c_0_9;`. See also [](#sec-versioning)
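A sketch of how such versioned attributes might look in `all-packages.nix` (the `callPackage` paths are placeholders):

```nix
json-c_0_9 = callPackage ../development/libraries/json-c/0.9.nix { };
json-c_0_11 = callPackage ../development/libraries/json-c/0.11.nix { };
json-c = json-c_0_9; # the "default" version
```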
|
||||
|
||||
## File naming and organisation {#sec-organisation}
|
||||
|
||||
|
|
|
@ -32,9 +32,9 @@ Given that most of the OCaml ecosystem is now built with dune, nixpkgs includes
|
|||
|
||||
Here is a simple package example.
|
||||
|
||||
- It defines an (optional) attribute `minimalOCamlVersion` that will be used to
|
||||
throw a descriptive evaluation error if building with an older OCaml is
|
||||
attempted.
|
||||
- It defines an (optional) attribute `minimalOCamlVersion` (see note below)
|
||||
that will be used to throw a descriptive evaluation error if building with
|
||||
an older OCaml is attempted.
|
||||
|
||||
- It uses the `fetchFromGitHub` fetcher to get its source.
|
||||
|
||||
|
@ -117,3 +117,11 @@ buildDunePackage rec {
|
|||
};
|
||||
}
|
||||
```
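For orientation, a condensed sketch of such a package using both attributes discussed above (the name, version, owner, and hash are placeholders, not taken from the example):

```nix
{ lib, buildDunePackage, fetchFromGitHub }:

buildDunePackage rec {
  pname = "example-lib"; # placeholder
  version = "1.0.0";     # placeholder

  # Throws a descriptive evaluation error when built with an older OCaml.
  minimalOCamlVersion = "4.08";

  src = fetchFromGitHub {
    owner = "example-owner";   # placeholder
    repo = pname;
    rev = version;
    sha256 = lib.fakeSha256;   # replace with the real hash
  };
}
```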
|
||||
|
||||
Note about `minimalOCamlVersion`: a deprecated version of this argument was spelled `minimumOCamlVersion`; setting the old attribute wrongly modifies the derivation hash and is therefore inappropriate. As technical debt, currently packaged libraries may still use the old spelling; maintainers are invited to fix this when updating packages. Mass renaming is strongly discouraged, as it would be challenging to review, difficult to test, and would cause unnecessary rebuilds.
|
||||
|
|
|
@ -192,10 +192,6 @@ meta.hydraPlatforms = [];
|
|||
|
||||
If set to `true`, the package is marked as "broken", meaning that it won’t show up in `nix-env -qa`, and cannot be built or installed. Such packages should be removed from Nixpkgs eventually unless they are fixed.
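For example, a package that is known not to build on a particular platform might set (the condition is illustrative):

```nix
meta.broken = stdenv.isDarwin;
```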
|
||||
|
||||
### `updateWalker` {#var-meta-updateWalker}
|
||||
|
||||
If set to `true`, the package is tested to be updated correctly by the `update-walker.sh` script without additional settings. Such packages have `meta.version` set and their homepage (or the page specified by `meta.downloadPage`) contains a direct link to the package tarball.
|
||||
|
||||
## Licenses {#sec-meta-license}
|
||||
|
||||
The `meta.license` attribute should preferably contain a value from `lib.licenses` defined in [`nixpkgs/lib/licenses.nix`](https://github.com/NixOS/nixpkgs/blob/master/lib/licenses.nix), or in-place license description of the same format if the license is unlikely to be useful in another expression.
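For example, for a package released under the GPLv3-or-later license:

```nix
meta.license = lib.licenses.gpl3Plus;
```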
|
||||
|
|
third_party/nixpkgs/flake.nix (vendored): 6 changes
|
@ -48,10 +48,6 @@
|
|||
system.nixos.versionSuffix =
|
||||
".${final.substring 0 8 (self.lastModifiedDate or self.lastModified or "19700101")}.${self.shortRev or "dirty"}";
|
||||
system.nixos.revision = final.mkIf (self ? rev) self.rev;
|
||||
|
||||
# NOTE: This assumes that `nixpkgs.config` is _not_ used when
|
||||
# nixpkgs.pkgs is set OR _module.args.pkgs is set.
|
||||
nixpkgs.config.path = self.outPath;
|
||||
}
|
||||
];
|
||||
});
|
||||
|
@ -66,7 +62,7 @@
|
|||
}).nixos.manual.x86_64-linux;
|
||||
};
|
||||
|
||||
legacyPackages = forAllSystems (system: import ./. { inherit system; config.path = self.outPath; });
|
||||
legacyPackages = forAllSystems (system: import ./. { inherit system; });
|
||||
|
||||
nixosModules = {
|
||||
notDetected = import ./nixos/modules/installer/scan/not-detected.nix;
|
||||
|
|
|
@ -6068,6 +6068,16 @@
|
|||
githubId = 2396926;
|
||||
name = "Justin Woo";
|
||||
};
|
||||
jvanbruegge = {
|
||||
email = "supermanitu@gmail.com";
|
||||
github = "jvanbruegge";
|
||||
githubId = 1529052;
|
||||
name = "Jan van Brügge";
|
||||
keys = [{
|
||||
longkeyid = "rsa4096/0x366572BE7D6C78A2";
|
||||
fingerprint = "3513 5CE5 77AD 711F 3825 9A99 3665 72BE 7D6C 78A2";
|
||||
}];
|
||||
};
|
||||
jwatt = {
|
||||
email = "jwatt@broken.watch";
|
||||
github = "jjwatt";
|
||||
|
|
|
@ -5,7 +5,7 @@ when developing or debugging a test:
|
|||
|
||||
```ShellSession
|
||||
$ nix-build . -A nixosTests.login.driverInteractive
|
||||
$ ./result/bin/nixos-test-driver --interactive
|
||||
$ ./result/bin/nixos-test-driver
|
||||
[...]
|
||||
>>>
|
||||
```
|
||||
|
@ -28,7 +28,7 @@ You can re-use the VM states coming from a previous run by setting the
|
|||
`--keep-vm-state` flag.
|
||||
|
||||
```ShellSession
|
||||
$ ./result/bin/nixos-test-driver --interactive --keep-vm-state
|
||||
$ ./result/bin/nixos-test-driver --keep-vm-state
|
||||
```
|
||||
|
||||
The machine state is stored in the `$TMPDIR/vm-state-machinename`
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
</para>
|
||||
<programlisting>
|
||||
$ nix-build . -A nixosTests.login.driverInteractive
|
||||
$ ./result/bin/nixos-test-driver --interactive
|
||||
$ ./result/bin/nixos-test-driver
|
||||
[...]
|
||||
>>>
|
||||
</programlisting>
|
||||
|
@ -30,7 +30,7 @@ $ ./result/bin/nixos-test-driver --interactive
|
|||
the <literal>--keep-vm-state</literal> flag.
|
||||
</para>
|
||||
<programlisting>
|
||||
$ ./result/bin/nixos-test-driver --interactive --keep-vm-state
|
||||
$ ./result/bin/nixos-test-driver --keep-vm-state
|
||||
</programlisting>
|
||||
<para>
|
||||
The machine state is stored in the
|
||||
|
|
|
@ -442,14 +442,50 @@
|
|||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The interface that allows activation scripts to restart units
|
||||
has been reworked. Restarting and reloading is now done by a
|
||||
single file
|
||||
<literal>switch-to-configuration</literal> (the script that is
|
||||
run when running <literal>nixos-rebuild switch</literal> for
|
||||
example) has been reworked
|
||||
</para>
|
||||
<itemizedlist spacing="compact">
|
||||
<listitem>
|
||||
<para>
|
||||
The interface that allows activation scripts to restart
|
||||
units has been streamlined. Restarting and reloading is
|
||||
now done by a single file
|
||||
<literal>/run/nixos/activation-restart-list</literal> that
|
||||
honors <literal>restartIfChanged</literal> and
|
||||
<literal>reloadIfChanged</literal> of the units.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The script now uses a proper ini-file parser to parse
|
||||
systemd units. Some values are now only searched in one
|
||||
section instead of in the entire unit. This is only
|
||||
relevant for units that don’t use the NixOS systemd module.
|
||||
</para>
|
||||
<itemizedlist spacing="compact">
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>RefuseManualStop</literal>,
|
||||
<literal>X-OnlyManualStart</literal>,
|
||||
<literal>X-StopOnRemoval</literal>,
|
||||
<literal>X-StopOnReconfiguration</literal> are only
|
||||
searched in the <literal>[Unit]</literal> section
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>X-ReloadIfChanged</literal>,
|
||||
<literal>X-RestartIfChanged</literal>,
|
||||
<literal>X-StopIfChanged</literal> are only searched
|
||||
in the <literal>[Service]</literal> section
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The <literal>services.bookstack.cacheDir</literal> option has
|
||||
|
@ -531,6 +567,29 @@
|
|||
was added.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The configuration portion of the <literal>nix-daemon</literal>
|
||||
module has been reworked and exposed as
|
||||
<link xlink:href="options.html#opt-nix-settings">nix.settings</link>:
|
||||
</para>
|
||||
<itemizedlist spacing="compact">
|
||||
<listitem>
|
||||
<para>
|
||||
Legacy options have been mapped to the corresponding
|
||||
options under
|
||||
<link xlink:href="options.html#opt-nix.settings">nix.settings</link>
|
||||
but may be deprecated in the future.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<link xlink:href="options.html#opt-nix.buildMachines.publicHostKey">nix.buildMachines.publicHostKey</link>
|
||||
has been added.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The
|
||||
|
@ -541,6 +600,19 @@
|
|||
using the PyPy interpreter were added.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
If you are using Wayland you can choose to use the Ozone
|
||||
Wayland support in Chrome and several Electron apps by setting
|
||||
the environment variable <literal>NIXOS_OZONE_WL=1</literal>
|
||||
(for example via
|
||||
<literal>environment.sessionVariables.NIXOS_OZONE_WL = "1"</literal>).
|
||||
This is not enabled by default because Ozone Wayland is still
|
||||
under heavy development and behavior is not always flawless.
|
||||
Furthermore, not all Electron apps use the latest Electron
|
||||
versions.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
The <literal>influxdb2</literal> package was split into
|
||||
|
@ -712,6 +784,15 @@
|
|||
warning.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<literal>programs.tmux</literal> has a new option
|
||||
<literal>plugins</literal> that accepts a list of packages
|
||||
from the <literal>tmuxPlugins</literal> group. The specified
|
||||
packages are added to the system and loaded by
|
||||
<literal>tmux</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
</itemizedlist>
|
||||
</section>
|
||||
</section>
|
||||
|
|
|
@ -453,7 +453,7 @@
|
|||
Allow ad-hoc remote builders for building the new system. This requires
|
||||
the user executing <command>nixos-rebuild</command> (usually root) to be
|
||||
configured as a trusted user in the Nix daemon. This can be achieved by
|
||||
using the <literal>nix.trustedUsers</literal> NixOS option. Examples
|
||||
using the <literal>nix.settings.trusted-users</literal> NixOS option. Example
|
||||
values for that option are described in the <literal>Remote builds
|
||||
chapter</literal> in the Nix manual, (i.e. <command>--builders
|
||||
"ssh://bigbrother x86_64-linux"</command>). By specifying an empty string
|
||||
|
|
|
@ -60,7 +60,7 @@ In addition to numerous new and upgraded packages, this release has the followin
|
|||
## Backward Incompatibilities {#sec-release-22.05-incompatibilities}
|
||||
|
||||
- `pkgs.ghc` now refers to `pkgs.targetPackages.haskellPackages.ghc`.
|
||||
This *only* makes a difference if you are cross-compiling and will
|
||||
This _only_ makes a difference if you are cross-compiling and will
|
||||
ensure that `pkgs.ghc` always runs on the host platform and compiles
|
||||
for the target platform (similar to `pkgs.gcc` for example).
|
||||
`haskellPackages.ghc` still behaves as before, running on the build
|
||||
|
@ -141,7 +141,11 @@ In addition to numerous new and upgraded packages, this release has the followin
|
|||
`pkgs.noto-fonts-cjk` is currently an alias of `pkgs.noto-fonts-cjk-sans` and
|
||||
doesn't include serif fonts.
|
||||
|
||||
- The interface that allows activation scripts to restart units has been reworked. Restarting and reloading is now done by a single file `/run/nixos/activation-restart-list` that honors `restartIfChanged` and `reloadIfChanged` of the units.
|
||||
- `switch-to-configuration` (the script that is run when running `nixos-rebuild switch` for example) has been reworked
|
||||
* The interface that allows activation scripts to restart units has been streamlined. Restarting and reloading is now done by a single file `/run/nixos/activation-restart-list` that honors `restartIfChanged` and `reloadIfChanged` of the units.
|
||||
* The script now uses a proper ini-file parser to parse systemd units. Some values are now only searched in one section instead of in the entire unit. This is only relevant for units that don't use the NixOS systemd module.
|
||||
* `RefuseManualStop`, `X-OnlyManualStart`, `X-StopOnRemoval`, `X-StopOnReconfiguration` are only searched in the `[Unit]` section
|
||||
* `X-ReloadIfChanged`, `X-RestartIfChanged`, `X-StopIfChanged` are only searched in the `[Service]` section
|
||||
|
||||
- The `services.bookstack.cacheDir` option has been removed, since the
|
||||
cache directory is now handled by systemd.
|
||||
|
@ -183,8 +187,20 @@ In addition to numerous new and upgraded packages, this release has the followin
|
|||
|
||||
Similarly [virtualisation.vmVariantWithBootloader](#opt-virtualisation.vmVariantWithBootLoader) was added.
|
||||
|
||||
- The configuration portion of the `nix-daemon` module has been reworked and exposed as [nix.settings](options.html#opt-nix-settings):
|
||||
* Legacy options have been mapped to the corresponding options under [nix.settings](options.html#opt-nix.settings) but may be deprecated in the future (see the sketch below).
|
||||
* [nix.buildMachines.publicHostKey](options.html#opt-nix.buildMachines.publicHostKey) has been added.
|
||||
|
||||
- The `writers.writePyPy2`/`writers.writePyPy3` and corresponding `writers.writePyPy2Bin`/`writers.writePyPy3Bin` convenience functions to create executable Python 2/3 scripts using the PyPy interpreter were added.
|
||||
|
||||
- If you are using Wayland you can choose to use the Ozone Wayland support
|
||||
in Chrome and several Electron apps by setting the environment variable
|
||||
`NIXOS_OZONE_WL=1` (for example via
|
||||
`environment.sessionVariables.NIXOS_OZONE_WL = "1"`).
|
||||
This is not enabled by default because Ozone Wayland is
|
||||
still under heavy development and behavior is not always flawless.
|
||||
Furthermore, not all Electron apps use the latest Electron versions.
|
||||
|
||||
- The `influxdb2` package was split into `influxdb2-server` and
|
||||
`influxdb2-cli`, matching the split that took place upstream. A
|
||||
combined `influxdb2` package is still provided in this release for
|
||||
|
@ -236,11 +252,14 @@ In addition to numerous new and upgraded packages, this release has the followin
|
|||
Plugins are automatically repackaged using autoPatchelf.
|
||||
|
||||
- The `zrepl` package has been updated from 0.4.0 to 0.5:
|
||||
* The RPC protocol version was bumped; all zrepl daemons in a setup must be updated and restarted before replication can resume.
|
||||
* A bug involving encrypt-on-receive has been fixed. Read the [zrepl documentation](https://zrepl.github.io/configuration/sendrecvoptions.html#job-recv-options-placeholder) and check the output of `zfs get -r encryption,zrepl:placeholder PATH_TO_ROOTFS` on the receiver.
|
||||
|
||||
- The RPC protocol version was bumped; all zrepl daemons in a setup must be updated and restarted before replication can resume.
|
||||
- A bug involving encrypt-on-receive has been fixed. Read the [zrepl documentation](https://zrepl.github.io/configuration/sendrecvoptions.html#job-recv-options-placeholder) and check the output of `zfs get -r encryption,zrepl:placeholder PATH_TO_ROOTFS` on the receiver.
|
||||
|
||||
- Renamed option `services.openssh.challengeResponseAuthentication` to `services.openssh.kbdInteractiveAuthentication`.
|
||||
The reason is that the old name has been deprecated upstream.
|
||||
Using the old option name will still work, but produce a warning.
|
||||
|
||||
- `programs.tmux` has a new option `plugins` that accepts a list of packages from the `tmuxPlugins` group. The specified packages are added to the system and loaded by `tmux`.
|
||||
|
||||
<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
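A minimal sketch of the reworked `nix.settings` interface mentioned above (the values are placeholders):

```nix
{
  nix.settings = {
    max-jobs = 8;
    trusted-users = [ "root" "@wheel" ];
  };
}
```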
|
||||
|
|
|
@ -98,7 +98,7 @@ in rec {
|
|||
|
||||
description = mkOption {
|
||||
default = "";
|
||||
type = types.str;
|
||||
type = types.singleLineStr;
|
||||
description = "Description of this unit used in systemd messages and progress indicators.";
|
||||
};
|
||||
|
||||
|
|
|
@ -33,6 +33,22 @@ class EnvDefault(argparse.Action):
|
|||
setattr(namespace, self.dest, values)
|
||||
|
||||
|
||||
def writeable_dir(arg: str) -> Path:
|
||||
"""Raises an ArgumentTypeError if the given argument isn't a writeable directory
|
||||
Note: We want to fail as early as possible if a directory isn't writeable,
|
||||
since an executed nixos-test could fail (very late) because of the test-driver
|
||||
writing in a directory without proper permissions.
|
||||
"""
|
||||
path = Path(arg)
|
||||
if not path.is_dir():
|
||||
raise argparse.ArgumentTypeError("{0} is not a directory".format(path))
|
||||
if not os.access(path, os.W_OK):
|
||||
raise argparse.ArgumentTypeError(
|
||||
"{0} is not a writeable directory".format(path)
|
||||
)
|
||||
return path
|
||||
|
||||
|
||||
def main() -> None:
|
||||
arg_parser = argparse.ArgumentParser(prog="nixos-test-driver")
|
||||
arg_parser.add_argument(
|
||||
|
@ -45,7 +61,7 @@ def main() -> None:
|
|||
"-I",
|
||||
"--interactive",
|
||||
help="drop into a python repl and run the tests interactively",
|
||||
action="store_true",
|
||||
action=argparse.BooleanOptionalAction,
|
||||
)
|
||||
arg_parser.add_argument(
|
||||
"--start-scripts",
|
||||
|
@ -63,6 +79,14 @@ def main() -> None:
|
|||
nargs="*",
|
||||
help="vlans to span by the driver",
|
||||
)
|
||||
arg_parser.add_argument(
|
||||
"-o",
|
||||
"--output_directory",
|
||||
help="""The path to the directory where outputs copied from the VM will be placed.
|
||||
e.g. by Machine.copy_from_vm or Machine.screenshot""",
|
||||
default=Path.cwd(),
|
||||
type=writeable_dir,
|
||||
)
|
||||
arg_parser.add_argument(
|
||||
"testscript",
|
||||
action=EnvDefault,
|
||||
|
@ -77,7 +101,11 @@ def main() -> None:
|
|||
rootlog.info("Machine state will be reset. To keep it, pass --keep-vm-state")
|
||||
|
||||
with Driver(
|
||||
args.start_scripts, args.vlans, args.testscript.read_text(), args.keep_vm_state
|
||||
args.start_scripts,
|
||||
args.vlans,
|
||||
args.testscript.read_text(),
|
||||
args.output_directory.resolve(),
|
||||
args.keep_vm_state,
|
||||
) as driver:
|
||||
if args.interactive:
|
||||
ptpython.repl.embed(driver.test_symbols(), {})
|
||||
|
@ -94,7 +122,7 @@ def generate_driver_symbols() -> None:
|
|||
in users' test scripts. That list is then used by pyflakes to lint those
|
||||
scripts.
|
||||
"""
|
||||
d = Driver([], [], "")
|
||||
d = Driver([], [], "", Path())
|
||||
test_symbols = d.test_symbols()
|
||||
with open("driver-symbols", "w") as fp:
|
||||
fp.write(",".join(test_symbols.keys()))
|
||||
|
|
|
@ -10,6 +10,28 @@ from test_driver.vlan import VLan
|
|||
from test_driver.polling_condition import PollingCondition
|
||||
|
||||
|
||||
def get_tmp_dir() -> Path:
|
||||
"""Returns a temporary directory that is defined by TMPDIR, TEMP, TMP or CWD
|
||||
Raises an exception in case the retrieved temporary directory is not writeable
|
||||
See https://docs.python.org/3/library/tempfile.html#tempfile.gettempdir
|
||||
"""
|
||||
tmp_dir = Path(tempfile.gettempdir())
|
||||
tmp_dir.mkdir(mode=0o700, exist_ok=True)
|
||||
if not tmp_dir.is_dir():
|
||||
raise NotADirectoryError(
|
||||
"The directory defined by TMPDIR, TEMP, TMP or CWD: {0} is not a directory".format(
|
||||
tmp_dir
|
||||
)
|
||||
)
|
||||
if not os.access(tmp_dir, os.W_OK):
|
||||
raise PermissionError(
|
||||
"The directory defined by TMPDIR, TEMP, TMP, or CWD: {0} is not writeable".format(
|
||||
tmp_dir
|
||||
)
|
||||
)
|
||||
return tmp_dir
|
||||
|
||||
|
||||
class Driver:
|
||||
"""A handle to the driver that sets up the environment
|
||||
and runs the tests"""
|
||||
|
@ -24,12 +46,13 @@ class Driver:
|
|||
start_scripts: List[str],
|
||||
vlans: List[int],
|
||||
tests: str,
|
||||
out_dir: Path,
|
||||
keep_vm_state: bool = False,
|
||||
):
|
||||
self.tests = tests
|
||||
self.out_dir = out_dir
|
||||
|
||||
tmp_dir = Path(os.environ.get("TMPDIR", tempfile.gettempdir()))
|
||||
tmp_dir.mkdir(mode=0o700, exist_ok=True)
|
||||
tmp_dir = get_tmp_dir()
|
||||
|
||||
with rootlog.nested("start all VLans"):
|
||||
self.vlans = [VLan(nr, tmp_dir) for nr in vlans]
|
||||
|
@ -47,6 +70,7 @@ class Driver:
|
|||
name=cmd.machine_name,
|
||||
tmp_dir=tmp_dir,
|
||||
callbacks=[self.check_polling_conditions],
|
||||
out_dir=self.out_dir,
|
||||
)
|
||||
for cmd in cmd(start_scripts)
|
||||
]
|
||||
|
@ -141,8 +165,8 @@ class Driver:
|
|||
"Using legacy create_machine(), please instantiate the"
|
||||
"Machine class directly, instead"
|
||||
)
|
||||
tmp_dir = Path(os.environ.get("TMPDIR", tempfile.gettempdir()))
|
||||
tmp_dir.mkdir(mode=0o700, exist_ok=True)
|
||||
|
||||
tmp_dir = get_tmp_dir()
|
||||
|
||||
if args.get("startCommand"):
|
||||
start_command: str = args.get("startCommand", "")
|
||||
|
@ -154,6 +178,7 @@ class Driver:
|
|||
|
||||
return Machine(
|
||||
tmp_dir=tmp_dir,
|
||||
out_dir=self.out_dir,
|
||||
start_command=cmd,
|
||||
name=name,
|
||||
keep_vm_state=args.get("keep_vm_state", False),
|
||||
|
|
|
@ -297,6 +297,7 @@ class Machine:
|
|||
the machine lifecycle with the help of a start script / command."""
|
||||
|
||||
name: str
|
||||
out_dir: Path
|
||||
tmp_dir: Path
|
||||
shared_dir: Path
|
||||
state_dir: Path
|
||||
|
@ -325,6 +326,7 @@ class Machine:
|
|||
|
||||
def __init__(
|
||||
self,
|
||||
out_dir: Path,
|
||||
tmp_dir: Path,
|
||||
start_command: StartCommand,
|
||||
name: str = "machine",
|
||||
|
@ -332,6 +334,7 @@ class Machine:
|
|||
allow_reboot: bool = False,
|
||||
callbacks: Optional[List[Callable]] = None,
|
||||
) -> None:
|
||||
self.out_dir = out_dir
|
||||
self.tmp_dir = tmp_dir
|
||||
self.keep_vm_state = keep_vm_state
|
||||
self.allow_reboot = allow_reboot
|
||||
|
@ -702,10 +705,9 @@ class Machine:
|
|||
self.connected = True
|
||||
|
||||
def screenshot(self, filename: str) -> None:
|
||||
out_dir = os.environ.get("out", os.getcwd())
|
||||
word_pattern = re.compile(r"^\w+$")
|
||||
if word_pattern.match(filename):
|
||||
filename = os.path.join(out_dir, "{}.png".format(filename))
|
||||
filename = os.path.join(self.out_dir, "{}.png".format(filename))
|
||||
tmp = "{}.ppm".format(filename)
|
||||
|
||||
with self.nested(
|
||||
|
@ -756,7 +758,6 @@ class Machine:
|
|||
all the VMs (using a temporary directory).
|
||||
"""
|
||||
# Compute the source, target, and intermediate shared file names
|
||||
out_dir = Path(os.environ.get("out", os.getcwd()))
|
||||
vm_src = Path(source)
|
||||
with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
|
||||
shared_temp = Path(shared_td)
|
||||
|
@ -766,7 +767,7 @@ class Machine:
|
|||
# Copy the file to the shared directory inside VM
|
||||
self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
|
||||
self.succeed(make_command(["cp", "-r", vm_src, vm_intermediate]))
|
||||
abs_target = out_dir / target_dir / vm_src.name
|
||||
abs_target = self.out_dir / target_dir / vm_src.name
|
||||
abs_target.parent.mkdir(exist_ok=True, parents=True)
|
||||
# Copy the file from the shared directory outside VM
|
||||
if intermediate.is_dir():
|
||||
|
|
|
@ -30,7 +30,7 @@ rec {
|
|||
# effectively mute the XMLLogger
|
||||
export LOGFILE=/dev/null
|
||||
|
||||
${driver}/bin/nixos-test-driver
|
||||
${driver}/bin/nixos-test-driver -o $out
|
||||
'';
|
||||
|
||||
passthru = driver.passthru // {
|
||||
|
@ -51,6 +51,7 @@ rec {
|
|||
, enableOCR ? false
|
||||
, skipLint ? false
|
||||
, passthru ? {}
|
||||
, interactive ? false
|
||||
}:
|
||||
let
|
||||
# Reifies and correctly wraps the python test driver for
|
||||
|
@ -139,7 +140,8 @@ rec {
|
|||
wrapProgram $out/bin/nixos-test-driver \
|
||||
--set startScripts "''${vmStartScripts[*]}" \
|
||||
--set testScript "$out/test-script" \
|
||||
--set vlans '${toString vlans}'
|
||||
--set vlans '${toString vlans}' \
|
||||
${lib.optionalString (interactive) "--add-flags --interactive"}
|
||||
'');
|
||||
|
||||
# Make a full-blown test
|
||||
|
@ -217,6 +219,7 @@ rec {
|
|||
testName = name;
|
||||
qemu_pkg = pkgs.qemu;
|
||||
nodes = nodes pkgs.qemu;
|
||||
interactive = true;
|
||||
};
|
||||
|
||||
test =
|
||||
|
|
|
@ -17,7 +17,7 @@ in
|
|||
description = "Azure NixOS Test User";
|
||||
openssh.authorizedKeys.keys = [ (builtins.readFile ~/.ssh/id_ed25519.pub) ];
|
||||
};
|
||||
nix.trustedUsers = [ username ];
|
||||
nix.settings.trusted-users = [ username ];
|
||||
|
||||
virtualisation.azureImage.diskSize = 2500;
|
||||
|
||||
|
|
|
@ -46,25 +46,36 @@ with lib;
|
|||
let
|
||||
cfg = config.xdg.portal;
|
||||
packages = [ pkgs.xdg-desktop-portal ] ++ cfg.extraPortals;
|
||||
joinedPortals = pkgs.symlinkJoin {
|
||||
joinedPortals = pkgs.buildEnv {
|
||||
name = "xdg-portals";
|
||||
paths = cfg.extraPortals;
|
||||
paths = packages;
|
||||
pathsToLink = [ "/share/xdg-desktop-portal/portals" "/share/applications" ];
|
||||
};
|
||||
|
||||
in mkIf cfg.enable {
|
||||
in
|
||||
mkIf cfg.enable {
|
||||
|
||||
assertions = [
|
||||
{ assertion = (cfg.gtkUsePortal -> cfg.extraPortals != []);
|
||||
message = "Setting xdg.portal.gtkUsePortal to true requires a portal implementation in xdg.portal.extraPortals such as xdg-desktop-portal-gtk or xdg-desktop-portal-kde.";
|
||||
{
|
||||
assertion = cfg.extraPortals != [ ];
|
||||
message = "Setting xdg.portal.enable to true requires a portal implementation in xdg.portal.extraPortals such as xdg-desktop-portal-gtk or xdg-desktop-portal-kde.";
|
||||
}
|
||||
];
|
||||
|
||||
services.dbus.packages = packages;
|
||||
systemd.packages = packages;
|
||||
|
||||
environment.sessionVariables = {
|
||||
environment = {
|
||||
# fixes screen sharing on plasmawayland on non-chromium apps by linking
|
||||
# share/applications/*.desktop files
|
||||
# see https://github.com/NixOS/nixpkgs/issues/145174
|
||||
systemPackages = [ joinedPortals ];
|
||||
pathsToLink = [ "/share/applications" ];
|
||||
|
||||
sessionVariables = {
|
||||
GTK_USE_PORTAL = mkIf cfg.gtkUsePortal "1";
|
||||
XDG_DESKTOP_PORTAL_DIR = "${joinedPortals}/share/xdg-desktop-portal/portals";
|
||||
};
|
||||
};
|
||||
};
|
||||
}
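# Example consumer configuration (sketch): with the assertion above, enabling the
# portal requires at least one implementation in xdg.portal.extraPortals, e.g.
#   xdg.portal.enable = true;
#   xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];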
|
||||
|
|
|
@ -25,7 +25,7 @@ with lib;
|
|||
|
||||
installer.cloneConfigExtra = ''
|
||||
# Let demo build as a trusted user.
|
||||
# nix.trustedUsers = [ "demo" ];
|
||||
# nix.settings.trusted-users = [ "demo" ];
|
||||
|
||||
# Mount a VirtualBox shared folder.
|
||||
# This is configurable in the VirtualBox menu at
|
||||
|
|
|
@ -61,85 +61,17 @@ let
|
|||
in scrubbedEval.options;
|
||||
baseOptionsJSON =
|
||||
let
|
||||
filterIntoStore =
|
||||
filter =
|
||||
builtins.filterSource
|
||||
(n: t:
|
||||
(t == "directory" -> baseNameOf n != "tests")
|
||||
&& (t == "file" -> hasSuffix ".nix" n)
|
||||
);
|
||||
|
||||
# Figure out if Nix runs in pure evaluation mode. May return true in
|
||||
# impure mode, but this is highly unlikely.
|
||||
# We need to know because of https://github.com/NixOS/nix/issues/1888
|
||||
# and https://github.com/NixOS/nix/issues/5868
|
||||
isPureEval = builtins.getEnv "PATH" == "" && builtins.getEnv "_" == "";
|
||||
|
||||
# Return a nixpkgs subpath with minimal copying.
|
||||
#
|
||||
# The sources for the base options json derivation can come in one of
|
||||
# two forms:
|
||||
# - single source: a store path with all of nixpkgs, postfix with
|
||||
# subpaths to access various directories. This has the benefit of
|
||||
# not creating copies of these subtrees in the Nix store, but
|
||||
# can cause unnecessary rebuilds if you update the Nixpkgs `pkgs`
|
||||
# tree often.
|
||||
# - split sources: multiple store paths with subdirectories of
|
||||
# nixpkgs that exclude the bulk of the pkgs directory.
|
||||
# This requires more copying and hashing during evaluation but
|
||||
# requires fewer files to be copied. This method produces fewer
|
||||
# unnecessary rebuilds of the base options json.
|
||||
#
|
||||
# Flake
|
||||
#
|
||||
# Flakes always put a copy of the full nixpkgs sources in the store,
|
||||
# so we can use the "single source" method. This method is ideal
|
||||
# for using nixpkgs as a dependency, as the base options json will be
|
||||
# substitutable from cache.nixos.org.
|
||||
#
|
||||
# This requires that the `self.outPath` is wired into `pkgs` correctly,
|
||||
# which is done for you if `pkgs` comes from the `lib.nixosSystem` or
|
||||
# `legacyPackages` flake attributes.
|
||||
#
|
||||
# Other Nixpkgs invocation
|
||||
#
|
||||
# If you do not use the known-correct flake attributes, but rather
|
||||
# invoke Nixpkgs yourself, set `config.path` to the correct path value,
|
||||
# e.g. `import nixpkgs { config.path = nixpkgs; }`.
|
||||
#
|
||||
# Choosing between single or split source paths
|
||||
#
|
||||
# We make assumptions based on the type and contents of `pkgs.path`.
|
||||
# By passing a different `config.path` to Nixpkgs, you can influence
|
||||
# how your documentation cache is evaluated and rebuilt.
|
||||
#
|
||||
# Single source
|
||||
# - If pkgs.path is a string containing a store path, the code has no
|
||||
# choice but to create this store path, if it hasn't already been.
|
||||
# We assume that the "single source" method is most efficient.
|
||||
# - If pkgs.path is a path value that is a store path,
|
||||
# we try to convert it to a string with context without copying.
|
||||
# This occurs for example when nixpkgs was fetched and using its
|
||||
# default `config.path`, which is `./.`.
|
||||
# Nix currently does not allow this conversion when evaluating in
|
||||
# pure mode. If the conversion is not possible, we use the
|
||||
# "split source" method.
|
||||
#
|
||||
# Split source
|
||||
# - If pkgs.path is a path value that is not a store path, we assume
|
||||
# that it's unlikely for all of nixpkgs to end up in the store for
|
||||
# other reasons and try to keep both the copying and rebuilds low.
|
||||
pull =
|
||||
if builtins.typeOf pkgs.path == "string" && isStorePath pkgs.path then
|
||||
dir: "${pkgs.path}/${dir}"
|
||||
else if !isPureEval && isStorePath pkgs.path then
|
||||
dir: "${builtins.storePath pkgs.path}/${dir}"
|
||||
else
|
||||
dir: filterIntoStore "${toString pkgs.path}/${dir}";
|
||||
in
|
||||
pkgs.runCommand "lazy-options.json" {
|
||||
libPath = pull "lib";
|
||||
pkgsLibPath = pull "pkgs/pkgs-lib";
|
||||
nixosPath = pull "nixos";
|
||||
libPath = filter "${toString pkgs.path}/lib";
|
||||
pkgsLibPath = filter "${toString pkgs.path}/pkgs/pkgs-lib";
|
||||
nixosPath = filter "${toString pkgs.path}/nixos";
|
||||
modules = map (p: ''"${removePrefix "${modulesPath}/" (toString p)}"'') docModules.lazy;
|
||||
} ''
|
||||
export NIX_STORE_DIR=$TMPDIR/store
|
||||
|
|
|
@ -59,8 +59,6 @@ let
|
|||
inherit (cfg) config overlays localSystem crossSystem;
|
||||
};
|
||||
|
||||
# NOTE: flake.nix assumes that nixpkgs.config is only used with ../../..
|
||||
# as nixpkgs.config.path should be equivalent to ../../..
|
||||
finalPkgs = if opt.pkgs.isDefined then cfg.pkgs.appendOverlays cfg.overlays else defaultPkgs;
|
||||
|
||||
in
|
||||
|
|
|
@ -852,7 +852,6 @@
|
|||
./services/networking/quassel.nix
|
||||
./services/networking/quorum.nix
|
||||
./services/networking/quicktun.nix
|
||||
./services/networking/racoon.nix
|
||||
./services/networking/radicale.nix
|
||||
./services/networking/radvd.nix
|
||||
./services/networking/rdnssd.nix
|
||||
|
|
|
@ -17,7 +17,7 @@ with lib;
|
|||
|
||||
boot.kernelPackages = mkDefault pkgs.linuxPackages_hardened;
|
||||
|
||||
nix.allowedUsers = mkDefault [ "@users" ];
|
||||
nix.settings.allowed-users = mkDefault [ "@users" ];
|
||||
|
||||
environment.memoryAllocator.provider = mkDefault "scudo";
|
||||
environment.variables.SCUDO_OPTIONS = mkDefault "ZeroContents=1";
|
||||
|
|
|
@ -52,6 +52,12 @@ let
|
|||
set -s escape-time ${toString cfg.escapeTime}
|
||||
set -g history-limit ${toString cfg.historyLimit}
|
||||
|
||||
${lib.optionalString (cfg.plugins != []) ''
|
||||
# Run plugins
|
||||
${lib.concatMapStringsSep "\n" (x: "run-shell ${x.rtp}") cfg.plugins}
|
||||
|
||||
''}
|
||||
|
||||
${cfg.extraConfig}
|
||||
'';
|
||||
|
||||
|
@ -165,6 +171,13 @@ in {
|
|||
downside it doesn't survive user logout.
|
||||
'';
|
||||
};
|
||||
|
||||
plugins = mkOption {
|
||||
default = [];
|
||||
type = types.listOf types.package;
|
||||
description = "List of plugins to install.";
|
||||
example = lib.literalExpression "[ pkgs.tmuxPlugins.nord ]";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -174,7 +187,7 @@ in {
|
|||
environment = {
|
||||
etc."tmux.conf".text = tmuxConf;
|
||||
|
||||
systemPackages = [ pkgs.tmux ];
|
||||
systemPackages = [ pkgs.tmux ] ++ cfg.plugins;
|
||||
|
||||
variables = {
|
||||
TMUX_TMPDIR = lib.optional cfg.secureSocket ''''${XDG_RUNTIME_DIR:-"/run/user/$(id -u)"}'';
|
||||
|
|
third_party/nixpkgs/nixos/modules/rename.nix (vendored): 3 changes
|
@ -80,6 +80,9 @@ with lib;
|
|||
libinput and synaptics.
|
||||
'')
|
||||
(mkRemovedOptionModule [ "virtualisation" "rkt" ] "The rkt module has been removed, it was archived by upstream")
|
||||
(mkRemovedOptionModule [ "services" "racoon" ] ''
|
||||
The racoon module has been removed, because the software project was abandoned upstream.
|
||||
'')
|
||||
|
||||
# Do NOT add any option renames here, see top of the file
|
||||
];
|
||||
|
|
|
@ -123,8 +123,8 @@ with lib;
|
|||
boot.kernel.sysctl."user.max_user_namespaces" = 0;
|
||||
|
||||
assertions = [
|
||||
{ assertion = config.nix.useSandbox -> config.security.allowUserNamespaces;
|
||||
message = "`nix.useSandbox = true` conflicts with `!security.allowUserNamespaces`.";
|
||||
{ assertion = config.nix.settings.sandbox -> config.security.allowUserNamespaces;
|
||||
message = "`nix.settings.sandbox = true` conflicts with `!security.allowUserNamespaces`.";
|
||||
}
|
||||
];
|
||||
})
|
||||
|
|
|
@ -147,7 +147,7 @@ in
|
|||
concurrent = mkOption {
|
||||
type = types.int;
|
||||
default = 1;
|
||||
example = literalExpression "config.nix.maxJobs";
|
||||
example = literalExpression "config.nix.settings.max-jobs";
|
||||
description = ''
|
||||
Limits how many jobs globally can be run concurrently.
|
||||
The most upper limit of jobs using all defined runners.
|
||||
|
|
|
@ -67,7 +67,7 @@ in
|
|||
|
||||
# Trusted user allows simplified configuration and better performance
|
||||
# when operating in a cluster.
|
||||
nix.trustedUsers = [ config.systemd.services.hercules-ci-agent.serviceConfig.User ];
|
||||
nix.settings.trusted-users = [ config.systemd.services.hercules-ci-agent.serviceConfig.User ];
|
||||
services.hercules-ci-agent = {
|
||||
settings = {
|
||||
nixUserIsTrusted = true;
|
||||
|
|
|
@ -258,8 +258,6 @@ in
|
|||
uid = config.ids.uids.hydra-www;
|
||||
};
|
||||
|
||||
nix.trustedUsers = [ "hydra-queue-runner" ];
|
||||
|
||||
services.hydra.extraConfig =
|
||||
''
|
||||
using_frontend_proxy = 1
|
||||
|
@ -277,16 +275,21 @@ in
|
|||
|
||||
environment.variables = hydraEnv;
|
||||
|
||||
nix.extraOptions = ''
|
||||
keep-outputs = true
|
||||
keep-derivations = true
|
||||
nix.settings = mkMerge [
|
||||
{
|
||||
keep-outputs = true;
|
||||
keep-derivations = true;
|
||||
trusted-users = [ "hydra-queue-runner" ];
|
||||
}
|
||||
|
||||
|
||||
'' + optionalString (versionOlder (getVersion config.nix.package.out) "2.4pre") ''
|
||||
(mkIf (versionOlder (getVersion config.nix.package.out) "2.4pre")
|
||||
{
|
||||
# The default (`true') slows Nix down a lot since the build farm
|
||||
# has so many GC roots.
|
||||
gc-check-reachability = false
|
||||
'';
|
||||
gc-check-reachability = false;
|
||||
}
|
||||
)
|
||||
];
|
||||
|
||||
systemd.services.hydra-init =
|
||||
{ wantedBy = [ "multi-user.target" ];
|
||||
|
|
|
@ -79,10 +79,7 @@ in {
|
|||
|
||||
config = mkIf cfg.enable {
|
||||
systemd.services.mx-puppet-discord = {
|
||||
description = ''
|
||||
mx-puppet-discord is a discord puppeting bridge for matrix.
|
||||
It handles bridging private and group DMs, as well as Guilds (servers).
|
||||
'';
|
||||
description = "Matrix to Discord puppeting bridge";
|
||||
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
wants = [ "network-online.target" ] ++ cfg.serviceDependencies;
|
||||
|
|
|
@ -6,20 +6,20 @@ let
|
|||
|
||||
cfg = config.nix;
|
||||
|
||||
nix = cfg.package.out;
|
||||
nixPackage = cfg.package.out;
|
||||
|
||||
nixVersion = getVersion nix;
|
||||
|
||||
isNix23 = versionAtLeast nixVersion "2.3pre";
|
||||
isNixAtLeast = versionAtLeast (getVersion nixPackage);
|
||||
|
||||
makeNixBuildUser = nr: {
|
||||
name = "nixbld${toString nr}";
|
||||
value = {
|
||||
description = "Nix build user ${toString nr}";
|
||||
|
||||
/* For consistency with the setgid(2), setuid(2), and setgroups(2)
|
||||
/*
|
||||
For consistency with the setgid(2), setuid(2), and setgroups(2)
|
||||
calls in `libstore/build.cc', don't add any supplementary group
|
||||
here except "nixbld". */
|
||||
here except "nixbld".
|
||||
*/
|
||||
uid = builtins.add config.ids.uids.nixbld nr;
|
||||
isSystemUser = true;
|
||||
group = "nixbld";
|
||||
|
@ -30,43 +30,82 @@ let
|
|||
nixbldUsers = listToAttrs (map makeNixBuildUser (range 1 cfg.nrBuildUsers));
|
||||
|
||||
nixConf =
|
||||
assert versionAtLeast nixVersion "2.2";
|
||||
pkgs.runCommand "nix.conf" { preferLocalBuild = true; extraOptions = cfg.extraOptions; } (
|
||||
''
|
||||
cat > $out <<END
|
||||
assert isNixAtLeast "2.2";
|
||||
let
|
||||
|
||||
mkValueString = v:
|
||||
if v == null then ""
|
||||
else if isInt v then toString v
|
||||
else if isBool v then boolToString v
|
||||
else if isFloat v then floatToString v
|
||||
else if isList v then toString v
|
||||
else if isDerivation v then toString v
|
||||
else if builtins.isPath v then toString v
|
||||
else if isString v then v
|
||||
else if isCoercibleToString v then toString v
|
||||
else abort "The nix conf value: ${toPretty {} v} can not be encoded";
|
||||
|
||||
mkKeyValue = k: v: "${escape [ "=" ] k} = ${mkValueString v}";
|
||||
|
||||
mkKeyValuePairs = attrs: concatStringsSep "\n" (mapAttrsToList mkKeyValue attrs);
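# For example, mkKeyValuePairs { sandbox = true; trusted-users = [ "root" "@wheel" ]; }
# renders the lines `sandbox = true` and `trusted-users = root @wheel`.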
|
||||
|
||||
in
|
||||
pkgs.writeTextFile {
|
||||
name = "nix.conf";
|
||||
text = ''
|
||||
# WARNING: this file is generated from the nix.* options in
|
||||
# your NixOS configuration, typically
|
||||
# /etc/nixos/configuration.nix. Do not edit it!
|
||||
build-users-group = nixbld
|
||||
max-jobs = ${toString (cfg.maxJobs)}
|
||||
cores = ${toString (cfg.buildCores)}
|
||||
sandbox = ${if (builtins.isBool cfg.useSandbox) then boolToString cfg.useSandbox else cfg.useSandbox}
|
||||
extra-sandbox-paths = ${toString cfg.sandboxPaths}
|
||||
substituters = ${toString cfg.binaryCaches}
|
||||
trusted-substituters = ${toString cfg.trustedBinaryCaches}
|
||||
trusted-public-keys = ${toString cfg.binaryCachePublicKeys}
|
||||
auto-optimise-store = ${boolToString cfg.autoOptimiseStore}
|
||||
require-sigs = ${boolToString cfg.requireSignedBinaryCaches}
|
||||
trusted-users = ${toString cfg.trustedUsers}
|
||||
allowed-users = ${toString cfg.allowedUsers}
|
||||
${optionalString (!cfg.distributedBuilds) ''
|
||||
builders =
|
||||
''}
|
||||
system-features = ${toString cfg.systemFeatures}
|
||||
${optionalString isNix23 ''
|
||||
sandbox-fallback = false
|
||||
''}
|
||||
$extraOptions
|
||||
END
|
||||
'' + optionalString cfg.checkConfig (
|
||||
${mkKeyValuePairs cfg.settings}
|
||||
${cfg.extraOptions}
|
||||
'';
|
||||
checkPhase =
|
||||
if pkgs.stdenv.hostPlatform != pkgs.stdenv.buildPlatform then ''
|
||||
echo "Ignore nix.checkConfig when cross-compiling"
|
||||
'' else ''
|
||||
echo "Checking that Nix can read nix.conf..."
|
||||
echo "Ignoring validation for cross-compilation"
|
||||
''
|
||||
else ''
|
||||
echo "Validating generated nix.conf"
|
||||
ln -s $out ./nix.conf
|
||||
NIX_CONF_DIR=$PWD ${cfg.package}/bin/nix show-config ${optionalString isNix23 "--no-net --option experimental-features nix-command"} >/dev/null
|
||||
'')
|
||||
);
|
||||
set -e
|
||||
set +o pipefail
|
||||
NIX_CONF_DIR=$PWD \
|
||||
${cfg.package}/bin/nix show-config ${optionalString (isNixAtLeast "2.3pre") "--no-net --option experimental-features nix-command"} \
|
||||
|& sed -e 's/^warning:/error:/' \
|
||||
| (! grep '${if cfg.checkConfig then "^error:" else "^error: unknown setting"}')
|
||||
set -o pipefail
|
||||
'';
|
||||
};
|
||||
|
||||
legacyConfMappings = {
|
||||
useSandbox = "sandbox";
|
||||
buildCores = "cores";
|
||||
maxJobs = "max-jobs";
|
||||
sandboxPaths = "extra-sandbox-paths";
|
||||
binaryCaches = "substituters";
|
||||
trustedBinaryCaches = "trusted-substituters";
|
||||
binaryCachePublicKeys = "trusted-public-keys";
|
||||
autoOptimiseStore = "auto-optimise-store";
|
||||
requireSignedBinaryCaches = "require-sigs";
|
||||
trustedUsers = "trusted-users";
|
||||
allowedUsers = "allowed-users";
|
||||
systemFeatures = "system-features";
|
||||
};
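# Through the mkRenamedOptionModule entries generated from this mapping (see the
# imports list below), e.g. a legacy nix.maxJobs = 8; is translated to
# nix.settings.max-jobs = 8;, and nix.trustedUsers to nix.settings.trusted-users.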
|
||||
|
||||
semanticConfType = with types;
|
||||
let
|
||||
confAtom = nullOr
|
||||
(oneOf [
|
||||
bool
|
||||
int
|
||||
float
|
||||
str
|
||||
path
|
||||
package
|
||||
]) // {
|
||||
description = "Nix config atom (null, bool, int, float, str, path or package)";
|
||||
};
|
||||
in
|
||||
attrsOf (either confAtom (listOf confAtom));
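# e.g. { sandbox = true; substituters = [ "https://cache.nixos.org/" ]; }
# is a valid value of this (freeform) settings type.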
|
||||
|
||||
in
|
||||
|
||||
|
@ -76,7 +115,7 @@ in
|
|||
(mkRenamedOptionModule [ "nix" "chrootDirs" ] [ "nix" "sandboxPaths" ])
|
||||
(mkRenamedOptionModule [ "nix" "daemonIONiceLevel" ] [ "nix" "daemonIOSchedPriority" ])
|
||||
(mkRemovedOptionModule [ "nix" "daemonNiceLevel" ] "Consider nix.daemonCPUSchedPolicy instead.")
|
||||
];
|
||||
] ++ mapAttrsToList (oldConf: newConf: mkRenamedOptionModule [ "nix" oldConf ] [ "nix" "settings" newConf ]) legacyConfMappings;
|
||||
|
||||
###### interface
|
||||
|
||||
|
@ -102,81 +141,6 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
maxJobs = mkOption {
|
||||
type = types.either types.int (types.enum ["auto"]);
|
||||
default = "auto";
|
||||
example = 64;
|
||||
description = ''
|
||||
This option defines the maximum number of jobs that Nix will try to
|
||||
build in parallel. The default is auto, which means it will use all
|
||||
available logical cores. It is recommended to set it to the total
|
||||
number of logical cores in your system (e.g., 16 for two CPUs with 4
|
||||
cores each and hyper-threading).
|
||||
'';
|
||||
};
|
||||
|
||||
autoOptimiseStore = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
example = true;
|
||||
description = ''
|
||||
If set to true, Nix automatically detects files in the store that have
|
||||
identical contents, and replaces them with hard links to a single copy.
|
||||
This saves disk space. If set to false (the default), you can still run
|
||||
nix-store --optimise to get rid of duplicate files.
|
||||
'';
|
||||
};
|
||||
|
||||
buildCores = mkOption {
|
||||
type = types.int;
|
||||
default = 0;
|
||||
example = 64;
|
||||
description = ''
|
||||
This option defines the maximum number of concurrent tasks during
|
||||
one build. It affects, e.g., -j option for make.
|
||||
The special value 0 means that the builder should use all
|
||||
available CPU cores in the system. Some builds may become
|
||||
non-deterministic with this option; use with care! Packages will
|
||||
only be affected if enableParallelBuilding is set for them.
|
||||
'';
|
||||
};
|
||||
|
||||
useSandbox = mkOption {
|
||||
type = types.either types.bool (types.enum ["relaxed"]);
|
||||
default = true;
|
||||
description = "
|
||||
If set, Nix will perform builds in a sandboxed environment that it
|
||||
will set up automatically for each build. This prevents impurities
|
||||
in builds by disallowing access to dependencies outside of the Nix
|
||||
store by using network and mount namespaces in a chroot environment.
|
||||
This is enabled by default even though it has a possible performance
|
||||
impact due to the initial setup time of a sandbox for each build. It
|
||||
doesn't affect derivation hashes, so changing this option will not
|
||||
trigger a rebuild of packages.
|
||||
";
|
||||
};
|
||||
|
||||
sandboxPaths = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
example = [ "/dev" "/proc" ];
|
||||
description =
|
||||
''
|
||||
Directories from the host filesystem to be included
|
||||
in the sandbox.
|
||||
'';
|
||||
};
|
||||
|
||||
extraOptions = mkOption {
|
||||
type = types.lines;
|
||||
default = "";
|
||||
example = ''
|
||||
keep-outputs = true
|
||||
keep-derivations = true
|
||||
'';
|
||||
description = "Additional text appended to <filename>nix.conf</filename>.";
|
||||
};
|
||||
|
||||
distributedBuilds = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
|
@ -254,7 +218,7 @@ in
|
|||
};
|
||||
|
||||
buildMachines = mkOption {
|
||||
type = types.listOf (types.submodule ({
|
||||
type = types.listOf (types.submodule {
|
||||
options = {
|
||||
hostName = mkOption {
|
||||
type = types.str;
|
||||
|
@ -293,7 +257,7 @@ in
|
|||
The username to log in as on the remote host. This user must be
|
||||
able to log in and run nix commands non-interactively. It must
|
||||
also be privileged to build derivations, so must be included in
|
||||
<option>nix.trustedUsers</option>.
|
||||
<option>nix.settings.trusted-users</option>.
|
||||
'';
|
||||
};
|
||||
sshKey = mkOption {
|
||||
|
@ -350,8 +314,17 @@ in
|
|||
list.
|
||||
'';
|
||||
};
|
||||
publicHostKey = mkOption {
|
||||
type = types.nullOr types.str;
|
||||
default = null;
|
||||
description = ''
|
||||
The (base64-encoded) public host key of this builder. The field
|
||||
is calculated via <command>base64 -w0 /etc/ssh/ssh_host_type_key.pub</command>.
|
||||
If null, SSH will use its regular known-hosts file when connecting.
|
||||
'';
|
||||
};
|
||||
}));
|
||||
};
|
||||
});
|
||||
default = [ ];
|
||||
description = ''
|
||||
This option lists the machines to be used if distributed builds are
|
||||
|
@ -391,88 +364,9 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
binaryCaches = mkOption {
|
||||
type = types.listOf types.str;
|
||||
description = ''
|
||||
List of binary cache URLs used to obtain pre-built binaries
|
||||
of Nix packages.
|
||||
|
||||
By default https://cache.nixos.org/ is added,
|
||||
to override it use <literal>lib.mkForce []</literal>.
|
||||
'';
|
||||
};
|
||||
|
||||
trustedBinaryCaches = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [ ];
|
||||
example = [ "https://hydra.nixos.org/" ];
|
||||
description = ''
|
||||
List of binary cache URLs that non-root users can use (in
|
||||
addition to those specified using
|
||||
<option>nix.binaryCaches</option>) by passing
|
||||
<literal>--option binary-caches</literal> to Nix commands.
|
||||
'';
|
||||
};
|
||||
|
||||
requireSignedBinaryCaches = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
If enabled (the default), Nix will only download binaries from binary caches if
|
||||
they are cryptographically signed with any of the keys listed in
|
||||
<option>nix.binaryCachePublicKeys</option>. If disabled, signatures are neither
|
||||
required nor checked, so it's strongly recommended that you use only
|
||||
trustworthy caches and https to prevent man-in-the-middle attacks.
|
||||
'';
|
||||
};
|
||||
|
||||
binaryCachePublicKeys = mkOption {
|
||||
type = types.listOf types.str;
|
||||
example = [ "hydra.nixos.org-1:CNHJZBh9K4tP3EKF6FkkgeVYsS3ohTl+oS0Qa8bezVs=" ];
|
||||
description = ''
|
||||
List of public keys used to sign binary caches. If
|
||||
<option>nix.requireSignedBinaryCaches</option> is enabled,
|
||||
then Nix will use a binary from a binary cache if and only
|
||||
if it is signed by <emphasis>any</emphasis> of the keys
|
||||
listed here. By default, only the key for
|
||||
<uri>cache.nixos.org</uri> is included.
|
||||
'';
|
||||
};
|
||||
|
||||
trustedUsers = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [ "root" ];
|
||||
example = [ "root" "alice" "@wheel" ];
|
||||
description = ''
|
||||
A list of names of users that have additional rights when
|
||||
connecting to the Nix daemon, such as the ability to specify
|
||||
additional binary caches, or to import unsigned NARs. You
|
||||
can also specify groups by prefixing them with
|
||||
<literal>@</literal>; for instance,
|
||||
<literal>@wheel</literal> means all users in the wheel
|
||||
group.
|
||||
'';
|
||||
};
|
||||
|
||||
allowedUsers = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [ "*" ];
|
||||
example = [ "@wheel" "@builders" "alice" "bob" ];
|
||||
description = ''
|
||||
A list of names of users (separated by whitespace) that are
|
||||
allowed to connect to the Nix daemon. As with
|
||||
<option>nix.trustedUsers</option>, you can specify groups by
|
||||
prefixing them with <literal>@</literal>. Also, you can
|
||||
allow all users by specifying <literal>*</literal>. The
|
||||
default is <literal>*</literal>. Note that trusted users are
|
||||
always allowed to connect.
|
||||
'';
|
||||
};
|
||||
|
||||
nixPath = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default =
|
||||
[
|
||||
default = [
|
||||
"nixpkgs=/nix/var/nix/profiles/per-user/root/channels/nixos"
|
||||
"nixos-config=/etc/nixos/configuration.nix"
|
||||
"/nix/var/nix/profiles/per-user/root/channels"
|
||||
|
@ -484,45 +378,44 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
systemFeatures = mkOption {
|
||||
type = types.listOf types.str;
|
||||
example = [ "kvm" "big-parallel" "gccarch-skylake" ];
|
||||
description = ''
|
||||
The supported features of a machine
|
||||
'';
|
||||
};
|
||||
|
||||
checkConfig = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
If enabled (the default), checks that Nix can parse the generated nix.conf.
|
||||
If enabled (the default), checks for data type mismatches and that Nix
|
||||
can parse the generated nix.conf.
|
||||
'';
|
||||
};
|
||||
|
||||
registry = mkOption {
|
||||
type = types.attrsOf (types.submodule (
|
||||
let
|
||||
inputAttrs = types.attrsOf (types.oneOf [types.str types.int types.bool types.package]);
|
||||
referenceAttrs = with types; attrsOf (oneOf [
|
||||
str
|
||||
int
|
||||
bool
|
||||
package
|
||||
]);
|
||||
in
|
||||
{ config, name, ... }:
|
||||
{ options = {
|
||||
{
|
||||
options = {
|
||||
from = mkOption {
|
||||
type = inputAttrs;
|
||||
type = referenceAttrs;
|
||||
example = { type = "indirect"; id = "nixpkgs"; };
|
||||
description = "The flake reference to be rewritten.";
|
||||
};
|
||||
to = mkOption {
|
||||
type = inputAttrs;
|
||||
type = referenceAttrs;
|
||||
example = { type = "github"; owner = "my-org"; repo = "my-nixpkgs"; };
|
||||
description = "The flake reference to which <option>from></option> is to be rewritten.";
|
||||
description = "The flake reference <option>from></option> is rewritten to.";
|
||||
};
|
||||
flake = mkOption {
|
||||
type = types.nullOr types.attrs;
|
||||
default = null;
|
||||
example = literalExpression "nixpkgs";
|
||||
description = ''
|
||||
The flake input to which <option>from></option> is to be rewritten.
|
||||
The flake input <option>from</option> is rewritten to.
|
||||
'';
|
||||
};
|
||||
exact = mkOption {
|
||||
|
@ -537,11 +430,12 @@ in
|
|||
};
|
||||
config = {
|
||||
from = mkDefault { type = "indirect"; id = name; };
|
||||
to = mkIf (config.flake != null)
|
||||
({ type = "path";
|
||||
to = mkIf (config.flake != null) (mkDefault
|
||||
{
|
||||
type = "path";
|
||||
path = config.flake.outPath;
|
||||
} // lib.filterAttrs
|
||||
(n: v: n == "lastModified" || n == "rev" || n == "revCount" || n == "narHash")
|
||||
} // filterAttrs
|
||||
(n: _: n == "lastModified" || n == "rev" || n == "revCount" || n == "narHash")
|
||||
config.flake);
|
||||
};
|
||||
}
|
||||
|
@ -552,20 +446,216 @@ in
|
|||
'';
|
||||
};
|
||||
|
||||
extraOptions = mkOption {
|
||||
type = types.lines;
|
||||
default = "";
|
||||
example = ''
|
||||
keep-outputs = true
|
||||
keep-derivations = true
|
||||
'';
|
||||
description = "Additional text appended to <filename>nix.conf</filename>.";
|
||||
};
|
||||
|
||||
settings = mkOption {
|
||||
type = types.submodule {
|
||||
freeformType = semanticConfType;
|
||||
|
||||
options = {
|
||||
max-jobs = mkOption {
|
||||
type = types.either types.int (types.enum [ "auto" ]);
|
||||
default = "auto";
|
||||
example = 64;
|
||||
description = ''
|
||||
This option defines the maximum number of jobs that Nix will try to
|
||||
build in parallel. The default is auto, which means it will use all
|
||||
available logical cores. It is recommended to set it to the total
|
||||
number of logical cores in your system (e.g., 16 for two CPUs with 4
|
||||
cores each and hyper-threading).
|
||||
'';
|
||||
};
|
||||
|
||||
auto-optimise-store = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
example = true;
|
||||
description = ''
|
||||
If set to true, Nix automatically detects files in the store that have
|
||||
identical contents, and replaces them with hard links to a single copy.
|
||||
This saves disk space. If set to false (the default), you can still run
|
||||
nix-store --optimise to get rid of duplicate files.
|
||||
'';
|
||||
};
|
||||
|
||||
cores = mkOption {
|
||||
type = types.int;
|
||||
default = 0;
|
||||
example = 64;
|
||||
description = ''
|
||||
This option defines the maximum number of concurrent tasks during
|
||||
one build. It affects, e.g., -j option for make.
|
||||
The special value 0 means that the builder should use all
|
||||
available CPU cores in the system. Some builds may become
|
||||
non-deterministic with this option; use with care! Packages will
|
||||
only be affected if enableParallelBuilding is set for them.
|
||||
'';
|
||||
};
|
||||
|
||||
sandbox = mkOption {
|
||||
type = types.either types.bool (types.enum [ "relaxed" ]);
|
||||
default = true;
|
||||
description = ''
|
||||
If set, Nix will perform builds in a sandboxed environment that it
|
||||
will set up automatically for each build. This prevents impurities
|
||||
in builds by disallowing access to dependencies outside of the Nix
|
||||
store by using network and mount namespaces in a chroot environment.
|
||||
This is enabled by default even though it has a possible performance
|
||||
impact due to the initial setup time of a sandbox for each build. It
|
||||
doesn't affect derivation hashes, so changing this option will not
|
||||
trigger a rebuild of packages.
|
||||
'';
|
||||
};
|
||||
|
||||
extra-sandbox-paths = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [ ];
|
||||
example = [ "/dev" "/proc" ];
|
||||
description = ''
|
||||
Directories from the host filesystem to be included
|
||||
in the sandbox.
|
||||
'';
|
||||
};
|
||||
|
||||
substituters = mkOption {
|
||||
type = types.listOf types.str;
|
||||
description = ''
|
||||
List of binary cache URLs used to obtain pre-built binaries
|
||||
of Nix packages.
|
||||
|
||||
By default https://cache.nixos.org/ is added.
|
||||
'';
|
||||
};
|
||||
|
||||
trusted-substituters = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [ ];
|
||||
example = [ "https://hydra.nixos.org/" ];
|
||||
description = ''
|
||||
List of binary cache URLs that non-root users can use (in
|
||||
addition to those specified using
|
||||
<option>nix.settings.substituters</option>) by passing
|
||||
<literal>--option binary-caches</literal> to Nix commands.
|
||||
'';
|
||||
};
|
||||
|
||||
require-sigs = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = ''
|
||||
If enabled (the default), Nix will only download binaries from binary caches if
|
||||
they are cryptographically signed with any of the keys listed in
|
||||
<option>nix.settings.trusted-public-keys</option>. If disabled, signatures are neither
|
||||
required nor checked, so it's strongly recommended that you use only
|
||||
trustworthy caches and https to prevent man-in-the-middle attacks.
|
||||
'';
|
||||
};
|
||||
|
||||
trusted-public-keys = mkOption {
|
||||
type = types.listOf types.str;
|
||||
example = [ "hydra.nixos.org-1:CNHJZBh9K4tP3EKF6FkkgeVYsS3ohTl+oS0Qa8bezVs=" ];
|
||||
description = ''
|
||||
List of public keys used to sign binary caches. If
|
||||
<option>nix.settings.require-sigs</option> is enabled,
|
||||
then Nix will use a binary from a binary cache if and only
|
||||
if it is signed by <emphasis>any</emphasis> of the keys
|
||||
listed here. By default, only the key for
|
||||
<uri>cache.nixos.org</uri> is included.
|
||||
'';
|
||||
};
|
||||
|
||||
trusted-users = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [ "root" ];
|
||||
example = [ "root" "alice" "@wheel" ];
|
||||
description = ''
|
||||
A list of names of users that have additional rights when
|
||||
connecting to the Nix daemon, such as the ability to specify
|
||||
additional binary caches, or to import unsigned NARs. You
|
||||
can also specify groups by prefixing them with
|
||||
<literal>@</literal>; for instance,
|
||||
<literal>@wheel</literal> means all users in the wheel
|
||||
group.
|
||||
'';
|
||||
};
|
||||
|
||||
system-features = mkOption {
|
||||
type = types.listOf types.str;
|
||||
example = [ "kvm" "big-parallel" "gccarch-skylake" ];
|
||||
description = ''
|
||||
The set of features supported by the machine. Derivations
|
||||
can express dependencies on system features through the
|
||||
<literal>requiredSystemFeatures</literal> attribute.
|
||||
|
||||
By default, pseudo-features <literal>nixos-test</literal>, <literal>benchmark</literal>,
|
||||
and <literal>big-parallel</literal> used in Nixpkgs are set; <literal>kvm</literal>
|
||||
is also included if it is available.
|
||||
'';
|
||||
};
|
||||
|
||||
allowed-users = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [ "*" ];
|
||||
example = [ "@wheel" "@builders" "alice" "bob" ];
|
||||
description = ''
|
||||
A list of names of users (separated by whitespace) that are
|
||||
allowed to connect to the Nix daemon. As with
|
||||
<option>nix.settings.trusted-users</option>, you can specify groups by
|
||||
prefixing them with <literal>@</literal>. Also, you can
|
||||
allow all users by specifying <literal>*</literal>. The
|
||||
default is <literal>*</literal>. Note that trusted users are
|
||||
always allowed to connect.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
default = { };
|
||||
example = literalExpression ''
|
||||
{
|
||||
use-sandbox = true;
|
||||
show-trace = true;
|
||||
|
||||
system-features = [ "big-parallel" "kvm" "recursive-nix" ];
|
||||
sandbox-paths = { "/bin/sh" = "''${pkgs.busybox-sandbox-shell.out}/bin/busybox"; };
|
||||
}
|
||||
'';
|
||||
description = ''
|
||||
Configuration for Nix, see
|
||||
<link xlink:href="https://nixos.org/manual/nix/stable/#sec-conf-file"/> or
|
||||
<citerefentry>
|
||||
<refentrytitle>nix.conf</refentrytitle>
|
||||
<manvolnum>5</manvolnum>
|
||||
</citerefentry> for available options.
|
||||
The value declared here will be translated directly to the key-value pairs Nix expects.
|
||||
</para>
|
||||
<para>
|
||||
You can use <command>nix-instantiate --eval --strict '<nixpkgs/nixos>' -A config.nix.settings</command>
|
||||
to view the current value. By default it is empty.
|
||||
</para>
|
||||
<para>
|
||||
Nix configurations defined under <option>nix.*</option> will be translated and applied to this
|
||||
option. In addition, configuration specified in <option>nix.extraOptions</option> will be appended
|
||||
verbatim to the resulting config file.
|
||||
'';
|
||||
};
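For illustration, a hedged sketch of a configuration written against the freeform settings option, using the legacy-to-settings mappings that appear elsewhere in this change (key names follow nix.conf):

    nix.settings = {
      # formerly nix.maxJobs / nix.buildCores
      max-jobs = "auto";
      cores = 0;
      # formerly nix.useSandbox
      sandbox = true;
      # formerly nix.binaryCaches / nix.binaryCachePublicKeys
      substituters = [ "https://cache.nixos.org/" ];
      trusted-public-keys = [ "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" ];
      # formerly nix.trustedUsers
      trusted-users = [ "root" "@wheel" ];
    };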
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
###### implementation
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
|
||||
nix.binaryCachePublicKeys = [ "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" ];
|
||||
nix.binaryCaches = [ "https://cache.nixos.org/" ];
|
||||
|
||||
environment.systemPackages =
|
||||
[ nix
|
||||
[
|
||||
nixPackage
|
||||
pkgs.nix-info
|
||||
]
|
||||
++ optional (config.programs.bash.enableCompletion) pkgs.nix-bash-completions;
|
||||
|
@ -579,44 +669,49 @@ in
|
|||
|
||||
# List of machines for distributed Nix builds in the format
|
||||
# expected by build-remote.pl.
|
||||
environment.etc."nix/machines" =
|
||||
{ enable = cfg.buildMachines != [];
|
||||
environment.etc."nix/machines" = mkIf (cfg.buildMachines != [ ]) {
|
||||
text =
|
||||
concatMapStrings (machine:
|
||||
"${if machine.sshUser != null then "${machine.sshUser}@" else ""}${machine.hostName} "
|
||||
+ (if machine.system != null then machine.system else concatStringsSep "," machine.systems)
|
||||
+ " ${if machine.sshKey != null then machine.sshKey else "-"} ${toString machine.maxJobs} "
|
||||
+ toString (machine.speedFactor)
|
||||
+ " "
|
||||
+ concatStringsSep "," (machine.mandatoryFeatures ++ machine.supportedFeatures)
|
||||
+ " "
|
||||
+ concatStringsSep "," machine.mandatoryFeatures
|
||||
concatMapStrings
|
||||
(machine:
|
||||
(concatStringsSep " " ([
|
||||
"${optionalString (machine.sshUser != null) "${machine.sshUser}@"}${machine.hostName}"
|
||||
(if machine.system != null then machine.system else if machine.systems != [ ] then concatStringsSep "," machine.systems else "-")
|
||||
(if machine.sshKey != null then machine.sshKey else "-")
|
||||
(toString machine.maxJobs)
|
||||
(toString machine.speedFactor)
|
||||
(concatStringsSep "," machine.supportedFeatures)
|
||||
(concatStringsSep "," machine.mandatoryFeatures)
|
||||
]
|
||||
++ optional (isNixAtLeast "2.4pre") (if machine.publicHostKey != null then machine.publicHostKey else "-")))
|
||||
+ "\n"
|
||||
) cfg.buildMachines;
|
||||
)
|
||||
cfg.buildMachines;
|
||||
};
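As a hedged illustration of the format generated above, a single nix.buildMachines entry (host name and key path are hypothetical):

    nix.buildMachines = [{
      hostName = "builder.example.org";      # hypothetical host
      sshUser = "builder";
      sshKey = "/root/.ssh/id_builder";      # hypothetical key path
      systems = [ "x86_64-linux" "i686-linux" ];
      maxJobs = 8;
      speedFactor = 2;
      supportedFeatures = [ "kvm" ];
      mandatoryFeatures = [ "big-parallel" ];
    }];

would be rendered as one whitespace-separated line in /etc/nix/machines, roughly:

    builder@builder.example.org x86_64-linux,i686-linux /root/.ssh/id_builder 8 2 kvm big-parallel -

with the trailing "-" (public host key) only emitted for Nix >= 2.4pre.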
|
||||
|
||||
assertions =
|
||||
let badMachine = m: m.system == null && m.systems == [ ];
|
||||
in [
|
||||
in
|
||||
[
|
||||
{
|
||||
assertion = !(builtins.any badMachine cfg.buildMachines);
|
||||
assertion = !(any badMachine cfg.buildMachines);
|
||||
message = ''
|
||||
At least one system type (via <varname>system</varname> or
|
||||
<varname>systems</varname>) must be set for every build machine.
|
||||
Invalid machine specifications:
|
||||
'' + " " +
|
||||
(builtins.concatStringsSep "\n "
|
||||
(builtins.map (m: m.hostName)
|
||||
(builtins.filter (badMachine) cfg.buildMachines)));
|
||||
(concatStringsSep "\n "
|
||||
(map (m: m.hostName)
|
||||
(filter (badMachine) cfg.buildMachines)));
|
||||
}
|
||||
];
|
||||
|
||||
|
||||
systemd.packages = [ nix ];
|
||||
systemd.packages = [ nixPackage ];
|
||||
|
||||
systemd.sockets.nix-daemon.wantedBy = [ "sockets.target" ];
|
||||
|
||||
systemd.services.nix-daemon =
|
||||
{ path = [ nix pkgs.util-linux config.programs.ssh.package ]
|
||||
{
|
||||
path = [ nixPackage pkgs.util-linux config.programs.ssh.package ]
|
||||
++ optionals cfg.distributedBuilds [ pkgs.gzip ];
|
||||
|
||||
environment = cfg.envVars
|
||||
|
@ -626,7 +721,8 @@ in
|
|||
unitConfig.RequiresMountsFor = "/nix/store";
|
||||
|
||||
serviceConfig =
|
||||
{ CPUSchedulingPolicy = cfg.daemonCPUSchedPolicy;
|
||||
{
|
||||
CPUSchedulingPolicy = cfg.daemonCPUSchedPolicy;
|
||||
IOSchedulingClass = cfg.daemonIOSchedClass;
|
||||
IOSchedulingPriority = cfg.daemonIOSchedPriority;
|
||||
LimitNOFILE = 4096;
|
||||
|
@ -636,9 +732,7 @@ in
|
|||
};
|
||||
|
||||
# Set up the environment variables for running Nix.
|
||||
environment.sessionVariables = cfg.envVars //
|
||||
{ NIX_PATH = cfg.nixPath;
|
||||
};
|
||||
environment.sessionVariables = cfg.envVars // { NIX_PATH = cfg.nixPath; };
|
||||
|
||||
environment.extraInit =
|
||||
''
|
||||
|
@ -647,7 +741,7 @@ in
|
|||
fi
|
||||
'';
|
||||
|
||||
nix.nrBuildUsers = mkDefault (lib.max 32 (if cfg.maxJobs == "auto" then 0 else cfg.maxJobs));
|
||||
nix.nrBuildUsers = mkDefault (max 32 (if cfg.settings.max-jobs == "auto" then 0 else cfg.settings.max-jobs));
|
||||
|
||||
users.users = nixbldUsers;
|
||||
|
||||
|
@ -663,14 +757,26 @@ in
|
|||
fi
|
||||
'';
|
||||
|
||||
nix.systemFeatures = mkDefault (
|
||||
# Legacy configuration conversion.
|
||||
nix.settings = mkMerge [
|
||||
{
|
||||
trusted-public-keys = [ "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" ];
|
||||
substituters = [ "https://cache.nixos.org/" ];
|
||||
|
||||
system-features = mkDefault (
|
||||
[ "nixos-test" "benchmark" "big-parallel" "kvm" ] ++
|
||||
optionals (pkgs.hostPlatform ? gcc.arch) (
|
||||
# a builder can run code for `gcc.arch` and inferior architectures
|
||||
[ "gccarch-${pkgs.hostPlatform.gcc.arch}" ] ++
|
||||
map (x: "gccarch-${x}") lib.systems.architectures.inferiors.${pkgs.hostPlatform.gcc.arch}
|
||||
map (x: "gccarch-${x}") systems.architectures.inferiors.${pkgs.hostPlatform.gcc.arch}
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
(mkIf (!cfg.distributedBuilds) { builders = null; })
|
||||
|
||||
(mkIf (isNixAtLeast "2.3pre") { sandbox-fallback = false; })
|
||||
];
|
||||
|
||||
};
|
||||
|
||||
|
|
|
@ -20,7 +20,7 @@ in {
|
|||
write = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = "Whether to enable writing to the Nix store as a remote store via SSH. Note: the sshServe user is named nix-ssh and is not a trusted-user. nix-ssh should be added to the nix.trustedUsers option in most use cases, such as allowing remote building of derivations.";
|
||||
description = "Whether to enable writing to the Nix store as a remote store via SSH. Note: the sshServe user is named nix-ssh and is not a trusted-user. nix-ssh should be added to the <option>nix.settings.trusted-users</option> option in most use cases, such as allowing remote building of derivations.";
|
||||
};
|
||||
|
||||
keys = mkOption {
|
||||
|
|
|
@ -25,7 +25,8 @@ in {
|
|||
[ "/dev/sda", "/dev/nvme0n1" ];
|
||||
'';
|
||||
description = ''
|
||||
Paths to disks that will be monitored.
|
||||
Paths to the disks that will be monitored. Will autodiscover
|
||||
all disks if none given.
|
||||
'';
|
||||
};
|
||||
maxInterval = mkOption {
|
||||
|
@ -41,13 +42,23 @@ in {
|
|||
serviceOpts = {
|
||||
serviceConfig = {
|
||||
AmbientCapabilities = [
|
||||
"CAP_RAW_SYSIO"
|
||||
"CAP_SYS_ADMIN"
|
||||
];
|
||||
CapabilityBoundingSet = [
|
||||
"CAP_RAW_SYSIO"
|
||||
"CAP_SYS_ADMIN"
|
||||
];
|
||||
DevicePolicy = "closed";
|
||||
DeviceAllow = lib.mkForce cfg.devices;
|
||||
DeviceAllow = lib.mkOverride 100 (
|
||||
if cfg.devices != [] then
|
||||
cfg.devices
|
||||
else [
|
||||
"block-blkext rw"
|
||||
"block-sd rw"
|
||||
"char-nvme rw"
|
||||
]
|
||||
);
|
||||
ExecStart = ''
|
||||
${pkgs.prometheus-smartctl-exporter}/bin/smartctl_exporter -config ${configFile}
|
||||
'';
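A hedged usage sketch for the exporter options above (the full module path is assumed; an empty devices list keeps the autodiscovery default and the broad DeviceAllow entries):

    services.prometheus.exporters.smartctl = {
      enable = true;
      # Restrict monitoring, and the generated DeviceAllow entries, to specific disks.
      devices = [ "/dev/sda" "/dev/nvme0n1" ];
    };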
|
||||
|
|
|
@ -9,7 +9,7 @@ let
|
|||
# On Nix level we don't attempt to precisely validate the address specifications.
|
||||
# The optional IPv6 scope spec comes *after* port, perhaps surprisingly.
|
||||
mkListen = kind: addr: let
|
||||
al_v4 = builtins.match "([0-9.]+):([0-9]+)()" addr;
|
||||
al_v4 = builtins.match "([0-9.]+):([0-9]+)($)" addr;
|
||||
al_v6 = builtins.match "\\[(.+)]:([0-9]+)(%.*|$)" addr;
|
||||
al_portOnly = builtins.match "([0-9]+)" addr;
|
||||
al = findFirst (a: a != null)
|
||||
|
|
|
@ -1,45 +0,0 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.services.racoon;
|
||||
in {
|
||||
options.services.racoon = {
|
||||
enable = mkEnableOption "racoon";
|
||||
|
||||
config = mkOption {
|
||||
description = "Contents of racoon configuration file.";
|
||||
default = "";
|
||||
type = types.str;
|
||||
};
|
||||
|
||||
configPath = mkOption {
|
||||
description = "Location of racoon config if config is not provided.";
|
||||
default = "/etc/racoon/racoon.conf";
|
||||
type = types.path;
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
systemd.services.racoon = {
|
||||
description = "Racoon Daemon";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.ipsecTools}/bin/racoon -f ${
|
||||
if (cfg.config != "") then pkgs.writeText "racoon.conf" cfg.config
|
||||
else cfg.configPath
|
||||
}";
|
||||
ExecReload = "${pkgs.ipsecTools}/bin/racoonctl reload-config";
|
||||
PIDFile = "/run/racoon.pid";
|
||||
Type = "forking";
|
||||
Restart = "always";
|
||||
};
|
||||
preStart = ''
|
||||
rm /run/racoon.pid || true
|
||||
mkdir -p /var/racoon
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
|
@ -599,6 +599,8 @@ in {
|
|||
timerConfig.Unit = "nextcloud-cron.service";
|
||||
};
|
||||
|
||||
systemd.tmpfiles.rules = ["d ${cfg.home} 0750 nextcloud nextcloud"];
|
||||
|
||||
systemd.services = {
|
||||
# When upgrading the Nextcloud package, Nextcloud can report errors such as
|
||||
# "The files of the app [all apps in /var/lib/nextcloud/apps] were not replaced correctly"
|
||||
|
@ -720,8 +722,6 @@ in {
|
|||
before = [ "phpfpm-nextcloud.service" ];
|
||||
path = [ occ ];
|
||||
script = ''
|
||||
chmod og+x ${cfg.home}
|
||||
|
||||
${optionalString (c.dbpassFile != null) ''
|
||||
if [ ! -r "${c.dbpassFile}" ]; then
|
||||
echo "dbpassFile ${c.dbpassFile} is not readable by nextcloud:nextcloud! Aborting..."
|
||||
|
@ -814,7 +814,6 @@ in {
|
|||
users.users.nextcloud = {
|
||||
home = "${cfg.home}";
|
||||
group = "nextcloud";
|
||||
createHome = true;
|
||||
isSystemUser = true;
|
||||
};
|
||||
users.groups.nextcloud.members = [ "nextcloud" config.services.nginx.user ];
|
||||
|
|
|
@ -394,7 +394,8 @@ in
|
|||
|
||||
# Extra UDEV rules used by Solid
|
||||
services.udev.packages = [
|
||||
pkgs.libmtp
|
||||
# libmtp has "bin", "dev", "out" outputs. UDEV rules file is in "out".
|
||||
pkgs.libmtp.out
|
||||
pkgs.media-player-info
|
||||
];
|
||||
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
|
||||
use strict;
|
||||
use warnings;
|
||||
use Config::IniFiles;
|
||||
use File::Path qw(make_path);
|
||||
use File::Basename;
|
||||
use File::Slurp;
|
||||
|
@ -113,26 +114,77 @@ sub parseFstab {
|
|||
return ($fss, $swaps);
|
||||
}
|
||||
|
||||
# This subroutine takes a single ini file that specified systemd configuration
|
||||
# like unit configuration and parses it into a hash where the keys are the sections
|
||||
# of the unit file and the values are hashes themselves. These hashes have the unit file
|
||||
# keys as their keys (left side of =) and an array of all values that were set as their
|
||||
# values. If a value is empty (for example `ExecStart=`), then all current definitions are
|
||||
# removed.
|
||||
#
|
||||
# Instead of returning the hash, this subroutine takes a hashref to return the data in. This
|
||||
# allows calling the subroutine multiple times with the same hash to parse override files.
|
||||
sub parseSystemdIni {
|
||||
my ($unitContents, $path) = @_;
|
||||
# Tie the ini file to a hash for easier access
|
||||
my %fileContents;
|
||||
tie %fileContents, "Config::IniFiles", (-file => $path, -allowempty => 1, -allowcontinue => 1);
|
||||
|
||||
# Copy over all sections
|
||||
foreach my $sectionName (keys %fileContents) {
|
||||
# Copy over all keys
|
||||
foreach my $iniKey (keys %{$fileContents{$sectionName}}) {
|
||||
# Ensure the value is an array so it's easier to work with
|
||||
my $iniValue = $fileContents{$sectionName}{$iniKey};
|
||||
my @iniValues;
|
||||
if (ref($iniValue) eq "ARRAY") {
|
||||
@iniValues = @{$iniValue};
|
||||
} else {
|
||||
@iniValues = $iniValue;
|
||||
}
|
||||
# Go over all values
|
||||
for my $iniValue (@iniValues) {
|
||||
# If a value is empty, it's an override that tells us to clean the value
|
||||
if ($iniValue eq "") {
|
||||
delete $unitContents->{$sectionName}->{$iniKey};
|
||||
next;
|
||||
}
|
||||
push(@{$unitContents->{$sectionName}->{$iniKey}}, $iniValue);
|
||||
}
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
# This subroutine takes the path to a systemd configuration file (like a unit configuration),
|
||||
# parses it, and returns a hash that contains the contents. The contents of this hash are
|
||||
# explained in the `parseSystemdIni` subroutine. Neither the sections nor the keys inside
|
||||
# the sections are consistently sorted.
|
||||
#
|
||||
# If a directory with the same basename ending in .d exists next to the unit file, it will be
|
||||
# assumed to contain override files which will be parsed as well and handled properly.
|
||||
sub parseUnit {
|
||||
my ($filename) = @_;
|
||||
my $info = {};
|
||||
parseKeyValues($info, read_file($filename)) if -f $filename;
|
||||
parseKeyValues($info, read_file("${filename}.d/overrides.conf")) if -f "${filename}.d/overrides.conf";
|
||||
return $info;
|
||||
my ($unitPath) = @_;
|
||||
|
||||
# Parse the main unit and all overrides
|
||||
my %unitData;
|
||||
parseSystemdIni(\%unitData, $_) for glob("${unitPath}{,.d/*.conf}");
|
||||
return %unitData;
|
||||
}
|
||||
|
||||
sub parseKeyValues {
|
||||
my $info = shift;
|
||||
foreach my $line (@_) {
|
||||
# FIXME: not quite correct.
|
||||
$line =~ /^([^=]+)=(.*)$/ or next;
|
||||
$info->{$1} = $2;
|
||||
}
|
||||
}
|
||||
# Checks whether a specified boolean in a systemd unit is true
|
||||
# or false, with a default that is applied when the value is not set.
|
||||
sub parseSystemdBool {
|
||||
my ($unitConfig, $sectionName, $boolName, $default) = @_;
|
||||
|
||||
sub boolIsTrue {
|
||||
my ($s) = @_;
|
||||
return $s eq "yes" || $s eq "true";
|
||||
my @values = @{$unitConfig->{$sectionName}{$boolName} // []};
|
||||
# Return default if value is not set
|
||||
if (scalar @values lt 1 || not defined $values[-1]) {
|
||||
return $default;
|
||||
}
|
||||
# If value is defined multiple times, use the last definition
|
||||
my $last = $values[-1];
|
||||
# These are valid values as of systemd.syntax(7)
|
||||
return $last eq "1" || $last eq "yes" || $last eq "true" || $last eq "on";
|
||||
}
|
||||
|
||||
sub recordUnit {
|
||||
|
@ -167,17 +219,17 @@ sub handleModifiedUnit {
|
|||
# Revert of the attempt: https://github.com/NixOS/nixpkgs/pull/147609
|
||||
# More details: https://github.com/NixOS/nixpkgs/issues/74899#issuecomment-981142430
|
||||
} else {
|
||||
my $unitInfo = parseUnit($newUnitFile);
|
||||
if (boolIsTrue($unitInfo->{'X-ReloadIfChanged'} // "no")) {
|
||||
my %unitInfo = parseUnit($newUnitFile);
|
||||
if (parseSystemdBool(\%unitInfo, "Service", "X-ReloadIfChanged", 0)) {
|
||||
$unitsToReload->{$unit} = 1;
|
||||
recordUnit($reloadListFile, $unit);
|
||||
}
|
||||
elsif (!boolIsTrue($unitInfo->{'X-RestartIfChanged'} // "yes") || boolIsTrue($unitInfo->{'RefuseManualStop'} // "no") || boolIsTrue($unitInfo->{'X-OnlyManualStart'} // "no")) {
|
||||
elsif (!parseSystemdBool(\%unitInfo, "Service", "X-RestartIfChanged", 1) || parseSystemdBool(\%unitInfo, "Unit", "RefuseManualStop", 0) || parseSystemdBool(\%unitInfo, "Unit", "X-OnlyManualStart", 0)) {
|
||||
$unitsToSkip->{$unit} = 1;
|
||||
} else {
|
||||
# It doesn't make sense to stop and start non-services because
|
||||
# they can't have ExecStop=
|
||||
if (!boolIsTrue($unitInfo->{'X-StopIfChanged'} // "yes") || $unit !~ /\.service$/) {
|
||||
if (!parseSystemdBool(\%unitInfo, "Service", "X-StopIfChanged", 1) || $unit !~ /\.service$/) {
|
||||
# This unit should be restarted instead of
|
||||
# stopped and started.
|
||||
$unitsToRestart->{$unit} = 1;
|
||||
|
@ -188,7 +240,7 @@ sub handleModifiedUnit {
|
|||
# socket(s) instead of the service.
|
||||
my $socketActivated = 0;
|
||||
if ($unit =~ /\.service$/) {
|
||||
my @sockets = split / /, ($unitInfo->{Sockets} // "");
|
||||
my @sockets = split(/ /, join(" ", @{$unitInfo{Service}{Sockets} // []}));
|
||||
if (scalar @sockets == 0) {
|
||||
@sockets = ("$baseName.socket");
|
||||
}
|
||||
|
@ -254,12 +306,12 @@ while (my ($unit, $state) = each %{$activePrev}) {
|
|||
|
||||
if (-e $prevUnitFile && ($state->{state} eq "active" || $state->{state} eq "activating")) {
|
||||
if (! -e $newUnitFile || abs_path($newUnitFile) eq "/dev/null") {
|
||||
my $unitInfo = parseUnit($prevUnitFile);
|
||||
$unitsToStop{$unit} = 1 if boolIsTrue($unitInfo->{'X-StopOnRemoval'} // "yes");
|
||||
my %unitInfo = parseUnit($prevUnitFile);
|
||||
$unitsToStop{$unit} = 1 if parseSystemdBool(\%unitInfo, "Unit", "X-StopOnRemoval", 1);
|
||||
}
|
||||
|
||||
elsif ($unit =~ /\.target$/) {
|
||||
my $unitInfo = parseUnit($newUnitFile);
|
||||
my %unitInfo = parseUnit($newUnitFile);
|
||||
|
||||
# Cause all active target units to be restarted below.
|
||||
# This should start most changed units we stop here as
|
||||
|
@ -268,7 +320,7 @@ while (my ($unit, $state) = each %{$activePrev}) {
|
|||
# active after the system has resumed, which probably
|
||||
# should not be the case. Just ignore it.
|
||||
if ($unit ne "suspend.target" && $unit ne "hibernate.target" && $unit ne "hybrid-sleep.target") {
|
||||
unless (boolIsTrue($unitInfo->{'RefuseManualStart'} // "no") || boolIsTrue($unitInfo->{'X-OnlyManualStart'} // "no")) {
|
||||
unless (parseSystemdBool(\%unitInfo, "Unit", "RefuseManualStart", 0) || parseSystemdBool(\%unitInfo, "Unit", "X-OnlyManualStart", 0)) {
|
||||
$unitsToStart{$unit} = 1;
|
||||
recordUnit($startListFile, $unit);
|
||||
# Don't spam the user with target units that always get started.
|
||||
|
@ -287,7 +339,7 @@ while (my ($unit, $state) = each %{$activePrev}) {
|
|||
# Stopping a target generally has no effect on other units
|
||||
# (unless there is a PartOf dependency), so this is just a
|
||||
# bookkeeping thing to get systemd to do the right thing.
|
||||
if (boolIsTrue($unitInfo->{'X-StopOnReconfiguration'} // "no")) {
|
||||
if (parseSystemdBool(\%unitInfo, "Unit", "X-StopOnReconfiguration", 0)) {
|
||||
$unitsToStop{$unit} = 1;
|
||||
}
|
||||
}
|
||||
|
@ -546,33 +598,36 @@ my $activeNew = getActiveUnits;
|
|||
while (my ($unit, $state) = each %{$activeNew}) {
|
||||
if ($state->{state} eq "failed") {
|
||||
push @failed, $unit;
|
||||
next;
|
||||
}
|
||||
elsif ($state->{state} eq "auto-restart") {
|
||||
# A unit in auto-restart state is a failure *if* it previously failed to start
|
||||
my $lines = `@systemd@/bin/systemctl show '$unit'`;
|
||||
my $info = {};
|
||||
parseKeyValues($info, split("\n", $lines));
|
||||
|
||||
if ($info->{ExecMainStatus} ne '0') {
|
||||
if ($state->{substate} eq "auto-restart") {
|
||||
# A unit in auto-restart substate is a failure *if* it previously failed to start
|
||||
my $main_status = `@systemd@/bin/systemctl show --value --property=ExecMainStatus '$unit'`;
|
||||
chomp($main_status);
|
||||
|
||||
if ($main_status ne "0") {
|
||||
push @failed, $unit;
|
||||
next;
|
||||
}
|
||||
}
|
||||
|
||||
# Ignore scopes since they are not managed by this script but rather
|
||||
# created and managed by third-party services via the systemd dbus API.
|
||||
elsif ($state->{state} ne "failed" && !defined $activePrev->{$unit} && $unit !~ /\.scope$/) {
|
||||
# This only lists units that are not failed (including ones that are in auto-restart but have not failed previously)
|
||||
if ($state->{state} ne "failed" && !defined $activePrev->{$unit} && $unit !~ /\.scope$/msx) {
|
||||
push @new, $unit;
|
||||
}
|
||||
}
|
||||
|
||||
if (scalar @new > 0) {
|
||||
print STDERR "the following new units were started: ", join(", ", sort(@new)), "\n"
|
||||
if scalar @new > 0;
|
||||
}
|
||||
|
||||
if (scalar @failed > 0) {
|
||||
print STDERR "warning: the following units failed: ", join(", ", sort(@failed)), "\n";
|
||||
foreach my $unit (@failed) {
|
||||
print STDERR "\n";
|
||||
system("COLUMNS=1000 @systemd@/bin/systemctl status --no-pager '$unit' >&2");
|
||||
}
|
||||
my @failed_sorted = sort @failed;
|
||||
print STDERR "warning: the following units failed: ", join(", ", @failed_sorted), "\n\n";
|
||||
system "@systemd@/bin/systemctl status --no-pager --full '" . join("' '", @failed_sorted) . "' >&2";
|
||||
$res = 4;
|
||||
}
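For context, the X-* unit keys consulted by the Perl code above are emitted from NixOS service definitions; a hedged sketch using option names that also appear in the switch tests later in this change (service names are hypothetical):

    systemd.services.my-daemon = {
      # Emits X-ReloadIfChanged, so switch-to-configuration reloads instead of restarting.
      reloadIfChanged = true;
    };

    systemd.services.my-batch-job = {
      # Emits X-RestartIfChanged=false, so the unit is left alone on activation.
      restartIfChanged = false;
    };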
|
||||
|
||||
|
|
|
@ -117,7 +117,7 @@ let
|
|||
configurationName = config.boot.loader.grub.configurationName;
|
||||
|
||||
# Needed by switch-to-configuration.
|
||||
perl = pkgs.perl.withPackages (p: with p; [ FileSlurp NetDBus XMLParser XMLTwig ]);
|
||||
perl = pkgs.perl.withPackages (p: with p; [ FileSlurp NetDBus XMLParser XMLTwig ConfigIniFiles ]);
|
||||
};
|
||||
|
||||
# Handle assertions and warnings
|
||||
|
@ -156,7 +156,7 @@ in
|
|||
|
||||
specialisation = mkOption {
|
||||
default = {};
|
||||
example = lib.literalExpression "{ fewJobsManyCores.configuration = { nix.buildCores = 0; nix.maxJobs = 1; }; }";
|
||||
example = lib.literalExpression "{ fewJobsManyCores.configuration = { nix.settings = { cores = 0; max-jobs = 1; }; }; }";
|
||||
description = ''
|
||||
Additional configurations to build. If
|
||||
<literal>inheritParentConfig</literal> is true, the system
|
||||
|
|
|
@ -300,18 +300,15 @@ in {
|
|||
interpreterSandboxPath = dirOf (dirOf interpreterReg);
|
||||
} // (magics.${system} or (throw "Cannot create binfmt registration for system ${system}"));
|
||||
}) cfg.emulatedSystems);
|
||||
# TODO: add a nix.extraPlatforms option to NixOS!
|
||||
nix.extraOptions = lib.mkIf (cfg.emulatedSystems != []) ''
|
||||
extra-platforms = ${toString (cfg.emulatedSystems ++ lib.optional pkgs.stdenv.hostPlatform.isx86_64 "i686-linux")}
|
||||
'';
|
||||
nix.sandboxPaths = lib.mkIf (cfg.emulatedSystems != []) (
|
||||
let
|
||||
nix.settings = lib.mkIf (cfg.emulatedSystems != []) {
|
||||
extra-platforms = cfg.emulatedSystems ++ lib.optional pkgs.stdenv.hostPlatform.isx86_64 "i686-linux";
|
||||
extra-sandbox-paths = let
|
||||
ruleFor = system: cfg.registrations.${system};
|
||||
hasWrappedRule = lib.any (system: (ruleFor system).wrapInterpreterInShell) cfg.emulatedSystems;
|
||||
in [ "/run/binfmt" ]
|
||||
++ lib.optional hasWrappedRule "${pkgs.bash}"
|
||||
++ (map (system: (ruleFor system).interpreterSandboxPath) cfg.emulatedSystems)
|
||||
);
|
||||
++ (map (system: (ruleFor system).interpreterSandboxPath) cfg.emulatedSystems);
|
||||
};
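A hedged usage sketch for the block above (the emulated system value is illustrative): registering an emulated system populates both extra-platforms and extra-sandbox-paths for the Nix daemon.

    boot.binfmt.emulatedSystems = [ "aarch64-linux" ];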
|
||||
|
||||
environment.etc."binfmt.d/nixos.conf".source = builtins.toFile "binfmt_nixos.conf"
|
||||
(lib.concatStringsSep "\n" (lib.mapAttrsToList makeBinfmtLine config.boot.binfmt.registrations));
|
||||
|
|
|
@ -36,17 +36,6 @@ in {
|
|||
Open vSwitch package to use.
|
||||
'';
|
||||
};
|
||||
|
||||
ipsec = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Whether to start racoon service for openvswitch.
|
||||
Supported only if openvswitch version is less than 2.6.0.
|
||||
Use <literal>virtualisation.vswitch.package = pkgs.openvswitch-lts</literal>
|
||||
for a version that supports ipsec over GRE.
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable (let
|
||||
|
@ -65,7 +54,7 @@ in {
|
|||
installPhase = "mkdir -p $out";
|
||||
};
|
||||
|
||||
in (mkMerge [{
|
||||
in {
|
||||
environment.systemPackages = [ cfg.package ];
|
||||
boot.kernelModules = [ "tun" "openvswitch" ];
|
||||
|
||||
|
@ -142,48 +131,14 @@ in {
|
|||
};
|
||||
};
|
||||
|
||||
}
|
||||
(mkIf (cfg.ipsec && (versionOlder cfg.package.version "2.6.0")) {
|
||||
environment.systemPackages = [ pkgs.ipsecTools ];
|
||||
});
|
||||
|
||||
services.racoon.enable = true;
|
||||
services.racoon.configPath = "${runDir}/ipsec/etc/racoon/racoon.conf";
|
||||
|
||||
networking.firewall.extraCommands = ''
|
||||
iptables -I INPUT -t mangle -p esp -j MARK --set-mark 1/1
|
||||
iptables -I INPUT -t mangle -p udp --dport 4500 -j MARK --set-mark 1/1
|
||||
'';
|
||||
|
||||
systemd.services.ovs-monitor-ipsec = {
|
||||
description = "Open_vSwitch Ipsec Daemon";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
requires = [ "ovsdb.service" ];
|
||||
before = [ "vswitchd.service" "racoon.service" ];
|
||||
environment.UNIXCTLPATH = "/tmp/ovsdb.ctl.sock";
|
||||
serviceConfig = {
|
||||
ExecStart = ''
|
||||
${cfg.package}/bin/ovs-monitor-ipsec \
|
||||
--root-prefix ${runDir}/ipsec \
|
||||
--pidfile /run/openvswitch/ovs-monitor-ipsec.pid \
|
||||
--monitor --detach \
|
||||
unix:/run/openvswitch/db.sock
|
||||
'';
|
||||
PIDFile = "/run/openvswitch/ovs-monitor-ipsec.pid";
|
||||
# Use service type 'forking' to correctly determine when ovs-monitor-ipsec is ready.
|
||||
Type = "forking";
|
||||
};
|
||||
|
||||
preStart = ''
|
||||
rm -r ${runDir}/ipsec/etc/racoon/certs || true
|
||||
mkdir -p ${runDir}/ipsec/{etc/racoon,etc/init.d/,usr/sbin/}
|
||||
ln -fs ${pkgs.ipsecTools}/bin/setkey ${runDir}/ipsec/usr/sbin/setkey
|
||||
ln -fs ${pkgs.writeScript "racoon-restart" ''
|
||||
#!${pkgs.runtimeShell}
|
||||
/run/current-system/sw/bin/systemctl $1 racoon
|
||||
''} ${runDir}/ipsec/etc/init.d/racoon
|
||||
'';
|
||||
};
|
||||
})]));
|
||||
imports = [
|
||||
(mkRemovedOptionModule [ "virtualisation" "vswitch" "ipsec" ] ''
|
||||
OpenVSwitch IPSec functionality has been removed, because it depended on racoon,
|
||||
which was removed from nixpkgs because it was abandoned upstream.
|
||||
'')
|
||||
];
|
||||
|
||||
meta.maintainers = with maintainers; [ netixx ];
|
||||
|
||||
|
|
|
@ -10,8 +10,8 @@ import ./make-test-python.nix ({ pkgs, lib, ... }: {
|
|||
|
||||
# XXX: Sandbox setup fails while trying to hardlink files from the host's
|
||||
# store file system into the prepared chroot directory.
|
||||
nix.useSandbox = false;
|
||||
nix.binaryCaches = []; # don't try to access cache.nixos.org
|
||||
nix.settings.sandbox = false;
|
||||
nix.settings.substituters = []; # don't try to access cache.nixos.org
|
||||
|
||||
virtualisation.writableStore = true;
|
||||
# Make sure we always have all the required dependencies for creating a
|
||||
|
|
third_party/nixpkgs/nixos/tests/geth.nix
|
@ -31,7 +31,7 @@ import ./make-test-python.nix ({ pkgs, ... }: {
|
|||
machine.wait_for_open_port(18545)
|
||||
|
||||
machine.succeed(
|
||||
'geth attach --exec "eth.chainId()" http://localhost:8545 | grep \'"0x0"\' '
|
||||
'geth attach --exec eth.blockNumber http://localhost:8545 | grep \'^0$\' '
|
||||
)
|
||||
|
||||
machine.succeed(
|
||||
|
|
third_party/nixpkgs/nixos/tests/hardened.nix
|
@ -11,7 +11,7 @@ import ./make-test-python.nix ({ pkgs, ... } : {
|
|||
users.users.sybil = { isNormalUser = true; group = "wheel"; };
|
||||
imports = [ ../modules/profiles/hardened.nix ];
|
||||
environment.memoryAllocator.provider = "graphene-hardened";
|
||||
nix.useSandbox = false;
|
||||
nix.settings.sandbox = false;
|
||||
virtualisation.emptyDiskImages = [ 4096 ];
|
||||
boot.initrd.postDeviceCommands = ''
|
||||
${pkgs.dosfstools}/bin/mkfs.vfat -n EFISYS /dev/vdb
|
||||
|
|
third_party/nixpkgs/nixos/tests/hibernate.nix
|
@ -45,11 +45,11 @@ in makeTest {
|
|||
../modules/profiles/base.nix
|
||||
];
|
||||
|
||||
nix.binaryCaches = mkForce [ ];
|
||||
nix.extraOptions = ''
|
||||
hashed-mirrors =
|
||||
connect-timeout = 1
|
||||
'';
|
||||
nix.settings = {
|
||||
substituters = mkForce [];
|
||||
hashed-mirrors = null;
|
||||
connect-timeout = 1;
|
||||
};
|
||||
|
||||
virtualisation.diskSize = 8 * 1024;
|
||||
virtualisation.emptyDiskImages = [
|
||||
|
|
|
@ -42,7 +42,7 @@
|
|||
hostName = "localhost";
|
||||
systems = [ system ];
|
||||
}];
|
||||
binaryCaches = [];
|
||||
settings.substituters = [];
|
||||
};
|
||||
};
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@ makeInstalledTest {
|
|||
|
||||
testConfig = {
|
||||
xdg.portal.enable = true;
|
||||
xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];
|
||||
services.flatpak.enable = true;
|
||||
environment.systemPackages = with pkgs; [ gnupg ostree python3 ];
|
||||
virtualisation.memorySize = 2047;
|
||||
|
|
third_party/nixpkgs/nixos/tests/installer.nix
|
@ -334,11 +334,11 @@ let
|
|||
(pkgs.grub2_efi.override { inherit zfsSupport; })
|
||||
]);
|
||||
|
||||
nix.binaryCaches = mkForce [ ];
|
||||
nix.extraOptions = ''
|
||||
hashed-mirrors =
|
||||
connect-timeout = 1
|
||||
'';
|
||||
nix.settings = {
|
||||
substituters = mkForce [];
|
||||
hashed-mirrors = null;
|
||||
connect-timeout = 1;
|
||||
};
|
||||
};
|
||||
|
||||
};
|
||||
|
|
|
@ -111,11 +111,11 @@ import ./make-test-python.nix (
|
|||
|
||||
environment.etc."initiator-root-disk-closure".source = nodes.initiatorRootDisk.config.system.build.toplevel;
|
||||
|
||||
nix.binaryCaches = lib.mkForce [ ];
|
||||
nix.extraOptions = ''
|
||||
hashed-mirrors =
|
||||
connect-timeout = 1
|
||||
'';
|
||||
nix.settings = {
|
||||
substituters = lib.mkForce [ ];
|
||||
hashed-mirrors = null;
|
||||
connect-timeout = 1;
|
||||
};
|
||||
};
|
||||
|
||||
initiatorRootDisk = { config, pkgs, modulesPath, lib, ... }: {
|
||||
|
|
third_party/nixpkgs/nixos/tests/iscsi-root.nix
|
@ -95,11 +95,11 @@ import ./make-test-python.nix (
|
|||
|
||||
system.extraDependencies = [ nodes.initiatorRootDisk.config.system.build.toplevel ];
|
||||
|
||||
nix.binaryCaches = lib.mkForce [];
|
||||
nix.extraOptions = ''
|
||||
hashed-mirrors =
|
||||
connect-timeout = 1
|
||||
'';
|
||||
nix.settings = {
|
||||
substituters = lib.mkForce [];
|
||||
hashed-mirrors = null;
|
||||
connect-timeout = 1;
|
||||
};
|
||||
};
|
||||
|
||||
initiatorRootDisk = { config, pkgs, modulesPath, lib, ... }: {
|
||||
|
|
|
@ -23,7 +23,7 @@ let
|
|||
deployer = { config, lib, nodes, pkgs, ... }: {
|
||||
imports = [ ../../modules/installer/cd-dvd/channel.nix ];
|
||||
environment.systemPackages = [ nixopsPkg ];
|
||||
nix.binaryCaches = lib.mkForce [ ];
|
||||
nix.settings.substituters = lib.mkForce [ ];
|
||||
users.users.person.isNormalUser = true;
|
||||
virtualisation.writableStore = true;
|
||||
virtualisation.additionalPaths = [
|
||||
|
|
|
@ -16,7 +16,7 @@ in
|
|||
(modulesPath + "/testing/test-instrumentation.nix")
|
||||
];
|
||||
virtualisation.writableStore = true;
|
||||
nix.binaryCaches = lib.mkForce [ ];
|
||||
nix.settings.substituters = lib.mkForce [ ];
|
||||
virtualisation.graphics = false;
|
||||
documentation.enable = false;
|
||||
services.qemuGuest.enable = true;
|
||||
|
|
third_party/nixpkgs/nixos/tests/os-prober.nix
|
@ -43,11 +43,11 @@ let
|
|||
# vda is a filesystem without partition table
|
||||
forceInstall = true;
|
||||
};
|
||||
nix.binaryCaches = lib.mkForce [ ];
|
||||
nix.extraOptions = ''
|
||||
hashed-mirrors =
|
||||
connect-timeout = 1
|
||||
'';
|
||||
nix.settings = {
|
||||
substituters = lib.mkForce [];
|
||||
hashed-mirrors = null;
|
||||
connect-timeout = 1;
|
||||
};
|
||||
# save some memory
|
||||
documentation.enable = false;
|
||||
};
|
||||
|
|
third_party/nixpkgs/nixos/tests/switch-test.nix
|
@ -45,6 +45,31 @@ import ./make-test-python.nix ({ pkgs, ...} : {
|
|||
systemd.services.test.restartIfChanged = false;
|
||||
};
|
||||
|
||||
simpleServiceFailing.configuration = {
|
||||
imports = [ simpleServiceModified.configuration ];
|
||||
systemd.services.test.serviceConfig.ExecStart = lib.mkForce "${pkgs.coreutils}/bin/false";
|
||||
};
|
||||
|
||||
autorestartService.configuration = {
|
||||
# A service that immediately goes into restarting (but without failing)
|
||||
systemd.services.autorestart = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
Restart = "always";
|
||||
RestartSec = "20y"; # Should be long enough
|
||||
ExecStart = "${pkgs.coreutils}/bin/true";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
autorestartServiceFailing.configuration = {
|
||||
imports = [ autorestartService.configuration ];
|
||||
systemd.services.autorestart.serviceConfig = {
|
||||
ExecStart = lib.mkForce "${pkgs.coreutils}/bin/false";
|
||||
};
|
||||
};
|
||||
|
||||
restart-and-reload-by-activation-script.configuration = {
|
||||
systemd.services = rec {
|
||||
simple-service = {
|
||||
|
@ -189,12 +214,13 @@ import ./make-test-python.nix ({ pkgs, ...} : {
|
|||
exec env -i "$@" | tee /dev/stderr
|
||||
'';
|
||||
in /* python */ ''
|
||||
def switch_to_specialisation(system, name, action="test"):
|
||||
def switch_to_specialisation(system, name, action="test", fail=False):
|
||||
if name == "":
|
||||
stc = f"{system}/bin/switch-to-configuration"
|
||||
else:
|
||||
stc = f"{system}/specialisation/{name}/bin/switch-to-configuration"
|
||||
out = machine.succeed(f"{stc} {action} 2>&1")
|
||||
out = machine.fail(f"{stc} {action} 2>&1") if fail \
|
||||
else machine.succeed(f"{stc} {action} 2>&1")
|
||||
assert_lacks(out, "switch-to-configuration line") # Perl warnings
|
||||
return out
|
||||
|
||||
|
@ -305,7 +331,56 @@ import ./make-test-python.nix ({ pkgs, ...} : {
|
|||
assert_lacks(out, "as well:")
|
||||
assert_contains(out, "would start the following units: test.service\n")
|
||||
|
||||
with subtest("failing units"):
|
||||
# Let the simple service fail
|
||||
switch_to_specialisation("${machine}", "simpleServiceModified")
|
||||
out = switch_to_specialisation("${machine}", "simpleServiceFailing", fail=True)
|
||||
assert_contains(out, "stopping the following units: test.service\n")
|
||||
assert_lacks(out, "NOT restarting the following changed units:")
|
||||
assert_lacks(out, "reloading the following units:")
|
||||
assert_lacks(out, "\nrestarting the following units:")
|
||||
assert_contains(out, "\nstarting the following units: test.service\n")
|
||||
assert_lacks(out, "the following new units were started:")
|
||||
assert_contains(out, "warning: the following units failed: test.service\n")
|
||||
assert_contains(out, "Main PID:") # output of systemctl
|
||||
assert_lacks(out, "as well:")
|
||||
|
||||
# A unit that gets into autorestart without failing is not treated as failed
|
||||
out = switch_to_specialisation("${machine}", "autorestartService")
|
||||
assert_lacks(out, "stopping the following units:")
|
||||
assert_lacks(out, "NOT restarting the following changed units:")
|
||||
assert_lacks(out, "reloading the following units:")
|
||||
assert_lacks(out, "\nrestarting the following units:")
|
||||
assert_lacks(out, "\nstarting the following units:")
|
||||
assert_contains(out, "the following new units were started: autorestart.service\n")
|
||||
assert_lacks(out, "as well:")
|
||||
machine.systemctl('stop autorestart.service') # cancel the 20y timer
|
||||
|
||||
# Switching to the same system should do nothing (especially not treat the unit as failed)
|
||||
out = switch_to_specialisation("${machine}", "autorestartService")
|
||||
assert_lacks(out, "stopping the following units:")
|
||||
assert_lacks(out, "NOT restarting the following changed units:")
|
||||
assert_lacks(out, "reloading the following units:")
|
||||
assert_lacks(out, "\nrestarting the following units:")
|
||||
assert_lacks(out, "\nstarting the following units:")
|
||||
assert_contains(out, "the following new units were started: autorestart.service\n")
|
||||
assert_lacks(out, "as well:")
|
||||
machine.systemctl('stop autorestart.service') # cancel the 20y timer
|
||||
|
||||
# If systemd thinks the unit has failed and is in autorestart, we should show it as failed
|
||||
out = switch_to_specialisation("${machine}", "autorestartServiceFailing", fail=True)
|
||||
assert_lacks(out, "stopping the following units:")
|
||||
assert_lacks(out, "NOT restarting the following changed units:")
|
||||
assert_lacks(out, "reloading the following units:")
|
||||
assert_lacks(out, "\nrestarting the following units:")
|
||||
assert_lacks(out, "\nstarting the following units:")
|
||||
assert_lacks(out, "the following new units were started:")
|
||||
assert_contains(out, "warning: the following units failed: autorestart.service\n")
|
||||
assert_contains(out, "Main PID:") # output of systemctl
|
||||
assert_lacks(out, "as well:")
|
||||
|
||||
with subtest("restart and reload by activation script"):
|
||||
switch_to_specialisation("${machine}", "simpleServiceNorestart")
|
||||
out = switch_to_specialisation("${machine}", "restart-and-reload-by-activation-script")
|
||||
assert_contains(out, "stopping the following units: test.service\n")
|
||||
assert_lacks(out, "NOT restarting the following changed units:")
|
||||
|
|
|
@ -36,12 +36,12 @@ in {
|
|||
client1.wait_for_x()
|
||||
client2.wait_for_x()
|
||||
|
||||
client1.execute("teeworlds 'player_name Alice;connect server'&")
|
||||
client1.execute("teeworlds 'player_name Alice;connect server' >&2 &")
|
||||
server.wait_until_succeeds(
|
||||
'journalctl -u teeworlds -e | grep --extended-regexp -q "team_join player=\'[0-9]:Alice"'
|
||||
)
|
||||
|
||||
client2.execute("teeworlds 'player_name Bob;connect server'&")
|
||||
client2.execute("teeworlds 'player_name Bob;connect server' >&2 &")
|
||||
server.wait_until_succeeds(
|
||||
'journalctl -u teeworlds -e | grep --extended-regexp -q "team_join player=\'[0-9]:Bob"'
|
||||
)
|
||||
|
|
third_party/nixpkgs/nixos/tests/vscodium.nix
|
@ -3,11 +3,10 @@ let
|
|||
wayland = { pkgs, ... }: {
|
||||
imports = [ ./common/wayland-cage.nix ];
|
||||
|
||||
services.cage.program = ''
|
||||
${pkgs.vscodium}/bin/codium \
|
||||
--enable-features=UseOzonePlatform \
|
||||
--ozone-platform=wayland
|
||||
'';
|
||||
services.cage.program = "${pkgs.vscodium}/bin/codium";
|
||||
|
||||
environment.variables.NIXOS_OZONE_WL = "1";
|
||||
environment.variables.DISPLAY = "do not use";
|
||||
|
||||
fonts.fonts = with pkgs; [ dejavu_fonts ];
|
||||
};
|
||||
|
|
|
@ -12,13 +12,13 @@
|
|||
}:
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "cyanrip";
|
||||
version = "0.7.0";
|
||||
version = "0.8.0";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "cyanreg";
|
||||
repo = pname;
|
||||
rev = "v${version}";
|
||||
sha256 = "0lgb92sfpf4w3nj5vlj6j7931mj2q3cmcx1app9snf853jk9ahmw";
|
||||
sha256 = "1aip52bwkq8cb1d8ifyv2m6m5dz7jk6qmbhyb97yyf4nhxv445ky";
|
||||
};
|
||||
|
||||
nativeBuildInputs = [ meson ninja pkg-config ];
|
||||
|
@ -27,7 +27,7 @@ stdenv.mkDerivation rec {
|
|||
meta = with lib; {
|
||||
homepage = "https://github.com/cyanreg/cyanrip";
|
||||
description = "Bule-ish CD ripper";
|
||||
license = licenses.lgpl3Plus;
|
||||
license = licenses.lgpl21Plus;
|
||||
platforms = platforms.all;
|
||||
maintainers = [ maintainers.zane ];
|
||||
};
|
||||
|
|
|
@ -21,19 +21,19 @@
|
|||
|
||||
stdenv.mkDerivation rec {
|
||||
pname = "spot";
|
||||
version = "0.3.0";
|
||||
version = "0.3.1";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "xou816";
|
||||
repo = "spot";
|
||||
rev = version;
|
||||
hash = "sha256-An9PJsuXZkvJhP67cisWxFd2dpky53EY/xcR6StgWFY=";
|
||||
hash = "sha256-uZzylK9imEazwC/ogsDO8ZBvByE5/SNSV+mIlp7Z9Ww=";
|
||||
};
|
||||
|
||||
cargoDeps = rustPlatform.fetchCargoTarball {
|
||||
inherit src;
|
||||
name = "${pname}-${version}";
|
||||
hash = "sha256-2qMmPIBoZS6WT06VzCmnYWaIfLzWN2HUvk7y9GKuuXg=";
|
||||
hash = "sha256-v5xdlsI6OlEpCYOTFePTyI8BkIrAwT6FR2JwiRTGgOA=";
|
||||
};
|
||||
|
||||
nativeBuildInputs = [
|
||||
|
|
|
@ -10,14 +10,14 @@ let
|
|||
# If an update breaks things, one of those might have valuable info:
|
||||
# https://aur.archlinux.org/packages/spotify/
|
||||
# https://community.spotify.com/t5/Desktop-Linux
|
||||
version = "1.1.72.439.gc253025e";
|
||||
version = "1.1.77.643.g3c4c6fc6";
|
||||
# To get the latest stable revision:
|
||||
# curl -H 'X-Ubuntu-Series: 16' 'https://api.snapcraft.io/api/v1/snaps/details/spotify?channel=stable' | jq '.download_url,.version,.last_updated'
|
||||
# To get general information:
|
||||
# curl -H 'Snap-Device-Series: 16' 'https://api.snapcraft.io/v2/snaps/info/spotify' | jq '.'
|
||||
# More examples of api usage:
|
||||
# https://github.com/canonical-websites/snapcraft.io/blob/master/webapp/publisher/snaps/views.py
|
||||
rev = "56";
|
||||
rev = "57";
|
||||
|
||||
deps = [
|
||||
alsa-lib
|
||||
|
@ -80,7 +80,7 @@ stdenv.mkDerivation {
|
|||
# https://community.spotify.com/t5/Desktop-Linux/Redistribute-Spotify-on-Linux-Distributions/td-p/1695334
|
||||
src = fetchurl {
|
||||
url = "https://api.snapcraft.io/api/v1/snaps/download/pOBIoZ2LrCB3rDohMxoYGnbN14EHOgD7_${rev}.snap";
|
||||
sha512 = "b2bd3d49a18dfebaa4660f9c39d11d57fb80a4ef15ec7b7973e3cc07be74f74aebd2d8c66360d79fe778244c533ed02f9dfca4085f99aae0e5faae7c003ba4ef";
|
||||
sha512 = "d9f8fe692db479bcce1f47c87b65c5ac6d62e16b76a0f9b2d693d82d2b9ed2c7cf370cb091ce8ecd291c47d1efdbaa897c9bffb210edd901dc3d5425995229f7";
|
||||
};
|
||||
|
||||
nativeBuildInputs = [ makeWrapper wrapGAppsHook squashfsTools ];
|
||||
|
|
|
@ -108,6 +108,7 @@ let
|
|||
gappsWrapperArgs+=(
|
||||
# Add gio to PATH so that moving files to the trash works when not using a desktop environment
|
||||
--prefix PATH : ${glib.bin}/bin
|
||||
--add-flags "\''${NIXOS_OZONE_WL:+\''${WAYLAND_DISPLAY:+--enable-features=UseOzonePlatform --ozone-platform=wayland}}"
|
||||
)
|
||||
'';
|
||||
|
||||
|
|
|
@ -1,24 +1,29 @@
|
|||
{ lib, stdenv, fetchFromGitHub, flex, bison, pkg-config, zlib, libtiff, libpng, fftw
|
||||
, cairo, readline, ffmpeg_3, makeWrapper, wxGTK30, netcdf, blas
|
||||
, proj, gdal, geos, sqlite, postgresql, libmysqlclient, python2Packages, libLAS, proj-datumgrid
|
||||
, cairo, readline, ffmpeg, makeWrapper, wxGTK30, wxmac, netcdf, blas
|
||||
, proj, gdal, geos, sqlite, postgresql, libmysqlclient, python3Packages, libLAS, proj-datumgrid
|
||||
, zstd, pdal, wrapGAppsHook
|
||||
}:
|
||||
|
||||
stdenv.mkDerivation rec {
|
||||
name = "grass";
|
||||
version = "7.6.1";
|
||||
version = "7.8.6";
|
||||
|
||||
src = with lib; fetchFromGitHub {
|
||||
owner = "OSGeo";
|
||||
repo = "grass";
|
||||
rev = "${name}_${replaceStrings ["."] ["_"] version}";
|
||||
sha256 = "1amjk9rz7vw5ha7nyl5j2bfwj5if9w62nlwx5qbp1x7spldimlll";
|
||||
rev = version;
|
||||
sha256 = "sha256-zvZqFWuxNyA+hu+NMiRbQVdzzrQPsZrdGdfVB17+SbM=";
|
||||
};
|
||||
|
||||
nativeBuildInputs = [ pkg-config ];
|
||||
buildInputs = [ flex bison zlib proj gdal libtiff libpng fftw sqlite cairo proj
|
||||
readline ffmpeg_3 makeWrapper wxGTK30 netcdf geos postgresql libmysqlclient blas
|
||||
libLAS proj-datumgrid ]
|
||||
++ (with python2Packages; [ python python-dateutil wxPython30 numpy ]);
|
||||
buildInputs = [ flex bison zlib proj gdal libtiff libpng fftw sqlite
|
||||
readline ffmpeg makeWrapper netcdf geos postgresql libmysqlclient blas
|
||||
libLAS proj-datumgrid zstd wrapGAppsHook ]
|
||||
++ lib.optionals stdenv.isLinux [ cairo pdal wxGTK30 ]
|
||||
++ lib.optional stdenv.isDarwin wxmac
|
||||
++ (with python3Packages; [ python python-dateutil numpy ]
|
||||
++ lib.optional stdenv.isDarwin wxPython_4_0
|
||||
++ lib.optional stdenv.isLinux wxPython_4_1);
|
||||
|
||||
# On Darwin the installer tries to symlink the help files into a system
|
||||
# directory
|
||||
|
@ -32,7 +37,7 @@ stdenv.mkDerivation rec {
|
|||
configureFlags = [
|
||||
"--with-proj-share=${proj}/share/proj"
|
||||
"--with-proj-includes=${proj.dev}/include"
|
||||
"--with-proj-lib=${proj}/lib"
|
||||
"--with-proj-libs=${proj}/lib"
|
||||
"--without-opengl"
|
||||
"--with-readline"
|
||||
"--with-wxwidgets"
|
||||
|
@ -46,6 +51,15 @@ stdenv.mkDerivation rec {
|
|||
"--with-mysql-libs=${libmysqlclient}/lib/mysql"
|
||||
"--with-blas"
|
||||
"--with-liblas=${libLAS}/bin/liblas-config"
|
||||
"--with-zstd"
|
||||
"--with-fftw"
|
||||
"--with-pthread"
|
||||
] ++ lib.optionals stdenv.isLinux [
|
||||
"--with-pdal"
|
||||
] ++ lib.optionals stdenv.isDarwin [
|
||||
"--without-cairo"
|
||||
"--without-freetype"
|
||||
"--without-x"
|
||||
];
|
||||
|
||||
# Otherwise a very confusing "Can't load GDAL library" error
|
||||
|
@ -62,6 +76,7 @@ stdenv.mkDerivation rec {
|
|||
scripts/g.extension.all/g.extension.all.py \
|
||||
scripts/r.drain/r.drain.py \
|
||||
scripts/r.pack/r.pack.py \
|
||||
scripts/r.import/r.import.py \
|
||||
scripts/r.tileset/r.tileset.py \
|
||||
scripts/r.unpack/r.unpack.py \
|
||||
scripts/v.clip/v.clip.py \
|
||||
|
@ -79,18 +94,17 @@ stdenv.mkDerivation rec {
|
|||
temporal/t.rast.algebra/t.rast.algebra.py \
|
||||
temporal/t.rast3d.algebra/t.rast3d.algebra.py \
|
||||
temporal/t.vect.algebra/t.vect.algebra.py \
|
||||
temporal/t.downgrade/t.downgrade.py \
|
||||
temporal/t.select/t.select.py
|
||||
for d in gui lib scripts temporal tools; do
|
||||
patchShebangs $d
|
||||
done
|
||||
'';
|
||||
|
||||
NIX_CFLAGS_COMPILE = "-DACCEPT_USE_OF_DEPRECATED_PROJ_API_H=1";
|
||||
|
||||
postInstall = ''
|
||||
wrapProgram $out/bin/grass76 \
|
||||
wrapProgram $out/bin/grass78 \
|
||||
--set PYTHONPATH $PYTHONPATH \
|
||||
--set GRASS_PYTHON ${python2Packages.python}/bin/${python2Packages.python.executable} \
|
||||
--set GRASS_PYTHON ${python3Packages.python.interpreter} \
|
||||
--suffix LD_LIBRARY_PATH ':' '${gdal}/lib'
|
||||
ln -s $out/grass*/lib $out/lib
|
||||
ln -s $out/grass*/include $out/include
|
||||
|
|
|
@ -1,17 +1,21 @@
|
|||
{ lib, makeWrapper, symlinkJoin
|
||||
, qgis-unwrapped, extraPythonPackages ? (ps: [ ])
|
||||
, extraPythonPackages ? (ps: [ ])
|
||||
, libsForQt5
|
||||
}:
|
||||
with lib;
|
||||
symlinkJoin rec {
|
||||
let
|
||||
qgis-unwrapped = libsForQt5.callPackage ./unwrapped.nix { };
|
||||
in symlinkJoin rec {
|
||||
|
||||
inherit (qgis-unwrapped) version;
|
||||
name = "qgis-${version}";
|
||||
|
||||
paths = [ qgis-unwrapped ];
|
||||
|
||||
nativeBuildInputs = [ makeWrapper qgis-unwrapped.python3Packages.wrapPython ];
|
||||
nativeBuildInputs = [ makeWrapper qgis-unwrapped.py.pkgs.wrapPython ];
|
||||
|
||||
# extend to add to the python environment of QGIS without rebuilding QGIS application.
|
||||
pythonInputs = qgis-unwrapped.pythonBuildInputs ++ (extraPythonPackages qgis-unwrapped.python3Packages);
|
||||
pythonInputs = qgis-unwrapped.pythonBuildInputs ++ (extraPythonPackages qgis-unwrapped.py.pkgs);
|
||||
|
||||
postBuild = ''
|
||||
# unpackPhase
|
||||
|
@ -23,5 +27,7 @@ symlinkJoin rec {
|
|||
--set PYTHONPATH $program_PYTHONPATH
|
||||
'';
|
||||
|
||||
passthru.unwrapped = qgis-unwrapped;
|
||||
|
||||
meta = qgis-unwrapped.meta;
|
||||
}
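A hedged sketch of the extension point mentioned above, adding a Python package to the wrapped QGIS without rebuilding it (the chosen package is arbitrary and hypothetical):

    qgis.override {
      extraPythonPackages = ps: [ ps.pandas ];
    }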
|
||||
|
|
third_party/nixpkgs/pkgs/applications/gis/qgis/ltr.nix (new file)
|
@ -0,0 +1,32 @@
|
|||
{ lib, makeWrapper, symlinkJoin
|
||||
, extraPythonPackages ? (ps: [ ])
|
||||
, libsForQt5
|
||||
}:
|
||||
with lib;
|
||||
let
|
||||
qgis-ltr-unwrapped = libsForQt5.callPackage ./unwrapped-ltr.nix { };
|
||||
in symlinkJoin rec {
|
||||
|
||||
inherit (qgis-ltr-unwrapped) version;
|
||||
name = "qgis-${version}";
|
||||
|
||||
paths = [ qgis-ltr-unwrapped ];
|
||||
|
||||
nativeBuildInputs = [ makeWrapper qgis-ltr-unwrapped.py.pkgs.wrapPython ];
|
||||
|
||||
# extend to add to the python environment of QGIS without rebuilding QGIS application.
|
||||
pythonInputs = qgis-ltr-unwrapped.pythonBuildInputs ++ (extraPythonPackages qgis-ltr-unwrapped.py.pkgs);
|
||||
|
||||
postBuild = ''
|
||||
|
||||
buildPythonPath "$pythonInputs"
|
||||
|
||||
wrapProgram $out/bin/qgis \
|
||||
--prefix PATH : $program_PATH \
|
||||
--set PYTHONPATH $program_PYTHONPATH
|
||||
'';
|
||||
|
||||
passthru.unwrapped = qgis-ltr-unwrapped;
|
||||
|
||||
inherit (qgis-ltr-unwrapped) meta;
|
||||
}
|
third_party/nixpkgs/pkgs/applications/gis/qgis/unwrapped-ltr.nix (new file)
|
@ -0,0 +1,148 @@
|
|||
{ lib
|
||||
, mkDerivation
|
||||
, fetchFromGitHub
|
||||
, cmake
|
||||
, ninja
|
||||
, flex
|
||||
, bison
|
||||
, proj
|
||||
, geos
|
||||
, xlibsWrapper
|
||||
, sqlite
|
||||
, gsl
|
||||
, qwt
|
||||
, fcgi
|
||||
, python3
|
||||
, libspatialindex
|
||||
, libspatialite
|
||||
, postgresql
|
||||
, txt2tags
|
||||
, openssl
|
||||
, libzip
|
||||
, hdf5
|
||||
, netcdf
|
||||
, exiv2
|
||||
, protobuf
|
||||
, qtbase
|
||||
, qtsensors
|
||||
, qca-qt5
|
||||
, qtkeychain
|
||||
, qt3d
|
||||
, qscintilla
|
||||
, qtserialport
|
||||
, qtxmlpatterns
|
||||
, withGrass ? true
|
||||
, grass
|
||||
, withWebKit ? true
|
||||
, qtwebkit
|
||||
, makeWrapper
|
||||
}:
|
||||
|
||||
let
|
||||
|
||||
py = python3.override {
|
||||
packageOverrides = self: super: {
|
||||
pyqt5 = super.pyqt5.override {
|
||||
withLocation = true;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
pythonBuildInputs = with py.pkgs; [
|
||||
qscintilla-qt5
|
||||
gdal
|
||||
jinja2
|
||||
numpy
|
||||
psycopg2
|
||||
chardet
|
||||
python-dateutil
|
||||
pyyaml
|
||||
pytz
|
||||
requests
|
||||
urllib3
|
||||
pygments
|
||||
pyqt5
|
||||
sip_4
|
||||
owslib
|
||||
six
|
||||
];
|
||||
in mkDerivation rec {
|
||||
version = "3.16.16";
|
||||
pname = "qgis-ltr-unwrapped";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "qgis";
|
||||
repo = "QGIS";
|
||||
rev = "final-${lib.replaceStrings [ "." ] [ "_" ] version}";
|
||||
sha256 = "85RlV1Ik1BeN9B7UE51ktTWMiGkMga2E/fnhyiVwjIs=";
|
||||
};
|
||||
|
||||
passthru = {
|
||||
inherit pythonBuildInputs;
|
||||
inherit py;
|
||||
};
|
||||
|
||||
buildInputs = [
|
||||
openssl
|
||||
proj
|
||||
geos
|
||||
xlibsWrapper
|
||||
sqlite
|
||||
gsl
|
||||
qwt
|
||||
exiv2
|
||||
protobuf
|
||||
fcgi
|
||||
libspatialindex
|
||||
libspatialite
|
||||
postgresql
|
||||
txt2tags
|
||||
libzip
|
||||
hdf5
|
||||
netcdf
|
||||
qtbase
|
||||
qtsensors
|
||||
qca-qt5
|
||||
qtkeychain
|
||||
qscintilla
|
||||
qtserialport
|
||||
qtxmlpatterns
|
||||
qt3d
|
||||
] ++ lib.optional withGrass grass
|
||||
++ lib.optional withWebKit qtwebkit
|
||||
++ pythonBuildInputs;
|
||||
|
||||
nativeBuildInputs = [ makeWrapper cmake flex bison ninja ];
|
||||
|
||||
# Force this pyqt_sip_dir variable to point to the sip dir in PyQt5
|
||||
#
|
||||
# TODO: Correct PyQt5 to provide the expected directory and fix
|
||||
# build to use PYQT5_SIP_DIR consistently.
|
||||
postPatch = ''
|
||||
substituteInPlace cmake/FindPyQt5.py \
|
||||
--replace 'sip_dir = cfg.default_sip_dir' 'sip_dir = "${py.pkgs.pyqt5}/${py.pkgs.python.sitePackages}/PyQt5/bindings"'
|
||||
'';
|
||||
|
||||
cmakeFlags = [
|
||||
"-DCMAKE_SKIP_BUILD_RPATH=OFF"
|
||||
"-DWITH_3D=True"
|
||||
"-DPYQT5_SIP_DIR=${py.pkgs.pyqt5}/${py.pkgs.python.sitePackages}/PyQt5/bindings"
|
||||
"-DQSCI_SIP_DIR=${py.pkgs.qscintilla-qt5}/${py.pkgs.python.sitePackages}/PyQt5/bindings"
|
||||
] ++ lib.optional (!withWebKit) "-DWITH_QTWEBKIT=OFF"
|
||||
++ lib.optional withGrass "-DGRASS_PREFIX7=${grass}/grass78";
|
||||
|
||||
postFixup = lib.optionalString withGrass ''
|
||||
# grass has to be available on the command line even though we baked in
|
||||
# the path at build time using GRASS_PREFIX
|
||||
wrapProgram $out/bin/qgis \
|
||||
--prefix PATH : ${lib.makeBinPath [ grass ]}
|
||||
'';
|
||||
|
||||
meta = with lib; {
|
||||
description = "A Free and Open Source Geographic Information System";
|
||||
homepage = "https://www.qgis.org";
|
||||
license = licenses.gpl2Plus;
|
||||
platforms = platforms.linux;
|
||||
maintainers = with maintainers; [ lsix sikmir erictapen ];
|
||||
};
|
||||
}
|
|
@@ -12,7 +12,7 @@
, gsl
, qwt
, fcgi
, python3Packages
, python3
, libspatialindex
, libspatialite
, postgresql
@@ -27,6 +27,7 @@
, qtsensors
, qca-qt5
, qtkeychain
, qt3d
, qscintilla
, qtserialport
, qtxmlpatterns
@@ -34,10 +35,22 @@
, grass
, withWebKit ? true
, qtwebkit
, pdal
, zstd
, makeWrapper
}:

let
  pythonBuildInputs = with python3Packages; [

  py = python3.override {
    packageOverrides = self: super: {
      pyqt5 = super.pyqt5.override {
        withLocation = true;
      };
    };
  };

  pythonBuildInputs = with py.pkgs; [
    qscintilla-qt5
    gdal
    jinja2
@@ -56,19 +69,19 @@ let
    six
  ];
in mkDerivation rec {
  version = "3.16.14";
  version = "3.22.3";
  pname = "qgis-unwrapped";

  src = fetchFromGitHub {
    owner = "qgis";
    repo = "QGIS";
    rev = "final-${lib.replaceStrings [ "." ] [ "_" ] version}";
    sha256 = "sha256-3FUGSBdlhJhhpTPtYuzKOznsC7PJV3kRL9Il2Yryi1Q=";
    sha256 = "TLXhXHU0dp0MnKHFw/+1rQnJbebnwje21Oasy0qWctk=";
  };

  passthru = {
    inherit pythonBuildInputs;
    inherit python3Packages;
    inherit py;
  };

  buildInputs = [
@@ -96,11 +109,14 @@ in mkDerivation rec {
    qscintilla
    qtserialport
    qtxmlpatterns
    qt3d
    pdal
    zstd
  ] ++ lib.optional withGrass grass
    ++ lib.optional withWebKit qtwebkit
    ++ pythonBuildInputs;

  nativeBuildInputs = [ cmake flex bison ninja ];
  nativeBuildInputs = [ makeWrapper cmake flex bison ninja ];

  # Force this pyqt_sip_dir variable to point to the sip dir in PyQt5
  #
@@ -108,15 +124,24 @@ in mkDerivation rec {
  # build to use PYQT5_SIP_DIR consistently.
  postPatch = ''
    substituteInPlace cmake/FindPyQt5.py \
      --replace 'sip_dir = cfg.default_sip_dir' 'sip_dir = "${python3Packages.pyqt5}/${python3Packages.python.sitePackages}/PyQt5/bindings"'
      --replace 'sip_dir = cfg.default_sip_dir' 'sip_dir = "${py.pkgs.pyqt5}/${py.pkgs.python.sitePackages}/PyQt5/bindings"'
  '';

  cmakeFlags = [
    "-DCMAKE_SKIP_BUILD_RPATH=OFF"
    "-DPYQT5_SIP_DIR=${python3Packages.pyqt5}/${python3Packages.python.sitePackages}/PyQt5/bindings"
    "-DQSCI_SIP_DIR=${python3Packages.qscintilla-qt5}/${python3Packages.python.sitePackages}/PyQt5/bindings"
    "-DWITH_3D=True"
    "-DWITH_PDAL=TRUE"
    "-DPYQT5_SIP_DIR=${py.pkgs.pyqt5}/${py.pkgs.python.sitePackages}/PyQt5/bindings"
    "-DQSCI_SIP_DIR=${py.pkgs.qscintilla-qt5}/${py.pkgs.python.sitePackages}/PyQt5/bindings"
  ] ++ lib.optional (!withWebKit) "-DWITH_QTWEBKIT=OFF"
    ++ lib.optional withGrass "-DGRASS_PREFIX7=${grass}/${grass.name}";
    ++ lib.optional withGrass "-DGRASS_PREFIX7=${grass}/grass78";

  postFixup = lib.optionalString withGrass ''
    # grass has to be available on the command line even though we baked in
    # the path at build time using GRASS_PREFIX
    wrapProgram $out/bin/qgis \
      --prefix PATH : ${lib.makeBinPath [ grass ]}
  '';

  meta = {
    description = "A Free and Open Source Geographic Information System";
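The py = python3.override { packageOverrides = ...; } change above is the standard nixpkgs idiom for rebuilding a whole Python package set against a customised dependency, which is why the derivation switches from python3Packages to py.pkgs throughout. A minimal, self-contained sketch of that idiom follows; only the pyqt5 withLocation override is taken from this commit, while the withPackages environment around it is an illustrative assumption.

# sketch.nix, assuming a recent nixpkgs; illustrative only, not part of the diff.
{ pkgs ? import <nixpkgs> { } }:

let
  # Rebuild the python3 package set so that pyqt5 is compiled with
  # location (QtPositioning) support; every package in py.pkgs that
  # depends on pyqt5 then sees the overridden build.
  py = pkgs.python3.override {
    packageOverrides = self: super: {
      pyqt5 = super.pyqt5.override { withLocation = true; };
    };
  };
in
# An environment pulling pyqt5 from the overridden set, mirroring how the
# qgis derivation above consumes py.pkgs.
py.withPackages (ps: [ ps.pyqt5 ])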
@@ -1,4 +1,4 @@
{ lib, stdenv, fetchFromGitHub, pkg-config, libtool
{ lib, stdenv, fetchFromGitHub, fetchpatch, pkg-config, libtool
, bzip2, zlib, libX11, libXext, libXt, fontconfig, freetype, ghostscript, libjpeg, djvulibre
, lcms2, openexr, libjxl, libpng, liblqr1, libraw, librsvg, libtiff, libxml2, openjpeg, libwebp, libheif
, ApplicationServices
@@ -27,6 +27,14 @@ stdenv.mkDerivation rec {
    sha256 = "0r8zmk2cfmf09l94hqzfz4aspnzn178ggdbgm7w4hr0p864cbvc3";
  };

  patches = [
    # fix a type confusion bug introduced in 7.1.0-20 with commit 075565e93c71bcaaabf0ce70b7d1060bccdf0020
    (fetchpatch {
      url = "https://github.com/ImageMagick/ImageMagick/commit/62845d5672eca4446b952dd0ab2e3e0dab0309d4.patch";
      sha256 = "1kni5i8b5hl69niypidm90mhir8cafi6r9i857fxdlv045h3dg4p";
    })
  ];

  outputs = [ "out" "dev" "doc" ]; # bin/ isn't really big
  outputMan = "out"; # it's tiny

@@ -1,3 +0,0 @@
url http://spiegl.de/qiv/download/
version_link '[.]tgz$'
do_overwrite() { do_overwrite_just_version; }
@@ -5,14 +5,14 @@

stdenv.mkDerivation rec {
  pname = "tev";
  version = "1.19";
  version = "1.22";

  src = fetchFromGitHub {
    owner = "Tom94";
    repo = pname;
    rev = "v${version}";
    fetchSubmodules = true;
    sha256 = "sha256-laP47xOND6PMA6dwTcCupcTIW+9zCaxO6rHzvDSL9JU=";
    sha256 = "sha256-WLDQaN6wHnadvp0JyUzlcZVNiSbFudmmBSNYRMaE6U4=";
  };

  nativeBuildInputs = [ cmake wrapGAppsHook ];

@@ -53,6 +53,7 @@ stdenv.mkDerivation rec {
    changelog = "https://github.com/Tom94/tev/releases/tag/v${version}";
    license = licenses.bsd3;
    platforms = platforms.unix;
    badPlatforms = [ "aarch64-linux" ]; # fails on Hydra since forever
    maintainers = with maintainers; [ ];
  };
}

@@ -19,5 +19,12 @@ mkDerivation {
    kpty syntax-highlighting libmtp libssh openexr openslp
    phonon qtsvg samba solid gperf
  ];

  # org.kde.kmtpd5 DBUS service launches kiod5 binary from kio derivation, not from kio-extras
  postInstall = ''
    substituteInPlace $out/share/dbus-1/services/org.kde.kmtpd5.service \
      --replace Exec=$out Exec=${kio}
  '';

  CXXFLAGS = [ "-I${ilmbase.dev}/include/OpenEXR" ];
}
@@ -65,7 +65,7 @@ stdenv.mkDerivation rec {
  postFixup = ''
    wrapProgram $out/bin/auto-multiple-choice \
      ''${makeWrapperArgs[@]} \
      --prefix PERL5LIB : "${with perlPackages; makePerlPath [
      --prefix PERL5LIB : "${with perlPackages; makeFullPerlPath [
        ArchiveZip
        DBDSQLite
        Cairo
@@ -75,6 +75,7 @@ stdenv.mkDerivation rec {
        GlibObjectIntrospection
        Gtk3
        LocaleGettext
        OpenOfficeOODoc
        PerlMagick
        TextCSV
        XMLParser

@@ -18,13 +18,13 @@

stdenv.mkDerivation rec {
  pname = "cherrytree";
  version = "0.99.44";
  version = "0.99.45";

  src = fetchFromGitHub {
    owner = "giuspen";
    repo = "cherrytree";
    rev = version;
    sha256 = "sha256-13wZb+PxeCrQ3MpewMnqBHO8QnoCRFhKU4awTdYtFd4=";
    sha256 = "sha256-DGhzqv7huFVgCdXy3DuIBT+7s2q6FB7+gFPd4zEXi2M=";
  };

  nativeBuildInputs = [
@@ -1,59 +0,0 @@
{ lib
, stdenv
, fetchFromGitHub
, autoconf
, automake
, bc
, fluxbox
, gettext
, glibmm
, gtkmm2
, libglademm
, libsigcxx
, pkg-config
}:

stdenv.mkDerivation rec {
  pname = "fme";
  version = "1.1.3";

  src = fetchFromGitHub {
    owner = "rdehouss";
    repo = "fme";
    rev = "v${version}";
    sha256 = "sha256-P67OmExBdWM6NZhDyYceVJOZiy8RC+njk/QvgQcWZeQ=";
  };

  nativeBuildInputs = [
    autoconf
    automake
    gettext
    pkg-config
  ];
  buildInputs = [
    bc
    fluxbox
    glibmm
    gtkmm2
    libglademm
    libsigcxx
  ];

  preConfigure = ''
    ./autogen.sh
  '';

  meta = with lib; {
    homepage = "https://github.com/rdehouss/fme/";
    description = "Editor for Fluxbox menus";
    longDescription = ''
      Fluxbox Menu Editor is a menu editor for the Window Manager Fluxbox
      written in C++ with the libraries Gtkmm, Glibmm, libglademm and gettext
      for internationalization. Its user-friendly interface will help you to
      edit, delete, move (Drag and Drop) a row, a submenu, etc very easily.
    '';
    license = licenses.gpl2Plus;
    maintainers = [ maintainers.AndersonTorres ];
    platforms = platforms.linux;
  };
}
@@ -2,11 +2,11 @@

buildPythonApplication rec {
  pname = "gallery_dl";
  version = "1.20.1";
  version = "1.20.3";

  src = fetchPypi {
    inherit pname version;
    sha256 = "a1c06625381485f82aa14a038a622d40ab9cc2c8d150dd65c66df96dbf427f62";
    sha256 = "6d1d97bd08a2a0dcfb0578e759fb83c4902b395405b3e3238673f684973b0556";
  };

  propagatedBuildInputs = [ requests yt-dlp ];
@@ -1,7 +0,0 @@
url https://mupdf.com/downloads/archive/
do_overwrite(){
  ensure_hash
  ensure_version
  set_var_value version $CURRENT_VERSION
  set_var_value sha256 $CURRENT_HASH
}
@@ -10,14 +10,14 @@

stdenv.mkDerivation rec {
  pname = "rivercarro";
  version = "0.1.1";
  version = "0.1.2";

  src = fetchFromSourcehut {
    owner = "~novakane";
    repo = pname;
    fetchSubmodules = true;
    rev = "v${version}";
    sha256 = "0h1wvl6rlrpr67zl51x71hy7nwkfd5kfv5p2mql6w5fybxxyqnpm";
    sha256 = "07md837ki0yln464w8vgwyl3yjrvkz1p8alxlmwqfn4w45nqhw77";
  };

  nativeBuildInputs = [
@@ -1,43 +0,0 @@
{ stdenv, fetchurl, itstool, intltool, pkg-config
, libxml2, gnome2, atk, gtk2, glib
, mono, mono-addins, dbus-sharp-2_0, dbus-sharp-glib-2_0, gnome-sharp, gtk-sharp-2_0
, makeWrapper, lib}:

let
  version = "1.15.9";
in

stdenv.mkDerivation {
  pname = "tomboy";
  inherit version;

  src = fetchurl {
    url = "https://github.com/tomboy-notes/tomboy/releases/download/${version}/tomboy-${version}.tar.xz";
    sha256 = "0j5jmd079bm2fydqaic5ymbfdxna3qlx6fkp2mqhgcdr7prsbl3q";
  };

  nativeBuildInputs = [ pkg-config ];
  buildInputs = [ itstool intltool
    libxml2 gnome2.GConf atk gtk2
    mono mono-addins dbus-sharp-2_0 dbus-sharp-glib-2_0 gnome-sharp gtk-sharp-2_0
    makeWrapper ];

  postInstall = ''
    makeWrapper "${mono}/bin/mono" "$out/bin/tomboy" \
      --add-flags "$out/lib/tomboy/Tomboy.exe" \
      --prefix MONO_GAC_PREFIX : ${dbus-sharp-2_0} \
      --prefix MONO_GAC_PREFIX : ${dbus-sharp-glib-2_0} \
      --prefix MONO_GAC_PREFIX : ${gtk-sharp-2_0} \
      --prefix MONO_GAC_PREFIX : ${gnome-sharp} \
      --prefix MONO_GAC_PREFIX : ${mono-addins} \
      --prefix LD_LIBRARY_PATH : ${lib.makeLibraryPath [ glib gtk-sharp-2_0 gtk-sharp-2_0.gtk gnome2.GConf ]}
  '';

  meta = with lib; {
    homepage = "https://wiki.gnome.org/Apps/Tomboy";
    description = "A simple note-taking application with synchronization";
    platforms = platforms.linux;
    license = lib.licenses.lgpl2;
    maintainers = with maintainers; [ stesie ];
  };
}
@@ -38,7 +38,6 @@ in stdenv.mkDerivation rec {
    license = licenses.gpl2;
    downloadPage = "https://vifm.info/downloads.shtml";
    homepage = "https://vifm.info/";
    updateWalker = true;
    changelog = "https://github.com/vifm/vifm/blob/v${version}/ChangeLog";
  };
}
@@ -183,7 +183,8 @@ in stdenv.mkDerivation {
    mkdir -p "$out/bin"

    eval makeWrapper "${browserBinary}" "$out/bin/chromium" \
      --add-flags ${escapeShellArg (escapeShellArg commandLineArgs)}
      --add-flags ${escapeShellArg (escapeShellArg commandLineArgs)} \
      --add-flags "\''${NIXOS_OZONE_WL:+\''${WAYLAND_DISPLAY:+--enable-features=UseOzonePlatform --ozone-platform=wayland}}"

    ed -v -s "$out/bin/chromium" << EOF
    2i
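The doubly escaped \''${...} fragment above, which recurs in several wrappers in this diff, is written so that neither Nix nor the build-time shell expands it: ''${ escapes the interpolation inside Nix's indented string and the backslash escapes the dollar for the builder shell, so the generated wrapper script ends up containing the plain bash expansion ${NIXOS_OZONE_WL:+${WAYLAND_DISPLAY:+--enable-features=UseOzonePlatform --ozone-platform=wayland}}, which is only evaluated when the user launches the program. A hedged sketch of the same technique in isolation; hello and runCommand here are placeholders, not anything this commit wraps.

# Illustrative sketch only, assuming a recent nixpkgs.
{ runCommand, makeWrapper, hello }:

runCommand "wayland-aware-hello" { nativeBuildInputs = [ makeWrapper ]; } ''
  # The escaped expansion below is copied verbatim into the wrapper script,
  # so the Ozone/Wayland flags are added only when both NIXOS_OZONE_WL and
  # WAYLAND_DISPLAY are set at run time.
  makeWrapper ${hello}/bin/hello $out/bin/hello \
    --add-flags "\''${NIXOS_OZONE_WL:+\''${WAYLAND_DISPLAY:+--enable-features=UseOzonePlatform --ozone-platform=wayland}}"
''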
@@ -7,10 +7,10 @@ in
rec {
  firefox = common rec {
    pname = "firefox";
    version = "96.0.2";
    version = "96.0.3";
    src = fetchurl {
      url = "mirror://mozilla/firefox/releases/${version}/source/firefox-${version}.source.tar.xz";
      sha512 = "5ceb1f023a9217c6a9c08b6525882d4091f989859cf209cc1d0ea22c846d05a967e1c47102ae052f7a5029d18118a558dd96da00437ee2c6fbf2896caf99d9dd";
      sha512 = "3dd5fbc96e369d5f4fb3eca778c2bd3e2313d089f867de9fac3556810a797e9b5629ef1b8840fb2f22a18df7de95ea1993eee052f691d861a555cea544b05966";
    };

    meta = {
@@ -32,10 +32,10 @@ rec {

  firefox-esr-91 = common rec {
    pname = "firefox-esr";
    version = "91.5.0esr";
    version = "91.5.1esr";
    src = fetchurl {
      url = "mirror://mozilla/firefox/releases/${version}/source/firefox-${version}.source.tar.xz";
      sha512 = "1712415b6b73c6a21edfefc39eaba5fcbbca54032f78627c0005d291501d16ef4daffb8b9a160d1d5361113ceba04eb5ddb21d903e3dd8d58838aa9596f2d781";
      sha512 = "26239e7a94b79f1e24a6667d7cf1c398d75992e8850144affbc5d3f34f04b91f0c9b020cab662b2cd4927924839ff2ddd2f3605c537bb5494fd9ac0d951b14fa";
    };

    meta = {
@@ -142,8 +142,9 @@ in stdenv.mkDerivation {
    makeWrapper "$out/share/google/$appname/google-$appname" "$exe" \
      --prefix LD_LIBRARY_PATH : "$rpath" \
      --prefix PATH : "$binpath" \
      --prefix XDG_DATA_DIRS : "$XDG_ICON_DIRS:$GSETTINGS_SCHEMAS_PATH:${addOpenGLRunpath.driverLink}/share" \
      --add-flags ${escapeShellArg commandLineArgs}
      --prefix XDG_DATA_DIRS : "$XDG_ICON_DIRS:$GSETTINGS_SCHEMAS_PATH" \
      --add-flags ${escapeShellArg commandLineArgs} \
      --add-flags "\''${NIXOS_OZONE_WL:+\''${WAYLAND_DISPLAY:+--enable-features=UseOzonePlatform --ozone-platform=wayland}}"

    for elf in $out/share/google/$appname/{chrome,chrome-sandbox,${crashpadHandlerBinary},nacl_helper}; do
      patchelf --set-rpath $rpath $elf
@@ -1,9 +1,9 @@
{ lib, buildGoModule, fetchFromGitHub, fetchzip, installShellFiles }:

let
  version = "0.24.1";
  sha256 = "18jzf5kd06c10f45y4crvaqa5r10dhq2ashlhppzrmhigiyavxac";
  manifestsSha256 = "0qbdik65irnwgw7klj5w0z00jxflm855gikpnqb9gsxd7rbw8ysk";
  version = "0.25.3";
  sha256 = "1j7jw6vfki67dz9lkx3f94b9hi6d2bc504yy3nfppp3hx8nwxb37";
  manifestsSha256 = "1akp1i3xasfjq6zqbk7mnbkhnzmq7if7v82q6zdp2678xrg6xps5";

  manifests = fetchzip {
    url =
@@ -23,7 +23,7 @@ in buildGoModule rec {
    inherit sha256;
  };

  vendorSha256 = "sha256-HoAVdY+kZLpUEl3mE7obbTzAJUyt5MBPjGhs6ZDSnzU=";
  vendorSha256 = "sha256-/VeJq6l3kSZ9qcYf2ypyyoXVKME+rig6aDdWDoRqNzA=";

  postUnpack = ''
    cp -r ${manifests} source/cmd/flux/manifests
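The flux update above keeps its deployment manifests as a separately pinned fetchzip and copies them into the unpacked source before the Go build runs. A hedged sketch of that fetch-then-inject pattern in isolation; the package name, URL, paths and hashes below are placeholders, not values from this commit.

# Illustrative sketch of the fetch-then-inject pattern, not part of the diff.
{ lib, buildGoModule, fetchFromGitHub, fetchzip }:

let
  # Any pre-built artifact the program expects to embed at compile time.
  extraAssets = fetchzip {
    url = "https://example.org/assets-1.0.0.tar.gz"; # placeholder URL
    sha256 = lib.fakeSha256;                         # replace with the real hash
  };
in
buildGoModule rec {
  pname = "example-with-assets"; # placeholder name
  version = "1.0.0";

  src = fetchFromGitHub {
    owner = "example";
    repo = "example-with-assets";
    rev = "v${version}";
    sha256 = lib.fakeSha256;
  };

  vendorSha256 = lib.fakeSha256;

  # Copy the fetched assets into the unpacked source tree so the build can
  # embed them, mirroring how flux copies its manifests into cmd/flux/.
  postUnpack = ''
    cp -r ${extraAssets} source/cmd/example/assets
  '';
}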
@@ -1,23 +1,24 @@
{ lib, buildGoModule, fetchFromGitHub, installShellFiles }:
{ lib, buildGo117Module, fetchFromGitHub, installShellFiles }:

buildGoModule rec {
buildGo117Module rec {
  pname = "helm";
  version = "3.7.2";
  gitCommit = "663a896f4a815053445eec4153677ddc24a0a361";
  version = "3.8.0";
  gitCommit = "d14138609b01886f544b2025f5000351c9eb092e";

  src = fetchFromGitHub {
    owner = "helm";
    repo = "helm";
    rev = "v${version}";
    sha256 = "sha256-MhBuwpgF1PBAZ5QwF7t4J1gqam2cMX+hkdZs7KoSD6I=";
    sha256 = "sha256-/vxf3YfBP1WHFpqll6iq4m+X4NA16qHnuGA0wvrVRsg=";
  };
  vendorSha256 = "sha256-YDdpeVh9rG3MF1HgG7uuRvjXDr9Fcjuhrj16kpK8tsI=";
  vendorSha256 = "sha256-M7XId+2HIh1mFzU54qQZEisWdVq67RlGJjlw+2dpiDc=";

  doCheck = false;

  subPackages = [ "cmd/helm" ];
  ldflags = [
    "-w" "-s"
    "-w"
    "-s"
    "-X helm.sh/helm/v3/internal/version.version=v${version}"
    "-X helm.sh/helm/v3/internal/version.gitCommit=${gitCommit}"
  ];
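For reference, the toolchain pinning and link-time version stamping used by helm above reduce to a small template. Everything below other than the general shape (buildGo117Module plus ldflags with -X) is a placeholder assumption rather than part of this commit.

# Hypothetical template, not part of the diff.
{ lib, buildGo117Module, fetchFromGitHub }:

buildGo117Module rec {
  pname = "example-cli"; # placeholder name
  version = "1.0.0";     # placeholder version

  src = fetchFromGitHub {
    owner = "example";   # placeholder owner/repo
    repo = "example-cli";
    rev = "v${version}";
    sha256 = lib.fakeSha256; # replace with the real hash
  };

  # null means the source already vendors its dependencies; otherwise set the
  # real vendorSha256 as helm and sonobuoy do above.
  vendorSha256 = null;

  # -s/-w strip debug info; -X writes build metadata into package variables,
  # which is how helm injects its version and git commit at link time.
  ldflags = [
    "-s"
    "-w"
    "-X main.version=v${version}" # assumes the program reads main.version
  ];
}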
@@ -1,11 +1,11 @@
{ lib, buildGoModule, fetchFromGitHub }:

# SHA of ${version} for the tool's help output. Unfortunately this is needed in build flags.
let rev = "237bd35906f5c4bed1f4de4aa58cc6a6a676d4fd";
let rev = "0665cd322b11bb40c2774776de765c38d8104bed";
in
buildGoModule rec {
  pname = "sonobuoy";
  version = "0.55.1"; # Do not forget to update `rev` above
  version = "0.56.0"; # Do not forget to update `rev` above

  ldflags =
    let t = "github.com/vmware-tanzu/sonobuoy";
@@ -20,10 +20,10 @@ buildGoModule rec {
    owner = "vmware-tanzu";
    repo = "sonobuoy";
    rev = "v${version}";
    sha256 = "sha256-pHpnh+6O9yjnDA8u0jyLvqNQbXC+xz8fRn47aQNdOAo=";
    sha256 = "sha256-78skqo3sq567s3/XN54xtC0mefDY3Io3BD0d+JP7k5Q=";
  };

  vendorSha256 = "sha256-jPKCWTFABKRZCg6X5VVdrmOU/ZFc7yGD7R8RJrpcITg=";
  vendorSha256 = "sha256-qKXm39CwrTcXENIMh2BBS3MUlhJvmTTA3UzZNpF0PCc=";

  subPackages = [ "." ];
third_party/nixpkgs/pkgs/applications/networking/cluster/talosctl/default.nix (new file, 44 lines)
@@ -0,0 +1,44 @@
{ lib, buildGo117Module, fetchFromGitHub }:

buildGo117Module rec {
  pname = "talosctl";
  version = "0.14.1";

  src = fetchFromGitHub {
    owner = "talos-systems";
    repo = "talos";
    rev = "v${version}";
    sha256 = "sha256-JeZ+Q6LTDJtoxfu4mJNc3wv3Y6OPcIUvgnozj9mWwLw=";
  };

  vendorSha256 = "sha256-ujbEWvcNJJOUegVgAGEPwYF02TiqD1lZELvqc/Gmb4A=";

  # look for GO_LDFLAGS getting set in the Makefile
  ldflags =
    let
      versionPkg = "github.com/talos-systems/talos/pkg/version"; # VERSION_PKG
      imagesPkgs = "github.com/talos-systems/talos/pkg/images"; # IMAGES_PKGS
      mgmtHelpersPkg = "github.com/talos-systems/talos/cmd/talosctl/pkg/mgmt/helpers"; #MGMT_HELPERS_PKG
    in
    [
      "-X ${versionPkg}.Name=Talos"
      "-X ${versionPkg}.SHA=${src.rev}" # should be the hash, but as we build from tags, this needs to do
      "-X ${versionPkg}.Tag=${src.rev}"
      "-X ${versionPkg}.PkgsVersion=v0.9.0-2-g447ce75" # PKGS
      "-X ${versionPkg}.ExtrasVersion=v0.7.0-1-gd6b73a7" # EXTRAS
      "-X ${imagesPkgs}.Username=talos-systems" # USERNAME
      "-X ${imagesPkgs}.Registry=ghcr.io" # REGISTRY
      "-X ${mgmtHelpersPkg}.ArtifactsPath=_out" # ARTIFACTS
    ];

  subPackages = [ "cmd/talosctl" ];

  doCheck = false;

  meta = with lib; {
    description = "A CLI for out-of-band management of Kubernetes nodes created by Talos";
    homepage = "https://github.com/talos-systems/talos";
    license = licenses.mpl20;
    maintainers = with maintainers; [ flokli ];
  };
}
@@ -21,16 +21,33 @@

stdenv.mkDerivation rec {
  pname = "zeek";
  version = "4.1.1";
  version = "4.2.0";

  src = fetchurl {
    url = "https://download.zeek.org/zeek-${version}.tar.gz";
    sha256 = "0wq3kjc3zc5ikzwix7k7gr92v75rg6283kx5fzvc3lcdkaczq2lc";
    sha256 = "sha256-jZoCjKn+x61KnkinY+KWBSOEz0AupM03FXe/8YPCdFE=";
  };

  nativeBuildInputs = [ cmake flex bison file ];
  buildInputs = [ openssl libpcap zlib curl libmaxminddb gperftools python3 swig ncurses ]
    ++ lib.optionals stdenv.isDarwin [ gettext ];
  nativeBuildInputs = [
    bison
    cmake
    file
    flex
  ];

  buildInputs = [
    curl
    gperftools
    libmaxminddb
    libpcap
    ncurses
    openssl
    python3
    swig
    zlib
  ] ++ lib.optionals stdenv.isDarwin [
    gettext
  ];

  outputs = [ "out" "lib" "py" ];

@@ -54,7 +71,7 @@ stdenv.mkDerivation rec {
  '';

  meta = with lib; {
    description = "Powerful network analysis framework much different from a typical IDS";
    description = "Network analysis framework much different from a typical IDS";
    homepage = "https://www.zeek.org";
    changelog = "https://github.com/zeek/zeek/blob/v${version}/CHANGES";
    license = licenses.bsd3;
@@ -80,6 +80,7 @@ stdenv.mkDerivation rec {

    wrapProgram $out/opt/${binaryName}/${binaryName} \
      "''${gappsWrapperArgs[@]}" \
      --add-flags "\''${NIXOS_OZONE_WL:+\''${WAYLAND_DISPLAY:+--enable-features=UseOzonePlatform --ozone-platform=wayland}}" \
      --prefix XDG_DATA_DIRS : "${gtk3}/share/gsettings-schemas/${gtk3.name}/" \
      --prefix LD_LIBRARY_PATH : ${libPath}:$out/opt/${binaryName}
@@ -12,8 +12,6 @@
, Security
, AppKit
, CoreServices

, useWayland ? false
}:

let
@@ -82,7 +80,8 @@ mkYarnPackage rec {
    # LD_PRELOAD workaround for sqlcipher not found: https://github.com/matrix-org/seshat/issues/102
    makeWrapper '${electron_exec}' "$out/bin/${executableName}" \
      --set LD_PRELOAD ${sqlcipher}/lib/libsqlcipher.so \
      --add-flags "$out/share/element/electron${lib.optionalString useWayland " --enable-features=UseOzonePlatform --ozone-platform=wayland"}"
      --add-flags "$out/share/element/electron" \
      --add-flags "\''${NIXOS_OZONE_WL:+\''${WAYLAND_DISPLAY:+--enable-features=UseOzonePlatform --ozone-platform=wayland}}"
  '';

  # Do not attempt generating a tarball for element-web again.
@@ -87,7 +87,6 @@ python3.pkgs.buildPythonApplication rec {
    license = lib.licenses.gpl3Plus;
    maintainers = with lib.maintainers; [ raskin abbradar ];
    downloadPage = "http://gajim.org/downloads.php";
    updateWalker = true;
    platforms = lib.platforms.linux;
  };
}
@@ -38,8 +38,6 @@ stdenv.mkDerivation rec {
    license = licenses.gpl2;
    maintainers = with maintainers; [ pSub ];
    platforms = with platforms; linux;
    updateWalker = true;
    downloadPage = "http://mcabber.com/files/";
    downloadURLRegexp = "mcabber-[0-9.]+[.]tar[.][a-z0-9]+$";
  };
}
@@ -7,13 +7,13 @@
, makeDesktopItem
, copyDesktopItems
, fetchYarnDeps
, yarn, nodejs, fixup_yarn_lock
, yarn
, nodejs
, fixup_yarn_lock
, electron
, Security
, AppKit
, CoreServices

, useWayland ? false
}:

let
@@ -88,7 +88,8 @@ stdenv.mkDerivation rec {

    # executable wrapper
    makeWrapper '${electron_exec}' "$out/bin/${executableName}" \
      --add-flags "$out/share/element/electron${lib.optionalString useWayland " --enable-features=UseOzonePlatform --ozone-platform=wayland"}"
      --add-flags "$out/share/element/electron" \
      --add-flags "\''${NIXOS_OZONE_WL:+\''${WAYLAND_DISPLAY:+--enable-features=UseOzonePlatform --ozone-platform=wayland}}"

    runHook postInstall
  '';
@@ -24,7 +24,7 @@ let

in stdenv.mkDerivation rec {
  pname = "signal-desktop";
  version = "5.27.1"; # Please backport all updates to the stable channel.
  version = "5.29.1"; # Please backport all updates to the stable channel.
  # All releases have a limited lifetime and "expire" 90 days after the release.
  # When releases "expire" the application becomes unusable until an update is
  # applied. The expiration date for the current release can be extracted with:
@@ -34,7 +34,7 @@ in stdenv.mkDerivation rec {

  src = fetchurl {
    url = "https://updates.signal.org/desktop/apt/pool/main/s/signal-desktop/signal-desktop_${version}_amd64.deb";
    sha256 = "0z0v7q0rpxdx7ic78jv7wp1hq8nrfp51jjdr6d85x0hsfdj0z1mc";
    sha256 = "1a56mnmv0lnizmd4dl8fya3mdsy0jy5qr5bqb72m9cipq0069alc";
  };

  nativeBuildInputs = [
@@ -123,6 +123,7 @@ in stdenv.mkDerivation rec {
    gappsWrapperArgs+=(
      --prefix LD_LIBRARY_PATH : "${lib.makeLibraryPath [ stdenv.cc.cc ] }"
      ${customLanguageWrapperArgs}
      --add-flags "\''${NIXOS_OZONE_WL:+\''${WAYLAND_DISPLAY:+--enable-features=UseOzonePlatform --ozone-platform=wayland}}"
    )

    # Fix the desktop link
@@ -170,7 +170,8 @@ let
    rm $out/bin/slack
    makeWrapper $out/lib/slack/slack $out/bin/slack \
      --prefix XDG_DATA_DIRS : $GSETTINGS_SCHEMAS_PATH \
      --prefix PATH : ${lib.makeBinPath [xdg-utils]}
      --prefix PATH : ${lib.makeBinPath [xdg-utils]} \
      --add-flags "\''${NIXOS_OZONE_WL:+\''${WAYLAND_DISPLAY:+--enable-features=UseOzonePlatform --ozone-platform=wayland}}"

    # Fix the desktop link
    substituteInPlace $out/share/applications/slack.desktop \
@@ -28,11 +28,11 @@
}:

let
  version = "5.9.1.1380";
  version = "5.9.3.1911";
  srcs = {
    x86_64-linux = fetchurl {
      url = "https://zoom.us/client/${version}/zoom_x86_64.pkg.tar.xz";
      sha256 = "0r1w13y3ks377hdyil9s68vn09vh22zl6ni4693fm7cf6q49ayyw";
      sha256 = "0pamn028k96z0j9xzv56szk7sy0czd9myqm4p3hps1gkczc9wzs4";
    };
  };
@@ -2,6 +2,7 @@
, alsa-lib, atk, cairo, cups, dbus, expat, fontconfig, freetype
, gdk-pixbuf, glib, gnome2, pango, nspr, nss, gtk3, mesa
, xorg, autoPatchelfHook, systemd, libnotify, libappindicator
, makeWrapper
}:

let deps = [
@@ -53,6 +54,7 @@ stdenv.mkDerivation rec {
  nativeBuildInputs = [
    autoPatchelfHook
    dpkg
    makeWrapper
  ];

  buildInputs = deps;
@@ -73,12 +75,14 @@ stdenv.mkDerivation rec {
    mv usr/bin/* $out/bin
    mv opt/Mullvad\ VPN/* $out/share/mullvad

    sed -i 's|"\/opt\/Mullvad.*VPN|env MULLVAD_DISABLE_UPDATE_NOTIFICATION=1 "'$out'/bin|g' $out/share/applications/mullvad-vpn.desktop

    ln -s $out/share/mullvad/mullvad-{gui,vpn} $out/bin/
    ln -s $out/share/mullvad/resources/mullvad-daemon $out/bin/mullvad-daemon
    ln -sf $out/share/mullvad/resources/mullvad-problem-report $out/bin/mullvad-problem-report

    wrapProgram $out/bin/mullvad-vpn --set MULLVAD_DISABLE_UPDATE_NOTIFICATION 1

    sed -i "s|Exec.*$|Exec=$out/bin/mullvad-vpn $U|" $out/share/applications/mullvad-vpn.desktop

    runHook postInstall
  '';
@@ -3,7 +3,7 @@
} :

let
  version = "0.3.3";
  version = "0.3.4";

in stdenv.mkDerivation {
  pname = "soapyhackrf";
@@ -13,7 +13,7 @@ in stdenv.mkDerivation {
    owner = "pothosware";
    repo = "SoapyHackRF";
    rev = "soapy-hackrf-${version}";
    sha256 = "1awn89z462500gb3fjb7x61b1znkjri9n1d39bqfip1qk4s11pxc";
    sha256 = "sha256-fzPYHJAPX8FkFxPXpLlUagTd/NoamRX0YnxHwkbV1nI=";
  };

  nativeBuildInputs = [ cmake pkg-config ];
@@ -1,4 +1,4 @@
{ lib, stdenv, fetchurl, nettools, java, polyml, z3, veriT, vampire, eprover-ho, rlwrap, makeDesktopItem }:
{ lib, stdenv, fetchurl, coreutils, nettools, java, polyml, z3, veriT, vampire, eprover-ho, rlwrap, makeDesktopItem }:
# nettools needed for hostname

stdenv.mkDerivation rec {
@@ -73,6 +73,11 @@ stdenv.mkDerivation rec {
    for comp in contrib/jdk* contrib/polyml-* contrib/z3-* contrib/verit-* contrib/vampire-* contrib/e-*; do
      rm -rf $comp/x86*
    done

    substituteInPlace lib/Tools/env \
      --replace /usr/bin/env ${coreutils}/bin/env

    rm -r heaps
  '' + (if ! stdenv.isLinux then "" else ''
    arch=${if stdenv.hostPlatform.system == "x86_64-linux" then "x86_64-linux" else "x86-linux"}
    for f in contrib/*/$arch/{bash_process,epclextract,nunchaku,SPASS,zipperposition}; do
@@ -83,6 +88,11 @@ stdenv.mkDerivation rec {
    done
  '');

  buildPhase = ''
    export HOME=$TMP # The build fails if home is not set
    bin/isabelle build -v -o system_heaps -b HOL
  '';

  installPhase = ''
    mkdir -p $out/bin
    mv $TMP/$dirname $out
@@ -117,7 +127,7 @@ stdenv.mkDerivation rec {
    '';
    homepage = "https://isabelle.in.tum.de/";
    license = licenses.bsd3;
    maintainers = [ maintainers.jwiegley ];
    maintainers = [ maintainers.jwiegley maintainers.jvanbruegge ];
    platforms = platforms.linux;
  };
}
@@ -1,6 +0,0 @@
url http://page.mi.fu-berlin.de/cbenzmueller/leo/download.html
version_link '[.]tgz'
version '.*_v([0-9.]+)[.][a-z0-9]+$' '\1'
do_overwrite () {
  do_overwrite_just_version
}
@@ -1,6 +0,0 @@
target clingo.nix
attribute_name clingo
url https://github.com/potassco/clingo/releases/
ensure_choice
version '.*/v([0-9.]+)[.]tar[.].*' '\1'
minimize_overwrite
@@ -66,6 +66,5 @@ stdenv.mkDerivation rec {
    platforms = lib.platforms.linux;
    downloadPage = "http://www.ps.uni-saarland.de/~cebrown/satallax/downloads.php";
    homepage = "http://www.ps.uni-saarland.de/~cebrown/satallax/index.php";
    updateWalker = true;
  };
}
@@ -6,7 +6,7 @@
# build
, cmake
, ctags
, python2Packages
, python3Packages
, swig
# math
, eigen
@@ -30,13 +30,13 @@
, lp_solve
, colpack
# extra support
, pythonSupport ? true
, pythonSupport ? false
, opencvSupport ? false
, opencv ? null
, withSvmLight ? false
}:

assert pythonSupport -> python2Packages != null;
assert pythonSupport -> python3Packages != null;
assert opencvSupport -> opencv != null;

assert (!blas.isILP64) && (!lapack.isILP64);

@@ -101,7 +101,7 @@ stdenv.mkDerivation rec {
  ] ++ lib.optional (!withSvmLight) ./svmlight-scrubber.patch;

  nativeBuildInputs = [ cmake swig ctags ]
    ++ (with python2Packages; [ python jinja2 ply ]);
    ++ (with python3Packages; [ python jinja2 ply ]);

  buildInputs = [
    eigen
@@ -121,7 +121,7 @@ stdenv.mkDerivation rec {
    nlopt
    lp_solve
    colpack
  ] ++ lib.optionals pythonSupport (with python2Packages; [ python numpy ])
  ] ++ lib.optionals pythonSupport (with python3Packages; [ python numpy ])
    ++ lib.optional opencvSupport opencv;

  cmakeFlags = let
@@ -139,7 +139,7 @@ stdenv.mkDerivation rec {
    "-DENABLE_TESTING=${enableIf doCheck}"
    "-DDISABLE_META_INTEGRATION_TESTS=ON"
    "-DTRAVIS_DISABLE_META_CPP=ON"
    "-DPythonModular=${enableIf pythonSupport}"
    "-DINTERFACE_PYTHON=${enableIf pythonSupport}"
    "-DOpenCV=${enableIf opencvSupport}"
    "-DUSE_SVMLIGHT=${enableIf withSvmLight}"
  ];
@@ -177,6 +177,12 @@ stdenv.mkDerivation rec {
    rm -r $out/share
  '';

  postFixup = ''
    # CMake incorrectly calculates library path from dev prefix
    substituteInPlace $dev/lib/cmake/shogun/ShogunTargets-release.cmake \
      --replace "\''${_IMPORT_PREFIX}/lib/" "$out/lib/"
  '';

  meta = with lib; {
    description = "A toolbox which offers a wide range of efficient and unified machine learning methods";
    homepage = "http://shogun-toolbox.org/";