Project import generated by Copybara.

GitOrigin-RevId: 420f89ceb267b461eed5d025b6c3c0e57703cc5c
Default email 2020-10-07 11:15:18 +02:00
parent 6b84fe71f7
commit 1693fb2285
926 changed files with 16310 additions and 9374 deletions

@ -14,7 +14,7 @@ if ! builtins ? nixVersion || builtins.compareVersions requiredVersion builtins.
- If you installed Nix using the install script (https://nixos.org/nix/install),
it is safe to upgrade by running it again:
curl https://nixos.org/nix/install | sh
curl -L https://nixos.org/nix/install | sh
For more information, please see the NixOS release notes at
https://nixos.org/nixos/manual or locally at

@ -475,10 +475,13 @@ passthru.updateScript = writeScript "update-zoom-us" ''
<programlisting>
passthru.updateScript = [ ../../update.sh pname "--requested-release=unstable" ];
</programlisting>
</para>
The script will be run with the <varname>UPDATE_NIX_ATTR_PATH</varname> environment variable set to the attribute path it is supposed to update.
<note>
<para>
The script will usually be run from the root of the Nixpkgs repository but you should not rely on that. Also note that the update scripts will be run in parallel by default; you should avoid running <command>git commit</command> or any other commands that cannot handle that.
</para>
</note>
</para>
<para>
For information about how to run the updates, execute <command>nix-shell maintainers/scripts/update.nix</command>.
</para>
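As a hedged sketch of the environment variable described above (not part of this change), a package might expose an update script along these lines; the package name, URL, and script body are illustrative assumptions:

{ lib, stdenv, fetchurl, writeShellScript }:

stdenv.mkDerivation rec {
  pname = "example-package";
  version = "1.0";

  src = fetchurl {
    url = "https://example.org/example-package-${version}.tar.gz";
    sha256 = lib.fakeSha256;
  };

  # update.nix runs this with UPDATE_NIX_ATTR_PATH set to the attribute
  # path being updated, possibly in parallel with other update scripts.
  passthru.updateScript = writeShellScript "update-example-package" ''
    echo "updating $UPDATE_NIX_ATTR_PATH" >&2
    # ... look up the latest release and rewrite this expression ...
  '';

  meta.description = "Illustrative package exposing an update script";
}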
@ -1636,10 +1639,6 @@ substitute ./foo.in ./foo.out \
--subst-var someVar
</programlisting>
</para>
<para>
<function>substitute</function> is implemented using the <command
xlink:href="http://replace.richardlloyd.org.uk/">replace</command> command. Unlike with the <command>sed</command> command, you dont have to worry about escaping special characters. It supports performing substitutions on binary files (such as executables), though there youll probably want to make sure that the replacement string is as long as the replaced string.
</para>
</listitem>
</varlistentry>
<varlistentry xml:id='fun-substituteInPlace'>

@ -499,6 +499,7 @@ rec {
show = v:
if builtins.isString v then ''"${v}"''
else if builtins.isInt v then builtins.toString v
else if builtins.isBool v then if v then "true" else "false"
else ''<${builtins.typeOf v}>'';
in
mkOptionType rec {

@ -806,6 +806,12 @@
githubId = 574938;
name = "Jonathan Glines";
};
austinbutler = {
email = "austinabutler@gmail.com";
github = "austinbutler";
githubId = 354741;
name = "Austin Butler";
};
avaq = {
email = "nixpkgs@account.avaq.it";
github = "avaq";
@ -3467,6 +3473,12 @@
githubId = 896431;
name = "Chris Hodapp";
};
hongchangwu = {
email = "wuhc85@gmail.com";
github = "hongchangwu";
githubId = 362833;
name = "Hongchang Wu";
};
hrdinka = {
email = "c.nix@hrdinka.at";
github = "hrdinka";
@ -3485,6 +3497,12 @@
githubId = 39689;
name = "Hugo Tavares Reis";
};
hugolgst = {
email = "hugo.lageneste@pm.me";
github = "hugolgst";
githubId = 15371828;
name = "Hugo Lageneste";
};
hyphon81 = {
email = "zero812n@gmail.com";
github = "hyphon81";
@ -4244,6 +4262,12 @@
githubId = 39434424;
name = "Felix Springer";
};
justinlovinger = {
email = "git@justinlovinger.com";
github = "JustinLovinger";
githubId = 7183441;
name = "Justin Lovinger";
};
justinwoo = {
email = "moomoowoo@gmail.com";
github = "justinwoo";
@ -6543,6 +6567,12 @@
githubId = 8641;
name = "Pierre Carrier";
};
penguwin = {
email = "penguwin@penguwin.eu";
github = "penguwin";
githubId = 13225611;
name = "Nicolas Martin";
};
periklis = {
email = "theopompos@gmail.com";
github = "periklis";
@ -8233,6 +8263,12 @@
githubId = 203195;
name = "Szczyp";
};
szlend = {
email = "pub+nix@zlender.si";
github = "szlend";
githubId = 7301807;
name = "Simon Žlender";
};
sztupi = {
email = "attila.sztupak@gmail.com";
github = "sztupi";
@ -9682,4 +9718,36 @@
fingerprint = "F1C5 760E 45B9 9A44 72E9 6BFB D65C 9AFB 4C22 4DA3";
}];
};
berbiche = {
name = "Nicolas Berbiche";
email = "nicolas@normie.dev";
github = "berbiche";
githubId = 20448408;
keys = [{
longkeyid = "rsa4096/0xB461292445C6E696";
fingerprint = "D446 E58D 87A0 31C7 EC15 88D7 B461 2924 45C6 E696";
}];
};
wenngle = {
name = "Zeke Stephens";
email = "zekestephens@gmail.com";
github = "wenngle";
githubId = 63376671;
};
yanganto = {
name = "Antonio Yang";
email = "yanganto@gmail.com";
github = "yanganto";
githubId = 10803111;
};
starcraft66 = {
name = "Tristan Gosselin-Hane";
email = "starcraft66@gmail.com";
github = "starcraft66";
githubId = 1858154;
keys = [{
longkeyid = "rsa4096/0x9D98CDACFF04FD78";
fingerprint = "8597 4506 EC69 5392 0443 0805 9D98 CDAC FF04 FD78";
}];
};
}

@ -4,6 +4,7 @@
, max-workers ? null
, include-overlays ? false
, keep-going ? null
, commit ? null
}:
# TODO: add assert statements
@ -31,30 +32,47 @@ let
in
[x] ++ nubOn f xs;
packagesWithPath = relativePath: cond: return: pathContent:
/* Recursively find all packages (derivations) in `pkgs` matching `cond` predicate.
Type: packagesWithPath :: AttrPath → (AttrPath → derivation → bool) → (AttrSet | List) → List<AttrSet{attrPath :: str; package :: derivation; }>
AttrPath :: [str]
The packages will be returned as a list of named pairs comprising:
- attrPath: stringified attribute path (based on `rootPath`)
- package: corresponding derivation
*/
packagesWithPath = rootPath: cond: pkgs:
let
packagesWithPathInner = path: pathContent:
let
result = builtins.tryEval pathContent;
dedupResults = lst: nubOn (pkg: pkg.updateScript) (lib.concatLists lst);
dedupResults = lst: nubOn ({ package, attrPath }: package.updateScript) (lib.concatLists lst);
in
if result.success then
let
pathContent = result.value;
evaluatedPathContent = result.value;
in
if lib.isDerivation pathContent then
lib.optional (cond relativePath pathContent) (return relativePath pathContent)
else if lib.isAttrs pathContent then
if lib.isDerivation evaluatedPathContent then
lib.optional (cond path evaluatedPathContent) { attrPath = lib.concatStringsSep "." path; package = evaluatedPathContent; }
else if lib.isAttrs evaluatedPathContent then
# If user explicitly points to an attrSet or it is marked for recursion, we recur.
if relativePath == [] || pathContent.recurseForDerivations or false || pathContent.recurseForRelease or false then
dedupResults (lib.mapAttrsToList (name: elem: packagesWithPath (relativePath ++ [name]) cond return elem) pathContent)
if path == rootPath || evaluatedPathContent.recurseForDerivations or false || evaluatedPathContent.recurseForRelease or false then
dedupResults (lib.mapAttrsToList (name: elem: packagesWithPathInner (path ++ [name]) elem) evaluatedPathContent)
else []
else if lib.isList pathContent then
dedupResults (lib.imap0 (i: elem: packagesWithPath (relativePath ++ [i]) cond return elem) pathContent)
else if lib.isList evaluatedPathContent then
dedupResults (lib.imap0 (i: elem: packagesWithPathInner (path ++ [i]) elem) evaluatedPathContent)
else []
else [];
in
packagesWithPathInner rootPath pkgs;
/* Recursively find all packages (derivations) in `pkgs` matching `cond` predicate.
*/
packagesWith = packagesWithPath [];
/* Recursively find all packages in `pkgs` with updateScript by given maintainer.
*/
packagesWithUpdateScriptAndMaintainer = maintainer':
let
maintainer =
@ -63,7 +81,7 @@ let
else
builtins.getAttr maintainer' lib.maintainers;
in
packagesWith (relativePath: pkg: builtins.hasAttr "updateScript" pkg &&
packagesWith (path: pkg: builtins.hasAttr "updateScript" pkg &&
(if builtins.hasAttr "maintainers" pkg.meta
then (if builtins.isList pkg.meta.maintainers
then builtins.elem maintainer pkg.meta.maintainers
@ -71,39 +89,43 @@ let
)
else false
)
)
(relativePath: pkg: pkg)
pkgs;
);
packagesWithUpdateScript = path:
/* Recursively find all packages under `path` in `pkgs` with updateScript.
*/
packagesWithUpdateScript = path: pkgs:
let
pathContent = lib.attrByPath (lib.splitString "." path) null pkgs;
prefix = lib.splitString "." path;
pathContent = lib.attrByPath prefix null pkgs;
in
if pathContent == null then
builtins.throw "Attribute path `${path}` does not exists."
else
packagesWith (relativePath: pkg: builtins.hasAttr "updateScript" pkg)
(relativePath: pkg: pkg)
packagesWithPath prefix (path: pkg: builtins.hasAttr "updateScript" pkg)
pathContent;
packageByName = name:
/* Find a package under `path` in `pkgs` and require that it has an updateScript.
*/
packageByName = path: pkgs:
let
package = lib.attrByPath (lib.splitString "." name) null pkgs;
package = lib.attrByPath (lib.splitString "." path) null pkgs;
in
if package == null then
builtins.throw "Package with an attribute name `${name}` does not exists."
builtins.throw "Package with an attribute name `${path}` does not exists."
else if ! builtins.hasAttr "updateScript" package then
builtins.throw "Package with an attribute name `${name}` does not have a `passthru.updateScript` attribute defined."
builtins.throw "Package with an attribute name `${path}` does not have a `passthru.updateScript` attribute defined."
else
package;
{ attrPath = path; inherit package; };
/* List of packages matched based on the CLI arguments.
*/
packages =
if package != null then
[ (packageByName package) ]
[ (packageByName package pkgs) ]
else if maintainer != null then
packagesWithUpdateScriptAndMaintainer maintainer
packagesWithUpdateScriptAndMaintainer maintainer pkgs
else if path != null then
packagesWithUpdateScript path
packagesWithUpdateScript path pkgs
else
builtins.throw "No arguments provided.\n\n${helpText}";
@ -132,19 +154,32 @@ let
--argstr keep-going true
to continue running when a single update fails.
You can also make the updater automatically commit on your behalf from updateScripts
that support it by adding
--argstr commit true
'';
packageData = package: {
/* Transform a matched package into an object for update.py.
*/
packageData = { package, attrPath }: {
name = package.name;
pname = lib.getName package;
updateScript = map builtins.toString (lib.toList package.updateScript);
oldVersion = lib.getVersion package;
updateScript = map builtins.toString (lib.toList (package.updateScript.command or package.updateScript));
supportedFeatures = package.updateScript.supportedFeatures or [];
attrPath = package.updateScript.attrPath or attrPath;
};
/* JSON file with data for update.py.
*/
packagesJson = pkgs.writeText "packages.json" (builtins.toJSON (map packageData packages));
optionalArgs =
lib.optional (max-workers != null) "--max-workers=${max-workers}"
++ lib.optional (keep-going == "true") "--keep-going";
++ lib.optional (keep-going == "true") "--keep-going"
++ lib.optional (commit == "true") "--commit";
args = [ packagesJson ] ++ optionalArgs;
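Below is a minimal sketch, not taken from this commit, of the structured updateScript form that packageData above understands; the package and its script are illustrative assumptions:

{ pkgs ? import <nixpkgs> { } }:

pkgs.stdenv.mkDerivation {
  pname = "frobnicator";
  version = "1.0";
  dontUnpack = true;
  installPhase = "mkdir -p $out";

  passthru.updateScript = {
    # `command` becomes the updateScript field in packages.json.
    command = [ (pkgs.writeShellScript "update-frobnicator" "echo '[]'") ];
    # Declaring the commit feature makes update.py expect JSON change
    # descriptions on stdout and create commits in a git worktree.
    supportedFeatures = [ "commit" ];
    # Overrides the attrPath that update.nix would otherwise pass along.
    attrPath = "frobnicator";
  };
}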

@ -1,23 +1,189 @@
from __future__ import annotations
from typing import Dict, Generator, List, Optional, Tuple
import argparse
import concurrent.futures
import asyncio
import contextlib
import json
import os
import re
import subprocess
import sys
import tempfile
updates = {}
class CalledProcessError(Exception):
process: asyncio.subprocess.Process
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def run_update_script(package):
async def check_subprocess(*args, **kwargs):
"""
Emulate check argument of subprocess.run function.
"""
process = await asyncio.create_subprocess_exec(*args, **kwargs)
returncode = await process.wait()
if returncode != 0:
error = CalledProcessError()
error.process = process
raise error
return process
async def run_update_script(nixpkgs_root: str, merge_lock: asyncio.Lock, temp_dir: Optional[Tuple[str, str]], package: Dict, keep_going: bool):
worktree: Optional[str] = None
update_script_command = package['updateScript']
if temp_dir is not None:
worktree, _branch = temp_dir
# Update scripts can use $(dirname $0) to get their location but we want to run
# their clones in the git worktree, not in the main nixpkgs repo.
update_script_command = map(lambda arg: re.sub(r'^{0}'.format(re.escape(nixpkgs_root)), worktree, arg), update_script_command)
eprint(f" - {package['name']}: UPDATING ...")
subprocess.run(package['updateScript'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True)
try:
update_process = await check_subprocess('env', f"UPDATE_NIX_ATTR_PATH={package['attrPath']}", *update_script_command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, cwd=worktree)
update_info = await update_process.stdout.read()
await merge_changes(merge_lock, package, update_info, temp_dir)
except KeyboardInterrupt as e:
eprint('Cancelling…')
raise asyncio.exceptions.CancelledError()
except CalledProcessError as e:
eprint(f" - {package['name']}: ERROR")
eprint()
eprint(f"--- SHOWING ERROR LOG FOR {package['name']} ----------------------")
eprint()
stderr = await e.process.stderr.read()
eprint(stderr.decode('utf-8'))
with open(f"{package['pname']}.log", 'wb') as logfile:
logfile.write(stderr)
eprint()
eprint(f"--- SHOWING ERROR LOG FOR {package['name']} ----------------------")
def main(max_workers, keep_going, packages):
with open(sys.argv[1]) as f:
if not keep_going:
raise asyncio.exceptions.CancelledError()
@contextlib.contextmanager
def make_worktree() -> Generator[Tuple[str, str], None, None]:
with tempfile.TemporaryDirectory() as wt:
branch_name = f'update-{os.path.basename(wt)}'
target_directory = f'{wt}/nixpkgs'
subprocess.run(['git', 'worktree', 'add', '-b', branch_name, target_directory])
yield (target_directory, branch_name)
subprocess.run(['git', 'worktree', 'remove', '--force', target_directory])
subprocess.run(['git', 'branch', '-D', branch_name])
async def commit_changes(name: str, merge_lock: asyncio.Lock, worktree: str, branch: str, changes: List[Dict]) -> None:
for change in changes:
# Git can only handle a single index operation at a time
async with merge_lock:
await check_subprocess('git', 'add', *change['files'], cwd=worktree)
commit_message = '{attrPath}: {oldVersion} → {newVersion}'.format(**change)
await check_subprocess('git', 'commit', '--quiet', '-m', commit_message, cwd=worktree)
await check_subprocess('git', 'cherry-pick', branch)
async def check_changes(package: Dict, worktree: str, update_info: str):
if 'commit' in package['supportedFeatures']:
changes = json.loads(update_info)
else:
changes = [{}]
# Try to fill in missing attributes when there is just a single change.
if len(changes) == 1:
# Dynamic data from the updater takes precedence over static data from passthru.updateScript.
if 'attrPath' not in changes[0]:
# update.nix is always passing attrPath
changes[0]['attrPath'] = package['attrPath']
if 'oldVersion' not in changes[0]:
# update.nix is always passing oldVersion
changes[0]['oldVersion'] = package['oldVersion']
if 'newVersion' not in changes[0]:
attr_path = changes[0]['attrPath']
obtain_new_version_process = await check_subprocess('nix-instantiate', '--expr', f'with import ./. {{}}; lib.getVersion {attr_path}', '--eval', '--strict', '--json', stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, cwd=worktree)
changes[0]['newVersion'] = json.loads((await obtain_new_version_process.stdout.read()).decode('utf-8'))
if 'files' not in changes[0]:
changed_files_process = await check_subprocess('git', 'diff', '--name-only', stdout=asyncio.subprocess.PIPE, cwd=worktree)
changed_files = (await changed_files_process.stdout.read()).splitlines()
changes[0]['files'] = changed_files
if len(changed_files) == 0:
return []
return changes
async def merge_changes(merge_lock: asyncio.Lock, package: Dict, update_info: str, temp_dir: Optional[Tuple[str, str]]) -> None:
if temp_dir is not None:
worktree, branch = temp_dir
changes = await check_changes(package, worktree, update_info)
if len(changes) > 0:
await commit_changes(package['name'], merge_lock, worktree, branch, changes)
else:
eprint(f" - {package['name']}: DONE, no changes.")
else:
eprint(f" - {package['name']}: DONE.")
async def updater(nixpkgs_root: str, temp_dir: Optional[Tuple[str, str]], merge_lock: asyncio.Lock, packages_to_update: asyncio.Queue[Optional[Dict]], keep_going: bool, commit: bool):
while True:
package = await packages_to_update.get()
if package is None:
# A sentinel received, we are done.
return
if not ('commit' in package['supportedFeatures'] or 'attrPath' in package):
temp_dir = None
await run_update_script(nixpkgs_root, merge_lock, temp_dir, package, keep_going)
async def start_updates(max_workers: int, keep_going: bool, commit: bool, packages: List[Dict]):
merge_lock = asyncio.Lock()
packages_to_update: asyncio.Queue[Optional[Dict]] = asyncio.Queue()
with contextlib.ExitStack() as stack:
temp_dirs: List[Optional[Tuple[str, str]]] = []
# Do not create more workers than there are packages.
num_workers = min(max_workers, len(packages))
nixpkgs_root_process = await check_subprocess('git', 'rev-parse', '--show-toplevel', stdout=asyncio.subprocess.PIPE)
nixpkgs_root = (await nixpkgs_root_process.stdout.read()).decode('utf-8').strip()
# Set up temporary directories when using auto-commit.
for i in range(num_workers):
temp_dir = stack.enter_context(make_worktree()) if commit else None
temp_dirs.append(temp_dir)
# Fill up the update queue.
for package in packages:
await packages_to_update.put(package)
# Add sentinels, one for each worker.
# A worker will terminate when it gets a sentinel from the queue.
for i in range(num_workers):
await packages_to_update.put(None)
# Prepare updater workers for each temp_dir directory.
# At most `num_workers` instances of `run_update_script` will be running at one time.
updaters = asyncio.gather(*[updater(nixpkgs_root, temp_dir, merge_lock, packages_to_update, keep_going, commit) for temp_dir in temp_dirs])
try:
# Start updater workers.
await updaters
except asyncio.exceptions.CancelledError as e:
# When one worker is cancelled, cancel the others too.
updaters.cancel()
def main(max_workers: int, keep_going: bool, commit: bool, packages_path: str) -> None:
with open(packages_path) as f:
packages = json.load(f)
eprint()
@ -31,29 +197,7 @@ def main(max_workers, keep_going, packages):
eprint()
eprint('Running update for:')
with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
for package in packages:
updates[executor.submit(run_update_script, package)] = package
for future in concurrent.futures.as_completed(updates):
package = updates[future]
try:
future.result()
eprint(f" - {package['name']}: DONE.")
except subprocess.CalledProcessError as e:
eprint(f" - {package['name']}: ERROR")
eprint()
eprint(f"--- SHOWING ERROR LOG FOR {package['name']} ----------------------")
eprint()
eprint(e.stdout.decode('utf-8'))
with open(f"{package['pname']}.log", 'wb') as f:
f.write(e.stdout)
eprint()
eprint(f"--- SHOWING ERROR LOG FOR {package['name']} ----------------------")
if not keep_going:
sys.exit(1)
asyncio.run(start_updates(max_workers, keep_going, commit, packages))
eprint()
eprint('Packages updated!')
@ -65,15 +209,14 @@ def main(max_workers, keep_going, packages):
parser = argparse.ArgumentParser(description='Update packages')
parser.add_argument('--max-workers', '-j', dest='max_workers', type=int, help='Number of updates to run concurrently', nargs='?', default=4)
parser.add_argument('--keep-going', '-k', dest='keep_going', action='store_true', help='Do not stop after first failure')
parser.add_argument('--commit', '-c', dest='commit', action='store_true', help='Commit the changes')
parser.add_argument('packages', help='JSON file containing the list of package names and their update scripts')
if __name__ == '__main__':
args = parser.parse_args()
try:
main(args.max_workers, args.keep_going, args.packages)
except (KeyboardInterrupt, SystemExit) as e:
for update in updates:
update.cancel()
sys.exit(e.code if isinstance(e, SystemExit) else 130)
main(args.max_workers, args.keep_going, args.commit, args.packages)
except KeyboardInterrupt as e:
# Let's cancel outside of the main loop too.
sys.exit(130)

@ -0,0 +1,11 @@
#! /usr/bin/env nix-shell
#! nix-shell -p bash curl ripgrep jq -i bash
set -euxo pipefail
# Possibly also add non-https redirect, but there were none of those when I first
# made this script to test that. Feel free to add it when it is relevant.
curl https://repology.org/api/v1/repository/nix_unstable/problems \
| jq -r '.[] | select(.type == "homepage_permanent_https_redirect") | .data | "s@\(.url)@\(.target)@"' \
| sort | uniq | tee script.sed
find -name '*.nix' | xargs -P4 -- sed -f script.sed -i

@ -24,4 +24,10 @@
<screen>
<prompt># </prompt>mount -o loop -t iso9660 ./result/iso/cd.iso /mnt/iso</screen>
</para>
<para>
If you want to customize your NixOS CD in more detail, or generate other kinds
of images, you might want to check out <link
xlink:href="https://github.com/nix-community/nixos-generators">nixos-generators</link>. This can also be a good starting point when you want to use Nix to build a
'minimal' image that doesn't include a NixOS installation.
</para>
</chapter>

@ -47,7 +47,7 @@
Short version:
</para>
<screen>
<prompt>$ </prompt>curl https://nixos.org/nix/install | sh
<prompt>$ </prompt>curl -L https://nixos.org/nix/install | sh
<prompt>$ </prompt>. $HOME/.nix-profile/etc/profile.d/nix.sh # …or open a fresh shell</screen>
<para>
More details in the

@ -53,9 +53,17 @@
</arg>
</group>
<sbr />
<arg>
<group choice='req'>
<arg choice='plain'>
<option>--upgrade</option>
</arg>
<arg choice='plain'>
<option>--upgrade-all</option>
</arg>
</group>
</arg>
<arg>
<option>--install-bootloader</option>
@ -110,6 +118,11 @@
<arg choice='plain'><option>-v</option></arg>
</group>
</arg>
<arg>
<group choice='req'>
<arg choice='plain'><option>--impure</option></arg>
</group>
</arg>
<arg>
<group choice='req'>
<arg choice='plain'><option>--max-jobs</option></arg>
@ -334,9 +347,23 @@
<term>
<option>--upgrade</option>
</term>
<term>
<option>--upgrade-all</option>
</term>
<listitem>
<para>
Fetch the latest version of NixOS from the NixOS channel.
Update the root user's channel named <literal>nixos</literal>
before rebuilding the system.
</para>
<para>
In addition to the <literal>nixos</literal> channel, the root
user's channels which have a file named
<literal>.update-on-nixos-rebuild</literal> in their base
directory will also be updated.
</para>
<para>
Passing <option>--upgrade-all</option> updates all of the root
user's channels.
</para>
</listitem>
</varlistentry>
@ -542,7 +569,7 @@
In addition, <command>nixos-rebuild</command> accepts various Nix-related
flags, including <option>--max-jobs</option> / <option>-j</option>,
<option>--show-trace</option>, <option>--keep-failed</option>,
<option>--keep-going</option> and <option>--verbose</option> /
<option>--keep-going</option>, <option>--impure</option>, and <option>--verbose</option> /
<option>-v</option>. See the Nix manual for details.
</para>
</refsection>

@ -173,7 +173,7 @@
<listitem>
<para>
For users of a daemon-less Nix installation on Linux or macOS, you can
upgrade Nix by running <command>curl https://nixos.org/nix/install |
upgrade Nix by running <command>curl -L https://nixos.org/nix/install |
sh</command>, or prior to doing a channel update, running
<command>nix-env -iA nix</command>.
</para>

@ -268,6 +268,11 @@ GRANT ALL PRIVILEGES ON *.* TO 'mysql'@'localhost' WITH GRANT OPTION;
</para>
<itemizedlist>
<listitem>
<para>
The <link linkend="opt-services.matrix-synapse.enable">matrix-synapse</link> module no longer includes optional dependencies by default, they have to be added through the <link linkend="opt-services.matrix-synapse.plugins">plugins</link> option.
</para>
</listitem>
<listitem>
<para>
<literal>buildGoModule</literal> now internally creates a vendor directory
@ -594,8 +599,8 @@ systemd.services.nginx.serviceConfig.ReadWritePaths = [ "/var/www" ];
<listitem>
<para>
The hostname (<literal>networking.hostName</literal>) must now be a valid
DNS label (see RFC 1035) and as such must not contain the domain part.
This means that the hostname must start with a letter, end with a letter
DNS label (see RFC 1035, RFC 1123) and as such must not contain the domain part.
This means that the hostname must start with a letter or digit, end with a letter
or digit, and have as interior characters only letters, digits, and
hyphen. The maximum length is 63 characters. Additionally it is
recommended to only use lower-case characters.
@ -834,6 +839,31 @@ CREATE ROLE postgres LOGIN SUPERUSER;
functionally redundant.
</para>
</listitem>
<listitem>
<para>
The package <package>nextcloud17</package> has been removed and <package>nextcloud18</package> was marked as insecure
since both of them <link xlink:href="https://docs.nextcloud.com/server/19/admin_manual/release_schedule.html">
will be EOL (end of life) within the lifetime of 20.09</link>.
</para>
<para>
It's necessary to upgrade to <package>nextcloud19</package>:
<itemizedlist>
<listitem>
<para>
From <package>nextcloud17</package>, you have to upgrade to <package>nextcloud18</package> first as
Nextcloud doesn't allow going multiple major revisions forward in a single upgrade. This is possible
by setting <xref linkend="opt-services.nextcloud.package" /> to <package>nextcloud18</package>.
</para>
</listitem>
<listitem>
<para>
From <package>nextcloud18</package>, it's possible to directly upgrade to <package>nextcloud19</package>
by setting <xref linkend="opt-services.nextcloud.package" /> to <package>nextcloud19</package>.
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
</itemizedlist>
</section>

@ -59,7 +59,17 @@
<itemizedlist>
<listitem>
<para />
<para>
<literal>systemd-journal2gelf</literal> no longer parses JSON; it expects the receiving system to handle that. How to achieve this with Graylog is described in this <link xlink:href="https://github.com/parse-nl/SystemdJournal2Gelf/issues/10">GitHub issue</link>.
</para>
</listitem>
<listitem>
<para>
The option <option>fonts.enableFontDir</option> has been renamed to
<xref linkend="opt-fonts.fontDir.enable"/>. The path of font directory
has also been changed to <literal>/run/current-system/sw/share/X11/fonts</literal>,
for consistency with other X11 resources.
</para>
</listitem>
</itemizedlist>
</section>
@ -73,7 +83,16 @@
<itemizedlist>
<listitem>
<para />
<para>
The default version of <literal>nextcloud</literal> is <package>nextcloud20</package>.
Please note that it's <emphasis>not</emphasis> possible to upgrade <literal>nextcloud</literal>
across multiple major versions! This means that it's e.g. not possible to upgrade
from <package>nextcloud18</package> to <package>nextcloud20</package> in a single deploy.
</para>
<para>
The package can be manually upgraded by setting <xref linkend="opt-services.nextcloud.package" />
to <package>nextcloud20</package>; a configuration sketch follows this section.
</para>
</listitem>
</itemizedlist>
</section>
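As referenced above, a hedged sketch of the stepwise upgrade in a system configuration; the host name is an illustrative assumption:

{ pkgs, ... }:
{
  services.nextcloud = {
    enable = true;
    hostName = "cloud.example.org";
    # Step 1: move up a single major version first.
    package = pkgs.nextcloud19;
  };
}

# Once the upgrade to 19 has finished, a later deploy can switch to:
#   services.nextcloud.package = pkgs.nextcloud20;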

@ -4,15 +4,19 @@ with lib;
let
cfg = config.fonts.fontDir;
x11Fonts = pkgs.runCommand "X11-fonts" { preferLocalBuild = true; } ''
mkdir -p "$out/share/X11-fonts"
find ${toString config.fonts.fonts} \
\( -name fonts.dir -o -name '*.ttf' -o -name '*.otf' \) \
-exec ln -sf -t "$out/share/X11-fonts" '{}' \;
cd "$out/share/X11-fonts"
rm -f fonts.dir fonts.scale fonts.alias
${pkgs.xorg.mkfontdir}/bin/mkfontdir
mkdir -p "$out/share/X11/fonts"
font_regexp='.*\.\(ttf\|otf\|pcf\|pfa\|pfb\|bdf\)\(\.gz\)?'
find ${toString config.fonts.fonts} -regex "$font_regexp" \
-exec ln -sf -t "$out/share/X11/fonts" '{}' \;
cd "$out/share/X11/fonts"
${optionalString cfg.decompressFonts ''
${pkgs.gzip}/bin/gunzip -f *.gz
''}
${pkgs.xorg.mkfontscale}/bin/mkfontscale
${pkgs.xorg.mkfontdir}/bin/mkfontdir
cat $(find ${pkgs.xorg.fontalias}/ -name fonts.alias) >fonts.alias
'';
@ -21,28 +25,43 @@ in
{
options = {
fonts.fontDir = {
fonts = {
enableFontDir = mkOption {
enable = mkOption {
type = types.bool;
default = false;
description = ''
Whether to create a directory with links to all fonts in
<filename>/run/current-system/sw/share/X11-fonts</filename>.
<filename>/run/current-system/sw/share/X11/fonts</filename>.
'';
};
decompressFonts = mkOption {
type = types.bool;
default = config.programs.xwayland.enable;
description = ''
Whether to decompress fonts in
<filename>/run/current-system/sw/share/X11/fonts</filename>.
'';
};
};
};
config = mkIf config.fonts.enableFontDir {
config = mkIf cfg.enable {
# This is enough to make a symlink because the xserver
# module already links all /share/X11 paths.
environment.systemPackages = [ x11Fonts ];
environment.pathsToLink = [ "/share/X11-fonts" ];
services.xserver.filesSection = ''
FontPath "${x11Fonts}/share/X11/fonts"
'';
};
imports = [
(mkRenamedOptionModule [ "fonts" "enableFontDir" ] [ "fonts" "fontDir" "enable" ])
];
}
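A short configuration sketch exercising the renamed option and the new decompression switch (decompressFonts already defaults to programs.xwayland.enable, so setting it here is purely illustrative):

{
  fonts.fontDir.enable = true;
  # Ship decompressed fonts under /run/current-system/sw/share/X11/fonts.
  fonts.fontDir.decompressFonts = true;
}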

@ -35,19 +35,21 @@ with lib;
config = {
fonts.fonts = mkIf config.fonts.enableDefaultFonts
[
pkgs.xorg.fontbhlucidatypewriter100dpi
pkgs.xorg.fontbhlucidatypewriter75dpi
([
pkgs.dejavu_fonts
pkgs.freefont_ttf
pkgs.gyre-fonts # TrueType substitutes for standard PostScript fonts
pkgs.liberation_ttf
pkgs.xorg.fontbh100dpi
pkgs.xorg.fontmiscmisc
pkgs.xorg.fontcursormisc
pkgs.unifont
pkgs.noto-fonts-emoji
];
] ++ lib.optionals (config.nixpkgs.config.allowUnfree or false) [
# these are unfree, and will make usage with xserver fail
pkgs.xorg.fontbhlucidatypewriter100dpi
pkgs.xorg.fontbhlucidatypewriter75dpi
pkgs.xorg.fontbh100dpi
]);
};

@ -281,6 +281,12 @@ foreach my $u (values %usersOut) {
}
updateFile("/etc/shadow", \@shadowNew, 0600);
{
my $uid = getpwnam "root";
my $gid = getgrnam "shadow";
my $path = "/etc/shadow";
chown($uid, $gid, $path) || die "Failed to change ownership of $path: $!";
}
# Rewrite /etc/subuid & /etc/subgid to include default container mappings

@ -537,6 +537,7 @@ in {
input.gid = ids.gids.input;
kvm.gid = ids.gids.kvm;
render.gid = ids.gids.render;
shadow.gid = ids.gids.shadow;
};
system.activationScripts.users = stringAfter [ "stdio" ]

@ -29,7 +29,7 @@ in
options.i18n = {
inputMethod = {
enabled = mkOption {
type = types.nullOr (types.enum [ "ibus" "fcitx" "nabi" "uim" ]);
type = types.nullOr (types.enum [ "ibus" "fcitx" "nabi" "uim" "hime" ]);
default = null;
example = "fcitx";
description = ''
@ -44,6 +44,7 @@ in
<listitem><para>fcitx: A customizable lightweight input method, extra input engines can be added using <literal>i18n.inputMethod.fcitx.engines</literal>.</para></listitem>
<listitem><para>nabi: A Korean input method based on XIM. Nabi doesn't support Qt 5.</para></listitem>
<listitem><para>uim: The universal input method, is a library with a XIM bridge. uim mainly support Chinese, Japanese and Korean.</para></listitem>
<listitem><para>hime: An extremely easy-to-use input method framework.</para></listitem>
</itemizedlist>
'';
};

@ -35,6 +35,11 @@
Uim: The universal input method, is a library with a XIM bridge.
</para>
</listitem>
<listitem>
<para>
Hime: An extremely easy-to-use input method framework.
</para>
</listitem>
</itemizedlist>
<section xml:id="module-services-input-methods-ibus">
<title>IBus</title>
@ -241,4 +246,24 @@ i18n.inputMethod = {
used to choose uim toolbar.
</para>
</section>
<section xml:id="module-services-input-methods-hime">
<title>Hime</title>
<para>
Hime is an extremely easy-to-use input method framework. It is lightweight,
stable, powerful and supports many commonly used input methods, including
Cangjie, Zhuyin, Dayi, Rank, Shrimp, Greek, Japanese Anthy, Korean Pinyin,
Latin Alphabet, Rancang hunting birds, cool music, etc...
</para>
<para>
The following snippet can be used to configure Hime:
</para>
<programlisting>
i18n.inputMethod = {
<link linkend="opt-i18n.inputMethod.enabled">enabled</link> = "hime";
};
</programlisting>
</section>
</chapter>

@ -0,0 +1,28 @@
{ config, lib, pkgs, ... }:
with lib;
{
options = {
i18n.inputMethod.hime = {
enableChewing = mkOption {
type = with types; nullOr bool;
default = null;
description = "enable chewing input method";
};
enableAnthy = mkOption {
type = with types; nullOr bool;
default = null;
description = "enable anthy input method";
};
};
};
config = mkIf (config.i18n.inputMethod.enabled == "hime") {
environment.variables = {
GTK_IM_MODULE = "hime";
QT_IM_MODULE = "hime";
XMODIFIERS = "@im=hime";
};
services.xserver.displayManager.sessionCommands = "${pkgs.hime}/bin/hime &";
};
}

@ -628,7 +628,7 @@ EOF
write_file($fn, <<EOF);
@configuration@
EOF
print STDERR "For more hardware-specific settings, see https://github.com/NixOS/nixos-hardware"
print STDERR "For more hardware-specific settings, see https://github.com/NixOS/nixos-hardware.\n"
} else {
print STDERR "warning: not overwriting existing $fn\n";
}

@ -153,7 +153,7 @@ if [[ -z $system ]]; then
else
echo "building the flake in $flake..."
nix "${flakeFlags[@]}" build "$flake#$flakeAttr.config.system.build.toplevel" \
--extra-substituters "$sub" "${verbosity[@]}" \
--store "$mountPoint" --extra-substituters "$sub" "${verbosity[@]}" \
"${extraBuildFlags[@]}" "${lockFlags[@]}" --out-link "$outLink"
fi
system=$(readlink -f "$outLink")

@ -23,6 +23,7 @@ buildNix=1
fast=
rollback=
upgrade=
upgrade_all=
repair=
profile=/nix/var/nix/profiles/system
buildHost=
@ -55,6 +56,10 @@ while [ "$#" -gt 0 ]; do
--upgrade)
upgrade=1
;;
--upgrade-all)
upgrade=1
upgrade_all=1
;;
--repair)
repair=1
extraBuildFlags+=("$i")
@ -63,7 +68,7 @@ while [ "$#" -gt 0 ]; do
j="$1"; shift 1
extraBuildFlags+=("$i" "$j")
;;
--show-trace|--keep-failed|-K|--keep-going|-k|--verbose|-v|-vv|-vvv|-vvvv|-vvvvv|--fallback|--repair|--no-build-output|-Q|-j*|-L|--refresh|--no-net)
--show-trace|--keep-failed|-K|--keep-going|-k|--verbose|-v|-vv|-vvv|-vvvv|-vvvvv|--fallback|--repair|--no-build-output|-Q|-j*|-L|--refresh|--no-net|--impure)
extraBuildFlags+=("$i")
;;
--option)
@ -223,15 +228,22 @@ if [ "$action" = switch -o "$action" = boot -o "$action" = test ]; then
fi
# If --upgrade is given, run nix-channel --update nixos.
# If --upgrade or `--upgrade-all` is given,
# run nix-channel --update nixos.
if [[ -n $upgrade && -z $_NIXOS_REBUILD_REEXEC && -z $flake ]]; then
nix-channel --update nixos
# If --upgrade-all is passed, or there are other channels that
# contain a file called ".update-on-nixos-rebuild", update them as
# well. Also upgrade the nixos channel.
# If there are other channels that contain a file called
# ".update-on-nixos-rebuild", update them as well.
for channelpath in /nix/var/nix/profiles/per-user/root/channels/*; do
if [ -e "$channelpath/.update-on-nixos-rebuild" ]; then
nix-channel --update "$(basename "$channelpath")"
channel_name=$(basename "$channelpath")
if [[ "$channel_name" == "nixos" ]]; then
nix-channel --update "$channel_name"
elif [ -e "$channelpath/.update-on-nixos-rebuild" ]; then
nix-channel --update "$channel_name"
elif [[ -n $upgrade_all ]] ; then
nix-channel --update "$channel_name"
fi
done
fi

@ -23,7 +23,6 @@ let
inherit (pkgs) runtimeShell;
nix = config.nix.package.out;
path = makeBinPath [
pkgs.nixUnstable
pkgs.jq
nixos-enter
];

@ -346,6 +346,7 @@ in
paperless = 315;
#mailman = 316; # removed 2019-08-30
zigbee2mqtt = 317;
# shadow = 318; # unused
# When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
@ -647,6 +648,7 @@ in
paperless = 315;
#mailman = 316; # removed 2019-08-30
zigbee2mqtt = 317;
shadow = 318;
# When adding a gid, make sure it doesn't match an existing
# uid. Users and groups with the same name should have equal

@ -140,6 +140,7 @@
./programs/mininet.nix
./programs/mtr.nix
./programs/nano.nix
./programs/neovim.nix
./programs/nm-applet.nix
./programs/npm.nix
./programs/oblogout.nix
@ -174,6 +175,7 @@
./programs/xfs_quota.nix
./programs/xonsh.nix
./programs/xss-lock.nix
./programs/xwayland.nix
./programs/yabar.nix
./programs/zmap.nix
./programs/zsh/oh-my-zsh.nix

@ -67,6 +67,8 @@ with lib;
"jfs"
"minix"
"nilfs2"
"ntfs"
"omfs"
"qnx4"
"qnx6"
"sysv"

@ -0,0 +1,165 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.programs.neovim;
runtime' = filter (f: f.enable) (attrValues cfg.runtime);
# taken from the etc module
runtime = pkgs.stdenvNoCC.mkDerivation {
name = "runtime";
builder = ../system/etc/make-etc.sh;
preferLocalBuild = true;
allowSubstitutes = false;
sources = map (x: x.source) runtime';
targets = map (x: x.target) runtime';
};
in {
options.programs.neovim = {
enable = mkEnableOption "Neovim";
defaultEditor = mkOption {
type = types.bool;
default = false;
description = ''
When enabled, installs neovim and configures it to be the default editor
using the EDITOR environment variable.
'';
};
viAlias = mkOption {
type = types.bool;
default = false;
description = ''
Symlink <command>vi</command> to <command>nvim</command> binary.
'';
};
vimAlias = mkOption {
type = types.bool;
default = false;
description = ''
Symlink <command>vim</command> to <command>nvim</command> binary.
'';
};
withRuby = mkOption {
type = types.bool;
default = true;
description = "Enable ruby provider.";
};
configure = mkOption {
type = types.attrs;
default = {};
example = literalExample ''
configure = {
customRC = $''''
" here your custom configuration goes!
$'''';
packages.myVimPackage = with pkgs.vimPlugins; {
# loaded on launch
start = [ fugitive ];
# manually loadable by calling `:packadd $plugin-name`
opt = [ ];
};
};
'';
description = ''
Generate your init file from your list of plugins and custom commands.
Neovim will then be wrapped to load <command>nvim -u /nix/store/<replaceable>hash</replaceable>-vimrc</command>
'';
};
package = mkOption {
type = types.package;
default = pkgs.neovim-unwrapped;
defaultText = literalExample "pkgs.neovim-unwrapped";
description = "The package to use for the neovim binary.";
};
finalPackage = mkOption {
type = types.package;
visible = false;
readOnly = true;
description = "Resulting customized neovim package.";
};
runtime = mkOption {
default = {};
example = literalExample ''
runtime."ftplugin/c.vim".text = "setlocal omnifunc=v:lua.vim.lsp.omnifunc";
'';
description = ''
Set of files that have to be linked in <filename>runtime</filename>.
'';
type = with types; attrsOf (submodule (
{ name, config, ... }:
{ options = {
enable = mkOption {
type = types.bool;
default = true;
description = ''
Whether this runtime file should be generated. This
option allows specific runtime files to be disabled.
'';
};
target = mkOption {
type = types.str;
description = ''
Name of symlink. Defaults to the attribute
name.
'';
};
text = mkOption {
default = null;
type = types.nullOr types.lines;
description = "Text of the file.";
};
source = mkOption {
type = types.path;
description = "Path of the source file.";
};
};
config = {
target = mkDefault name;
source = mkIf (config.text != null) (
let name' = "neovim-runtime" + baseNameOf name;
in mkDefault (pkgs.writeText name' config.text));
};
}));
};
};
config = mkIf cfg.enable {
environment.systemPackages = [
cfg.finalPackage
];
environment.variables = { EDITOR = mkOverride 900 "nvim"; };
programs.neovim.finalPackage = pkgs.wrapNeovim cfg.package {
inherit (cfg) viAlias vimAlias;
configure = cfg.configure // {
customRC = (cfg.configure.customRC or "") + ''
set runtimepath^=${runtime}/etc
'';
};
};
};
}
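A sketch of using the new module from a system configuration; the custom RC line and the fugitive plugin are illustrative choices based on the option examples above:

{ pkgs, ... }:
{
  programs.neovim = {
    enable = true;
    defaultEditor = true;
    viAlias = true;
    vimAlias = true;
    configure = {
      customRC = ''
        set number
      '';
      packages.myVimPackage = with pkgs.vimPlugins; {
        # loaded on launch
        start = [ fugitive ];
      };
    };
    # Mirrors the runtime example shown in the option above.
    runtime."ftplugin/c.vim".text = "setlocal omnifunc=v:lua.vim.lsp.omnifunc";
  };
}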

@ -86,8 +86,7 @@ in {
extraPackages = mkOption {
type = with types; listOf package;
default = with pkgs; [
swaylock swayidle
xwayland alacritty dmenu
swaylock swayidle alacritty dmenu
rxvt-unicode # For backward compatibility (old default terminal)
];
defaultText = literalExample ''
@ -104,6 +103,7 @@ in {
Extra packages to be installed system wide.
'';
};
};
config = mkIf cfg.enable {
@ -130,6 +130,7 @@ in {
programs.dconf.enable = mkDefault true;
# To make a Sway session available if a display manager like SDDM is enabled:
services.xserver.displayManager.sessionPackages = [ swayPackage ];
programs.xwayland.enable = mkDefault true;
};
meta.maintainers = with lib.maintainers; [ gnidorah primeos colemickens ];

@ -0,0 +1,45 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.programs.xwayland;
in
{
options.programs.xwayland = {
enable = mkEnableOption ''
Xwayland X server allows running X programs on a Wayland compositor.
'';
defaultFontPath = mkOption {
type = types.str;
default = optionalString config.fonts.fontDir.enable
"/run/current-system/sw/share/X11/fonts";
description = ''
Default font path. Setting this option causes Xwayland to be rebuilt.
'';
};
package = mkOption {
type = types.path;
description = "The Xwayland package";
};
};
config = mkIf cfg.enable {
# Needed by some applications for fonts and default settings
environment.pathsToLink = [ "/share/X11" ];
environment.systemPackages = [ cfg.package ];
programs.xwayland.package = pkgs.xwayland.override (oldArgs: {
inherit (cfg) defaultFontPath;
});
};
}
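A minimal sketch of enabling the module on its own (the Sway change above already pulls it in via mkDefault); the font path merely restates the option's default when fonts.fontDir.enable is set:

{
  fonts.fontDir.enable = true;
  programs.xwayland = {
    enable = true;
    # Changing this causes Xwayland to be rebuilt, per the option description.
    defaultFontPath = "/run/current-system/sw/share/X11/fonts";
  };
}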

@ -207,7 +207,7 @@ let
renewService = {
description = "Renew ACME certificate for ${cert}";
after = [ "network.target" "network-online.target" "acme-fixperms.service" ] ++ selfsignedDeps;
after = [ "network.target" "network-online.target" "acme-fixperms.service" "nss-lookup.target" ] ++ selfsignedDeps;
wants = [ "network-online.target" "acme-fixperms.service" ] ++ selfsignedDeps;
# https://github.com/NixOS/nixpkgs/pull/81371#issuecomment-605526099

@ -23,17 +23,11 @@ in
default = [];
description = "List of packages to be added to apparmor's include path";
};
parserConfig = mkOption {
type = types.str;
default = "";
description = "AppArmor parser configuration file content";
};
};
};
config = mkIf cfg.enable {
environment.systemPackages = [ pkgs.apparmor-utils ];
environment.etc."apparmor/parser.conf".text = cfg.parserConfig;
boot.kernelParams = [ "apparmor=1" "security=apparmor" ];

@ -429,8 +429,6 @@ let
"password sufficient ${pkgs.sssd}/lib/security/pam_sss.so use_authtok"}
${optionalString config.krb5.enable
"password sufficient ${pam_krb5}/lib/security/pam_krb5.so use_first_pass"}
${optionalString config.services.samba.syncPasswordsByPam
"password optional ${pkgs.samba}/lib/security/pam_smbpass.so nullok use_authtok try_first_pass"}
${optionalString cfg.enableGnomeKeyring
"password optional ${pkgs.gnome3.gnome-keyring}/lib/security/pam_gnome_keyring.so use_authtok"}

@ -42,6 +42,15 @@ in
'';
};
security.sudo.package = mkOption {
type = types.package;
default = pkgs.sudo;
defaultText = "pkgs.sudo";
description = ''
Which package to use for `sudo`.
'';
};
security.sudo.wheelNeedsPassword = mkOption {
type = types.bool;
default = true;
@ -208,8 +217,8 @@ in
'';
security.wrappers = {
sudo.source = "${pkgs.sudo.out}/bin/sudo";
sudoedit.source = "${pkgs.sudo.out}/bin/sudoedit";
sudo.source = "${cfg.package.out}/bin/sudo";
sudoedit.source = "${cfg.package.out}/bin/sudoedit";
};
environment.systemPackages = [ sudo ];
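An illustrative use of the new security.sudo.package option; the withInsults override flag is an assumption about the sudo package in nixpkgs, not something introduced by this diff:

{ pkgs, ... }:
{
  security.sudo.enable = true;
  security.sudo.package = pkgs.sudo.override { withInsults = true; };
}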

@ -169,7 +169,7 @@ in
boot.specialFileSystems.${parentWrapperDir} = {
fsType = "tmpfs";
options = [ "nodev" ];
options = [ "nodev" "mode=755" ];
};
# Make sure our wrapperDir exports to the PATH env variable when
@ -187,6 +187,8 @@ in
# programs to be wrapped.
WRAPPER_PATH=${config.system.path}/bin:${config.system.path}/sbin
chmod 755 "${parentWrapperDir}"
# We want to place the tmpdirs for the wrappers to the parent dir.
wrapperDir=$(mktemp --directory --tmpdir="${parentWrapperDir}" wrappers.XXXXXXXXXX)
chmod a+rx $wrapperDir
@ -197,6 +199,9 @@ in
# Atomically replace the symlink
# See https://axialcorps.com/2013/07/03/atomically-replacing-files-and-directories/
old=$(readlink -f ${wrapperDir})
if [ -e ${wrapperDir}-tmp ]; then
rm --force --recursive ${wrapperDir}-tmp
fi
ln --symbolic --force --no-dereference $wrapperDir ${wrapperDir}-tmp
mv --no-target-directory ${wrapperDir}-tmp ${wrapperDir}
rm --force --recursive $old

@ -38,13 +38,18 @@ let
cassandraYaml = builtins.toJSON cassandraConfigWithAddresses;
cassandraEnvPkg = "${cfg.package}/conf/cassandra-env.sh";
cassandraLogbackConfig = pkgs.writeText "logback.xml" cfg.logbackConfig;
passAsFile = [ "extraEnvSh" ];
inherit (cfg) extraEnvSh;
buildCommand = ''
mkdir -p "$out"
echo "$cassandraYaml" > "$out/cassandra.yaml"
ln -s "$cassandraLogbackConfig" "$out/logback.xml"
cp "$cassandraEnvPkg" "$out/cassandra-env.sh"
( cat "$cassandraEnvPkg"
echo "# lines from services.cassandra.extraEnvSh: "
cat "$extraEnvShPath"
) > "$out/cassandra-env.sh"
# Delete default JMX Port, otherwise we can't set it using env variable
sed -i '/JMX_PORT="7199"/d' "$out/cassandra-env.sh"
@ -224,6 +229,14 @@ in {
Extra options to be merged into cassandra.yaml as nix attribute set.
'';
};
extraEnvSh = mkOption {
type = types.lines;
default = "";
example = "CLASSPATH=$CLASSPATH:\${extraJar}";
description = ''
Extra shell lines to be appended onto cassandra-env.sh.
'';
};
fullRepairInterval = mkOption {
type = types.nullOr types.str;
default = "3w";

@ -84,12 +84,10 @@ let
(
optionalString (cfg.mailboxes != {}) ''
protocol imap {
namespace inbox {
inbox=yes
${concatStringsSep "\n" (map mailboxConfig (attrValues cfg.mailboxes))}
}
}
''
)

@ -513,10 +513,6 @@ in
include ${config.services.nginx.package}/conf/fastcgi_params;
fastcgi_pass unix:/run/sympa/wwsympa.socket;
fastcgi_split_path_info ^(${loc})(.*)$;
fastcgi_param PATH_INFO $fastcgi_path_info;
fastcgi_param SCRIPT_FILENAME ${pkg}/lib/sympa/cgi/wwsympa.fcgi;
'';
}) // {
"/static-sympa/".alias = "${dataDir}/static_content/";

@ -131,7 +131,12 @@ in {
plugins = mkOption {
type = types.listOf types.package;
default = [ ];
defaultText = "with config.services.matrix-synapse.package.plugins [ matrix-synapse-ldap3 matrix-synapse-pam ]";
example = literalExample ''
with config.services.matrix-synapse.package.plugins; [
matrix-synapse-ldap3
matrix-synapse-pam
];
'';
description = ''
List of additional Matrix plugins to make available.
'';

@ -6,7 +6,6 @@ let
cfg = config.services.datadog-agent;
ddConf = {
dd_url = cfg.ddUrl;
skip_ssl_validation = false;
confd_path = "/etc/datadog-agent/conf.d";
additional_checksd = "/etc/datadog-agent/checks.d";
@ -14,6 +13,8 @@ let
}
// optionalAttrs (cfg.logLevel != null) { log_level = cfg.logLevel; }
// optionalAttrs (cfg.hostname != null) { inherit (cfg) hostname; }
// optionalAttrs (cfg.ddUrl != null) { dd_url = cfg.ddUrl; }
// optionalAttrs (cfg.site != null) { site = cfg.site; }
// optionalAttrs (cfg.tags != null ) { tags = concatStringsSep ", " cfg.tags; }
// optionalAttrs (cfg.enableLiveProcessCollection) { process_config = { enabled = "true"; }; }
// optionalAttrs (cfg.enableTraceAgent) { apm_config = { enabled = true; }; }
@ -79,14 +80,23 @@ in {
ddUrl = mkOption {
description = ''
Custom dd_url to configure the agent with.
Useful when you want to point datadog to another endpoint, either
because you need a proxy to send out data, or because you use their EU
endpoint.
Custom dd_url to configure the agent with. Useful if traffic to datadog
needs to go through a proxy.
Don't use this to point to another datadog site (EU) - use site instead.
'';
default = "https://app.datadoghq.com";
example = "https://app.datadoghq.eu";
type = types.str;
default = null;
example = "http://haproxy.example.com:3834";
type = types.nullOr types.str;
};
site = mkOption {
description = ''
The datadog site to point the agent towards.
Set to datadoghq.eu to point it to their EU site.
'';
default = null;
example = "datadoghq.eu";
type = types.nullOr types.str;
};
tags = mkOption {
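A sketch of the reworked options in use: per the descriptions above, `site` selects the EU endpoint while `ddUrl` stays reserved for proxies. The key file path is an illustrative assumption:

{
  services.datadog-agent = {
    enable = true;
    apiKeyFile = "/run/keys/datadog-api-key";
    site = "datadoghq.eu";
    # ddUrl would only be set to reach Datadog through a proxy, e.g.
    # ddUrl = "http://haproxy.example.com:3834";
  };
}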

@ -49,8 +49,8 @@ let
"--web.listen-address=${cfg.listenAddress}:${builtins.toString cfg.port}"
"--alertmanager.notification-queue-capacity=${toString cfg.alertmanagerNotificationQueueCapacity}"
"--alertmanager.timeout=${toString cfg.alertmanagerTimeout}s"
] ++
optional (cfg.webExternalUrl != null) "--web.external-url=${cfg.webExternalUrl}";
] ++ optional (cfg.webExternalUrl != null) "--web.external-url=${cfg.webExternalUrl}"
++ optional (cfg.retentionTime != null) "--storage.tsdb.retention.time=${cfg.retentionTime}";
filterValidPrometheus = filterAttrsListRecursive (n: v: !(n == "_module" || v == null));
filterAttrsListRecursive = pred: x:
@ -624,6 +624,15 @@ in {
errors, despite a correct configuration.
'';
};
retentionTime = mkOption {
type = types.nullOr types.str;
default = null;
example = "15d";
description = ''
How long to retain samples in storage.
'';
};
};
config = mkIf cfg.enable {
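For completeness, a minimal configuration using the new option; the value mirrors the option's own example:

{
  services.prometheus = {
    enable = true;
    # Translates to --storage.tsdb.retention.time=15d on the command line.
    retentionTime = "15d";
  };
}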

@ -38,6 +38,7 @@ let
"nextcloud"
"nginx"
"node"
"openvpn"
"postfix"
"postgres"
"redis"
@ -101,7 +102,6 @@ let
default = "${name}-exporter";
description = ''
User name under which the ${name} exporter shall be run.
Has no effect when <option>systemd.services.prometheus-${name}-exporter.serviceConfig.DynamicUser</option> is true.
'';
};
group = mkOption {
@ -109,7 +109,6 @@ let
default = "${name}-exporter";
description = ''
Group under which the ${name} exporter shall be run.
Has no effect when <option>systemd.services.prometheus-${name}-exporter.serviceConfig.DynamicUser</option> is true.
'';
};
});
@ -161,10 +160,9 @@ let
serviceConfig.PrivateTmp = mkDefault true;
serviceConfig.WorkingDirectory = mkDefault /tmp;
serviceConfig.DynamicUser = mkDefault enableDynamicUser;
} serviceOpts ] ++ optional (!enableDynamicUser) {
serviceConfig.User = conf.user;
serviceConfig.Group = conf.group;
});
} serviceOpts ]);
};
in
{
@ -229,6 +227,8 @@ in
})] ++ [(mkIf config.services.nginx.enable {
systemd.services.prometheus-nginx-exporter.after = [ "nginx.service" ];
systemd.services.prometheus-nginx-exporter.requires = [ "nginx.service" ];
})] ++ [(mkIf config.services.postfix.enable {
services.prometheus.exporters.postfix.group = mkDefault config.services.postfix.setgidGroup;
})] ++ (mapAttrsToList (name: conf:
mkExporterConf {
inherit name;

@ -0,0 +1,39 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.prometheus.exporters.openvpn;
in {
port = 9176;
extraOpts = {
statusPaths = mkOption {
type = types.listOf types.str;
description = ''
Paths to OpenVPN status files. Please configure the OpenVPN option
<literal>status</literal> accordingly.
'';
};
telemetryPath = mkOption {
type = types.str;
default = "/metrics";
description = ''
Path under which to expose metrics.
'';
};
};
serviceOpts = {
serviceConfig = {
PrivateDevices = true;
ProtectKernelModules = true;
NoNewPrivileges = true;
ExecStart = ''
${pkgs.prometheus-openvpn-exporter}/bin/openvpn_exporter \
-openvpn.status_paths "${concatStringsSep "," cfg.statusPaths}" \
-web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
-web.telemetry-path ${cfg.telemetryPath}
'';
};
};
}
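A hedged sketch tying the exporter to an OpenVPN instance; the server name `office`, its (elided) configuration, and the status path are assumptions showing how statusPaths and the OpenVPN `status` option line up:

{
  services.openvpn.servers.office.config = ''
    # ... the usual OpenVPN server configuration ...
    status /var/run/openvpn/office.status
  '';

  services.prometheus.exporters.openvpn = {
    enable = true;
    statusPaths = [ "/var/run/openvpn/office.status" ];
  };
}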

@ -8,6 +8,15 @@ in
{
port = 9154;
extraOpts = {
group = mkOption {
type = types.str;
description = ''
Group under which the postfix exporter shall be run.
It should match the group that is allowed to access the
<literal>showq</literal> socket in the <literal>queue/public/</literal> directory.
Defaults to <literal>services.postfix.setgidGroup</literal> when postfix is enabled.
'';
};
telemetryPath = mkOption {
type = types.str;
default = "/metrics";
@ -26,16 +35,20 @@ in
};
showqPath = mkOption {
type = types.path;
default = "/var/spool/postfix/public/showq";
example = "/var/lib/postfix/queue/public/showq";
default = "/var/lib/postfix/queue/public/showq";
example = "/var/spool/postfix/public/showq";
description = ''
Path where Postfix places it's showq socket.
Path where Postfix places its showq socket.
'';
};
systemd = {
enable = mkEnableOption ''
reading metrics from the systemd-journal instead of from a logfile
enable = mkOption {
type = types.bool;
default = true;
description = ''
Whether to enable reading metrics from the systemd journal instead of from a logfile
'';
};
unit = mkOption {
type = types.str;
default = "postfix.service";

@ -3,120 +3,76 @@
with lib;
let
cfg = config.services.rsyncd;
motdFile = builtins.toFile "rsyncd-motd" cfg.motd;
foreach = attrs: f:
concatStringsSep "\n" (mapAttrsToList f attrs);
cfgFile = ''
${optionalString (cfg.motd != "") "motd file = ${motdFile}"}
${optionalString (cfg.address != "") "address = ${cfg.address}"}
${optionalString (cfg.port != 873) "port = ${toString cfg.port}"}
${cfg.extraConfig}
${foreach cfg.modules (name: module: ''
[${name}]
${foreach module (k: v:
"${k} = ${v}"
)}
'')}
'';
in
{
settingsFormat = pkgs.formats.ini { };
configFile = settingsFormat.generate "rsyncd.conf" cfg.settings;
in {
options = {
services.rsyncd = {
enable = mkEnableOption "the rsync daemon";
motd = mkOption {
type = types.str;
default = "";
description = ''
Message of the day to display to clients on each connect.
This usually contains site information and any legal notices.
'';
};
port = mkOption {
default = 873;
type = types.int;
type = types.port;
description = "TCP port the daemon will listen on.";
};
address = mkOption {
default = "";
example = "192.168.1.2";
settings = mkOption {
inherit (settingsFormat) type;
default = { };
example = {
global = {
uid = "nobody";
gid = "nobody";
"use chroot" = true;
"max connections" = 4;
};
ftp = {
path = "/var/ftp/./pub";
comment = "whole ftp area";
};
cvs = {
path = "/data/cvs";
comment = "CVS repository (requires authentication)";
"auth users" = [ "tridge" "susan" ];
"secrets file" = "/etc/rsyncd.secrets";
};
};
description = ''
IP address the daemon will listen on; rsyncd will listen on
all addresses if this is not specified.
'';
};
extraConfig = mkOption {
type = types.lines;
default = "";
description = ''
Lines of configuration to add to rsyncd globally.
See <command>man rsyncd.conf</command> for options.
'';
};
modules = mkOption {
default = {};
description = ''
A set describing exported directories.
See <command>man rsyncd.conf</command> for options.
'';
type = types.attrsOf (types.attrsOf types.str);
example = literalExample ''
{ srv =
{ path = "/srv";
"read only" = "yes";
comment = "Public rsync share.";
};
}
'';
};
user = mkOption {
type = types.str;
default = "root";
description = ''
The user to run the daemon as.
By default the daemon runs as root.
'';
};
group = mkOption {
type = types.str;
default = "root";
description = ''
The group to run the daemon as.
By default the daemon runs as root.
Configuration for rsyncd. See
<citerefentry><refentrytitle>rsyncd.conf</refentrytitle>
<manvolnum>5</manvolnum></citerefentry>.
'';
};
};
};
###### implementation
imports = (map (option:
mkRemovedOptionModule [ "services" "rsyncd" option ]
"This option was removed in favor of `services.rsyncd.settings`.") [
"address"
"extraConfig"
"motd"
"user"
"group"
]);
config = mkIf cfg.enable {
environment.etc."rsyncd.conf".text = cfgFile;
services.rsyncd.settings.global.port = toString cfg.port;
systemd.services.rsyncd = {
description = "Rsync daemon";
wantedBy = [ "multi-user.target" ];
restartTriggers = [ config.environment.etc."rsyncd.conf".source ];
serviceConfig = {
ExecStart = "${pkgs.rsync}/bin/rsync --daemon --no-detach";
User = cfg.user;
Group = cfg.group;
};
serviceConfig.ExecStart =
"${pkgs.rsync}/bin/rsync --daemon --no-detach --config=${configFile}";
};
};
meta.maintainers = with lib.maintainers; [ ehmry ];
# TODO: socket activated rsyncd
}
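A short sketch of the reworked module in a system configuration, using the INI-style settings attrset; the share path and comment are illustrative:

{
  services.rsyncd = {
    enable = true;
    settings = {
      global = {
        uid = "nobody";
        gid = "nobody";
        "max connections" = 4;
      };
      public = {
        path = "/srv/public";
        comment = "Public rsync share";
        "read only" = true;
      };
    };
  };
}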

@ -26,7 +26,6 @@ let
[global]
security = ${cfg.securityType}
passwd program = /run/wrappers/bin/passwd %u
pam password change = ${smbToString cfg.syncPasswordsByPam}
invalid users = ${smbToString cfg.invalidUsers}
${cfg.extraConfig}
@ -67,6 +66,7 @@ in
{
imports = [
(mkRemovedOptionModule [ "services" "samba" "defaultShare" ] "")
(mkRemovedOptionModule [ "services" "samba" "syncPasswordsByPam" ] "This option has been removed by upstream, see https://bugzilla.samba.org/show_bug.cgi?id=10669#c10")
];
###### interface
@ -124,18 +124,6 @@ in
'';
};
syncPasswordsByPam = mkOption {
type = types.bool;
default = false;
description = ''
Enabling this will add a line directly after pam_unix.so.
Whenever a password is changed the samba password will be updated as well.
However, you still have to add the samba password once, using smbpasswd -a user.
If you don't want to maintain an extra password database, you still can send plain text
passwords which is not secure.
'';
};
invalidUsers = mkOption {
type = types.listOf types.str;
default = [ "root" ];

@ -11,7 +11,7 @@ in
settings = mkOption {
description = ''
Attrset that is converted and passed as TOML config file.
For available params, see: <link xlink:href="https://github.com/DNSCrypt/dnscrypt-proxy/blob/master/dnscrypt-proxy/example-dnscrypt-proxy.toml"/>
For available params, see: <link xlink:href="https://github.com/DNSCrypt/dnscrypt-proxy/blob/${pkgs.dnscrypt-proxy2.version}/dnscrypt-proxy/example-dnscrypt-proxy.toml"/>
'';
example = literalExample ''
{
@ -49,13 +49,51 @@ in
networking.nameservers = lib.mkDefault [ "127.0.0.1" ];
systemd.services.dnscrypt-proxy2 = {
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
description = "DNSCrypt-proxy client";
wants = [
"network-online.target"
"nss-lookup.target"
];
before = [
"nss-lookup.target"
];
wantedBy = [
"multi-user.target"
];
serviceConfig = {
AmbientCapabilities = "CAP_NET_BIND_SERVICE";
CacheDirectory = "dnscrypt-proxy";
DynamicUser = true;
ExecStart = "${pkgs.dnscrypt-proxy2}/bin/dnscrypt-proxy -config ${cfg.configFile}";
LockPersonality = true;
LogsDirectory = "dnscrypt-proxy";
MemoryDenyWriteExecute = true;
NoNewPrivileges = true;
NonBlocking = true;
PrivateDevices = true;
ProtectControlGroups = true;
ProtectHome = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
ProtectSystem = "strict";
Restart = "always";
RestrictAddressFamilies = [
"AF_INET"
"AF_INET6"
];
RestrictNamespaces = true;
RestrictRealtime = true;
RuntimeDirectory = "dnscrypt-proxy";
StateDirectory = "dnscrypt-proxy";
SystemCallArchitectures = "native";
SystemCallFilter = [
"@system-service"
"@chown"
"~@resources"
"@privileged"
];
};
};
};
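
A minimal sketch (not part of this commit) of how the TOML-backed settings option is used; the listen address and resolver name are illustrative and assume the corresponding keys from the upstream example-dnscrypt-proxy.toml:

  services.dnscrypt-proxy2 = {
    enable = true;
    settings = {
      listen_addresses = [ "127.0.0.1:53" ];  # converted to TOML by the module
      server_names = [ "cloudflare" ];
    };
  };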

View file

@ -99,7 +99,7 @@ in
config = mkIf cfg.enable {
assertions = [{
assertion = config.networking.firewall.enable == false;
message = "You can not use nftables with services.networking.firewall.";
message = "You can not use nftables and iptables at the same time. networking.firewall.enable must be set to false.";
}];
boot.blacklistedKernelModules = [ "ip_tables" ];
environment.systemPackages = [ pkgs.nftables ];
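
As the new assertion message states, nftables and the iptables-based firewall are mutually exclusive; a minimal sketch (not part of this commit, ruleset purely illustrative):

  networking.firewall.enable = false;  # required, see the assertion above
  networking.nftables = {
    enable = true;
    ruleset = ''
      table inet filter {
        chain input {
          type filter hook input priority 0; policy accept;
        }
      }
    '';
  };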

View file

@ -57,7 +57,7 @@ let
pgsqlLocal = cfg.database.createLocally && cfg.database.type == "pgsql";
phpExt = pkgs.php.withExtensions
({ enabled, all }: with all; [ iconv mbstring curl openssl tokenizer xmlrpc soap ctype zip gd simplexml dom intl json sqlite3 pgsql pdo_sqlite pdo_pgsql pdo_odbc pdo_mysql pdo mysqli session zlib xmlreader fileinfo ]);
({ enabled, all }: with all; [ iconv mbstring curl openssl tokenizer xmlrpc soap ctype zip gd simplexml dom intl json sqlite3 pgsql pdo_sqlite pdo_pgsql pdo_odbc pdo_mysql pdo mysqli session zlib xmlreader fileinfo filter ]);
in
{
# interface

View file

@ -85,7 +85,7 @@ in {
package = mkOption {
type = types.package;
description = "Which package to use for the Nextcloud instance.";
relatedPackages = [ "nextcloud17" "nextcloud18" "nextcloud19" ];
relatedPackages = [ "nextcloud18" "nextcloud19" "nextcloud20" ];
};
maxUploadSize = mkOption {
@ -330,37 +330,28 @@ in {
}
];
warnings = []
++ (optional (cfg.poolConfig != null) ''
warnings = let
latest = 20;
upgradeWarning = major: nixos:
''
A legacy Nextcloud install (from before NixOS ${nixos}) may be installed.
After nextcloud${toString major} is installed successfully, you can safely upgrade
to ${toString (major + 1)}. The latest version available is nextcloud${toString latest}.
Please note that Nextcloud doesn't support upgrades across multiple major versions
(i.e. an upgrade from 16 to 17 is possible, but not from 16 to 18).
The package can be upgraded by explicitly declaring the service option
`services.nextcloud.package`.
'';
in (optional (cfg.poolConfig != null) ''
Using config.services.nextcloud.poolConfig is deprecated and will become unsupported in a future release.
Please migrate your configuration to config.services.nextcloud.poolSettings.
'')
++ (optional (versionOlder cfg.package.version "18") ''
A legacy Nextcloud install (from before NixOS 20.03) may be installed.
You're currently deploying an older version of Nextcloud. This may be needed
since Nextcloud doesn't allow major version upgrades that skip multiple
versions (i.e. an upgrade from 16 is possible to 17, but not 16 to 18).
It is assumed that Nextcloud will be upgraded from version 16 to 17.
* If this is a fresh install, there will be no upgrade to do now.
* If this server already had Nextcloud installed, first deploy this to your
server, and wait until the upgrade to 17 is finished.
Then, set `services.nextcloud.package` to `pkgs.nextcloud18` to upgrade to
Nextcloud version 18. Please note that Nextcloud 19 is already out and it's
recommended to upgrade to nextcloud19 after that.
'')
++ (optional (versionOlder cfg.package.version "19") ''
A legacy Nextcloud install (from before NixOS 20.09/unstable) may be installed.
If/After nextcloud18 is installed successfully, you can safely upgrade to
nextcloud19. If not, please upgrade to nextcloud18 first since Nextcloud doesn't
support upgrades that skip multiple versions (i.e. an upgrade from 17 to 19 isn't
possible, but an upgrade from 18 to 19).
'');
++ (optional (versionOlder cfg.package.version "18") (upgradeWarning 17 "20.03"))
++ (optional (versionOlder cfg.package.version "19") (upgradeWarning 18 "20.09"))
++ (optional (versionOlder cfg.package.version "20") (upgradeWarning 19 "21.03"));
services.nextcloud.package = with pkgs;
mkDefault (
@ -372,7 +363,8 @@ in {
''
else if versionOlder stateVersion "20.03" then nextcloud17
else if versionOlder stateVersion "20.09" then nextcloud18
else nextcloud19
else if versionOlder stateVersion "21.03" then nextcloud19
else nextcloud20
);
}
@ -435,7 +427,7 @@ in {
then ''"$(<"${toString c.dbpassFile}")"''
else if c.dbpass != null
then ''"${toString c.dbpass}"''
else null;
else ''""'';
adminpass = if c.adminpassFile != null
then ''"$(<"${toString c.adminpassFile}")"''
else ''"${toString c.adminpass}"'';
@ -449,8 +441,7 @@ in {
${if c.dbhost != null then "--database-host" else null} = ''"${c.dbhost}"'';
${if c.dbport != null then "--database-port" else null} = ''"${toString c.dbport}"'';
${if c.dbuser != null then "--database-user" else null} = ''"${c.dbuser}"'';
${if (any (x: x != null) [c.dbpass c.dbpassFile])
then "--database-pass" else null} = dbpass;
"--database-pass" = dbpass;
${if c.dbtableprefix != null
then "--database-table-prefix" else null} = ''"${toString c.dbtableprefix}"'';
"--admin-user" = ''"${c.adminuser}"'';
@ -542,7 +533,10 @@ in {
environment.systemPackages = [ occ ];
services.nginx.enable = mkDefault true;
services.nginx.virtualHosts.${cfg.hostName} = {
services.nginx.virtualHosts.${cfg.hostName} = let
major = toInt (versions.major cfg.package.version);
in {
root = cfg.package;
locations = {
"= /robots.txt" = {
@ -555,7 +549,9 @@ in {
};
"/" = {
priority = 900;
extraConfig = "try_files $uri $uri/ /index.php$request_uri;";
extraConfig = if major < 20
then "rewrite ^ /index.php;"
else "try_files $uri $uri/ /index.php$request_uri;";
};
"~ ^/store-apps" = {
priority = 201;
@ -579,7 +575,7 @@ in {
"~ ^/(?:\\.|autotest|occ|issue|indie|db_|console)".extraConfig = ''
return 404;
'';
"~ \\.php(?:$|/)" = {
${if major < 20 then "~ ^\\/(?:index|remote|public|cron|core\\/ajax\\/update|status|ocs\\/v[12]|updater\\/.+|oc[ms]-provider\\/.+|.+\\/richdocumentscode\\/proxy)\\.php(?:$|\\/)" else "~ \\.php(?:$|/)"} = {
priority = 500;
extraConfig = ''
include ${config.services.nginx.package}/conf/fastcgi.conf;
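
A minimal sketch (not part of this commit) of pinning the Nextcloud major version as the upgrade warning describes; the host name is hypothetical:

  services.nextcloud = {
    enable = true;
    hostName = "cloud.example.org";
    # Upgrade one major version at a time, e.g. nextcloud18 -> nextcloud19 -> nextcloud20.
    package = pkgs.nextcloud20;
  };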

View file

@ -693,9 +693,6 @@ in
services.httpd.phpOptions =
''
; Needed for PHP's mail() function.
sendmail_path = ${pkgs.system-sendmail}/bin/sendmail -t -i
; Don't advertise PHP
expose_php = off
'' + optionalString (config.time.timeZone != null) ''

View file

@ -6,8 +6,6 @@ let
cfg = config.services.caddy;
configFile = pkgs.writeText "Caddyfile" cfg.config;
# v2-specific options
isCaddy2 = versionAtLeast cfg.package.version "2.0";
tlsConfig = {
apps.tls.automation.policies = [{
issuer = {
@ -50,7 +48,7 @@ in {
example = "nginx";
type = types.str;
description = ''
Name of the config adapter to use. Not applicable to Caddy v1.
Name of the config adapter to use.
See https://caddyserver.com/docs/config-adapters for the full list.
'';
};
@ -90,11 +88,10 @@ in {
package = mkOption {
default = pkgs.caddy;
defaultText = "pkgs.caddy";
example = "pkgs.caddy1";
example = "pkgs.caddy";
type = types.package;
description = ''
Caddy package to use.
To use Caddy v1 (obsolete), set this to <literal>pkgs.caddy1</literal>.
'';
};
};
@ -106,21 +103,9 @@ in {
after = [ "network-online.target" ];
wants = [ "network-online.target" ]; # systemd-networkd-wait-online.service
wantedBy = [ "multi-user.target" ];
environment = mkIf (versionAtLeast config.system.stateVersion "17.09" && !isCaddy2)
{ CADDYPATH = cfg.dataDir; };
serviceConfig = {
ExecStart = if isCaddy2 then ''
${cfg.package}/bin/caddy run --config ${configJSON}
'' else ''
${cfg.package}/bin/caddy -log stdout -log-timestamps=false \
-root=/var/tmp -conf=${configFile} \
-ca=${cfg.ca} -email=${cfg.email} ${optionalString cfg.agree "-agree"}
'';
ExecReload =
if isCaddy2 then
"${cfg.package}/bin/caddy reload --config ${configJSON}"
else
"${pkgs.coreutils}/bin/kill -USR1 $MAINPID";
ExecStart = "${cfg.package}/bin/caddy run --config ${configJSON}";
ExecReload = "${cfg.package}/bin/caddy reload --config ${configJSON}";
Type = "simple";
User = "caddy";
Group = "caddy";

View file

@ -4,23 +4,8 @@ with lib;
let
cfg = config.services.molly-brown;
settingsType = with types;
attrsOf (oneOf [
int
str
(listOf str)
(attrsOf (oneOf [ int str (listOf str) (attrsOf str) ]))
]) // {
description = "primitive expression convertable to TOML";
};
configFile = pkgs.runCommand "molly-brown.toml" {
buildInputs = [ pkgs.remarshal ];
preferLocalBuild = true;
passAsFile = [ "settings" ];
settings = builtins.toJSON cfg.settings;
} "remarshal -if json -of toml < $settingsPath > $out";
settingsFormat = pkgs.formats.toml { };
configFile = settingsFormat.generate "molly-brown.toml" cfg.settings;
in {
options.services.molly-brown = {
@ -76,7 +61,7 @@ in {
};
settings = mkOption {
type = settingsType;
inherit (settingsFormat) type;
default = { };
description = ''
molly-brown configuration. Refer to
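
A minimal sketch (not part of this commit) of the settings mechanism after the switch to pkgs.formats.toml; the attribute names below are placeholders modelled on molly-brown's example configuration and are not verified here:

  services.molly-brown.settings = {
    Port = 1965;                      # rendered as a TOML integer
    Hostname = "gemini.example.org";  # rendered as a TOML string
  };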

View file

@ -26,12 +26,9 @@ let
phpIni = poolOpts: pkgs.runCommand "php.ini" {
inherit (poolOpts) phpPackage phpOptions;
preferLocalBuild = true;
nixDefaults = ''
sendmail_path = "/run/wrappers/bin/sendmail -t -i"
'';
passAsFile = [ "nixDefaults" "phpOptions" ];
passAsFile = [ "phpOptions" ];
} ''
cat ${poolOpts.phpPackage}/etc/php.ini $nixDefaultsPath $phpOptionsPath > $out
cat ${poolOpts.phpPackage}/etc/php.ini $phpOptionsPath > $out
'';
poolOpts = { name, ... }:

View file

@ -53,6 +53,8 @@ let
flashbackEnabled = cfg.flashback.enableMetacity || length cfg.flashback.customSessions > 0;
notExcluded = pkg: mkDefault (!(lib.elem pkg config.environment.gnome3.excludePackages));
in
{
@ -68,6 +70,38 @@ in
core-shell.enable = mkEnableOption "GNOME Shell services";
core-utilities.enable = mkEnableOption "GNOME core utilities";
games.enable = mkEnableOption "GNOME games";
experimental-features = {
realtime-scheduling = mkOption {
type = types.bool;
default = false;
description = ''
Makes mutter (and therefore gnome-shell) request low-priority real-time
scheduling, which is only available on the Wayland session.
Enabling this experimental feature requires a restart of the compositor.
Note that enabling this option only enables the <emphasis>capability</emphasis>
for realtime scheduling to be used; it does not automatically set the GSetting
that makes mutter actually use realtime scheduling. That requires adding <literal>
rt-scheduler</literal> to <literal>/org/gnome/mutter/experimental-features</literal>
with dconf-editor. You cannot use extraGSettingsOverrides for this, because that only
changes the default value of the setting.
Please be aware of these known issues with the feature in NixOS:
<itemizedlist>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/90201">NixOS/nixpkgs#90201</link>
</para>
</listitem>
<listitem>
<para>
<link xlink:href="https://github.com/NixOS/nixpkgs/issues/86730">NixOS/nixpkgs#86730</link>
</para>
</listitem>
</itemizedlist>
'';
};
};
};
services.xserver.desktopManager.gnome3 = {
@ -289,26 +323,6 @@ in
source-sans-pro
];
## Enable soft realtime scheduling, only supported on wayland ##
security.wrappers.".gnome-shell-wrapped" = {
source = "${pkgs.gnome3.gnome-shell}/bin/.gnome-shell-wrapped";
capabilities = "cap_sys_nice=ep";
};
systemd.user.services.gnome-shell-wayland = let
gnomeShellRT = with pkgs.gnome3; pkgs.runCommand "gnome-shell-rt" {} ''
mkdir -p $out/bin/
cp ${gnome-shell}/bin/gnome-shell $out/bin
sed -i "s@${gnome-shell}/bin/@${config.security.wrapperDir}/@" $out/bin/gnome-shell
'';
in {
# Note we need to clear ExecStart before overriding it
serviceConfig.ExecStart = ["" "${gnomeShellRT}/bin/gnome-shell"];
# Do not use the default environment, it provides a broken PATH
environment = mkForce {};
};
# Adapt from https://gitlab.gnome.org/GNOME/gnome-build-meta/blob/gnome-3-36/elements/core/meta-gnome-core-shell.bst
environment.systemPackages = with pkgs.gnome3; [
adwaita-icon-theme
@ -333,6 +347,27 @@ in
];
})
# Enable soft realtime scheduling, only supported on wayland
(mkIf serviceCfg.experimental-features.realtime-scheduling {
security.wrappers.".gnome-shell-wrapped" = {
source = "${pkgs.gnome3.gnome-shell}/bin/.gnome-shell-wrapped";
capabilities = "cap_sys_nice=ep";
};
systemd.user.services.gnome-shell-wayland = let
gnomeShellRT = with pkgs.gnome3; pkgs.runCommand "gnome-shell-rt" {} ''
mkdir -p $out/bin/
cp ${gnome-shell}/bin/gnome-shell $out/bin
sed -i "s@${gnome-shell}/bin/@${config.security.wrapperDir}/@" $out/bin/gnome-shell
'';
in {
# Note we need to clear ExecStart before overriding it
serviceConfig.ExecStart = ["" "${gnomeShellRT}/bin/gnome-shell"];
# Do not use the default environment, it provides a broken PATH
environment = mkForce {};
};
})
# Adapt from https://gitlab.gnome.org/GNOME/gnome-build-meta/blob/gnome-3-36/elements/core/meta-gnome-core-utilities.bst
(mkIf serviceCfg.core-utilities.enable {
environment.systemPackages = (with pkgs.gnome3; removePackagesByName [
@ -363,14 +398,17 @@ in
/* gnome-boxes */
] config.environment.gnome3.excludePackages);
# Enable default programs
programs.evince.enable = mkDefault true;
programs.file-roller.enable = mkDefault true;
programs.geary.enable = mkDefault true;
programs.gnome-disks.enable = mkDefault true;
programs.gnome-terminal.enable = mkDefault true;
programs.seahorse.enable = mkDefault true;
services.gnome3.sushi.enable = mkDefault true;
# Enable default program modules
# Since some of these have a corresponding package, we only
# enable that program module if the package hasn't been excluded
# through `environment.gnome3.excludePackages`
programs.evince.enable = notExcluded pkgs.gnome3.evince;
programs.file-roller.enable = notExcluded pkgs.gnome3.file-roller;
programs.geary.enable = notExcluded pkgs.gnome3.geary;
programs.gnome-disks.enable = notExcluded pkgs.gnome3.gnome-disk-utility;
programs.gnome-terminal.enable = notExcluded pkgs.gnome3.gnome-terminal;
programs.seahorse.enable = notExcluded pkgs.gnome3.seahorse;
services.gnome3.sushi.enable = notExcluded pkgs.gnome3.sushi;
# Let nautilus find extensions
# TODO: Create nautilus-with-extensions package
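
A minimal sketch (not part of this commit) of enabling the new capability; the option path assumes the serviceCfg prefix used above (i.e. services.gnome3), and the dconf step is still required per user:

  services.gnome3.experimental-features.realtime-scheduling = true;
  # Then add "rt-scheduler" to /org/gnome/mutter/experimental-features with
  # dconf-editor; extraGSettingsOverrides would only change the default value.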

View file

@ -274,6 +274,7 @@ in
plasma-browser-integration
plasma-integration
polkit-kde-agent
spectacle
systemsettings
plasma-desktop
@ -362,6 +363,8 @@ in
# Update the start menu for each user that is currently logged in
system.userActivationScripts.plasmaSetup = activationScript;
nixpkgs.config.firefox.enablePlasmaBrowserIntegration = true;
})
];

View file

@ -57,7 +57,15 @@ in {
type = types.bool;
default = false;
description = ''
Whether of not to enable Picom as the X.org composite manager.
Whether or not to enable Picom as the X.org composite manager.
'';
};
experimentalBackends = mkOption {
type = types.bool;
default = false;
description = ''
Whether to use the unstable new reimplementation of the backends.
'';
};
@ -302,7 +310,8 @@ in {
};
serviceConfig = {
ExecStart = "${pkgs.picom}/bin/picom --config ${configFile}";
ExecStart = "${pkgs.picom}/bin/picom --config ${configFile}"
+ (optionalString cfg.experimentalBackends " --experimental-backends");
RestartSec = 3;
Restart = "always";
};
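
A minimal sketch (not part of this commit) of the new toggle:

  services.picom = {
    enable = true;
    experimentalBackends = true;  # appends --experimental-backends to ExecStart
  };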

View file

@ -136,6 +136,7 @@ let
fi
done
echo '${cfg.filesSection}' >> $out
echo 'EndSection' >> $out
echo "$config" >> $out
@ -366,6 +367,13 @@ in
'';
};
filesSection = mkOption {
type = types.lines;
default = "";
example = ''FontPath "/path/to/my/fonts"'';
description = "Contents of the first <literal>Files</literal> section of the X server configuration file.";
};
deviceSection = mkOption {
type = types.lines;
default = "";

View file

@ -381,13 +381,14 @@ in
# syntax). Note: We also allow underscores for compatibility/legacy
# reasons (as undocumented feature):
type = types.strMatching
"^$|^[[:alpha:]]([[:alnum:]_-]{0,61}[[:alnum:]])?$";
"^$|^[[:alnum:]]([[:alnum:]_-]{0,61}[[:alnum:]])?$";
description = ''
The name of the machine. Leave it empty if you want to obtain it from a
DHCP server (if using DHCP). The hostname must be a valid DNS label (see
RFC 1035 section 2.3.1: "Preferred name syntax") and as such must not
contain the domain part. This means that the hostname must start with a
letter, end with a letter or digit, and have as interior characters only
RFC 1035 section 2.3.1: "Preferred name syntax", RFC 1123 section 2.1:
"Host Names and Numbers") and as such must not contain the domain part.
This means that the hostname must start with a letter or digit,
end with a letter or digit, and have as interior characters only
letters, digits, and hyphen. The maximum length is 63 characters.
Additionally it is recommended to only use lower-case characters.
'';
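
A small sketch (not part of this commit) of a value the relaxed pattern now accepts:

  networking.hostName = "0node-01";  # may now start with a digit (RFC 1123); still no domain part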

View file

@ -3,7 +3,7 @@
with pkgs.lib;
{
makeEc2Test = { name, image, userData, script, hostname ? "ec2-instance", sshPublicKey ? null }:
makeEc2Test = { name, image, userData, script, hostname ? "ec2-instance", sshPublicKey ? null, meta ? {} }:
let
metaData = pkgs.stdenv.mkDerivation {
name = "metadata";
@ -59,5 +59,7 @@ with pkgs.lib;
machine = create_machine({"startCommand": start_command})
'' + script;
inherit meta;
};
}

View file

@ -67,7 +67,10 @@ import ./make-test-python.nix ({ pkgs, latestKernel ? false, ... } : {
# Test hidepid
with subtest("hidepid=2 option is applied and works"):
machine.succeed("grep -Fq hidepid=2 /proc/mounts")
# Linux >= 5.8 shows "invisible"
machine.succeed(
"grep -Fq hidepid=2 /proc/mounts || grep -Fq hidepid=invisible /proc/mounts"
)
# cannot use pgrep -u here, it segfaults when access to process info is denied
machine.succeed("[ `su - sybil -c 'ps --no-headers --user root | wc -l'` = 0 ]")
machine.succeed("[ `su - alice -c 'ps --no-headers --user root | wc -l'` != 0 ]")

View file

@ -1,4 +1,4 @@
import ./make-test-python.nix ({ pkgs, ... }:
import ./make-test-python.nix ({ lib, pkgs, ... }:
let
fakeReply = pkgs.writeText "namecoin-reply.json" ''
{ "error": null,
@ -15,10 +15,18 @@ let
}
}
'';
# Disabled because DNSSEC does not currently validate,
# see https://github.com/namecoin/ncdns/issues/127
dnssec = false;
in
{
name = "ncdns";
meta = with pkgs.stdenv.lib.maintainers; {
maintainers = [ rnhmjoj ];
};
nodes.server = { ... }: {
networking.nameservers = [ "127.0.0.1" ];
@ -44,13 +52,15 @@ in
services.ncdns = {
enable = true;
dnssec.enable = true;
dnssec.enable = dnssec;
identity.hostname = "example.com";
identity.hostmaster = "root@example.com";
identity.address = "1.0.0.1";
};
services.pdns-recursor = {
enable = true;
dns.allowFrom = [ "127.0.0.0/8" ];
settings.loglevel = 8;
resolveNamecoin = true;
};
@ -58,7 +68,8 @@ in
};
testScript = ''
testScript =
(lib.optionalString dnssec ''
with subtest("DNSSEC keys have been generated"):
server.wait_for_unit("ncdns")
server.wait_for_file("/var/lib/ncdns/bit.key")
@ -68,10 +79,18 @@ in
server.wait_for_unit("pdns-recursor")
server.wait_for_open_port("53")
server.succeed("host -t DNSKEY bit")
'') +
''
with subtest("can resolve a .bit name"):
server.wait_for_unit("namecoind")
server.wait_for_unit("ncdns")
server.wait_for_open_port("8332")
assert "1.2.3.4" in server.succeed("host -t A test.bit")
assert "1.2.3.4" in server.succeed("dig @localhost -p 5333 test.bit")
with subtest("SOA record has identity information"):
assert "example.com" in server.succeed("dig SOA @localhost -p 5333 bit")
with subtest("bit. zone forwarding works"):
assert "1.2.3.4" in server.succeed("host test.bit")
'';
})

View file

@ -457,6 +457,31 @@ let
'';
};
openvpn = {
exporterConfig = {
enable = true;
group = "openvpn";
statusPaths = ["/run/openvpn-test"];
};
metricProvider = {
users.groups.openvpn = {};
services.openvpn.servers.test = {
config = ''
dev tun
status /run/openvpn-test
status-version 3
'';
up = "chmod g+r /run/openvpn-test";
};
systemd.services."openvpn-test".serviceConfig.Group = "openvpn";
};
exporterTest = ''
wait_for_unit("openvpn-test.service")
wait_for_unit("prometheus-openvpn-exporter.service")
succeed("curl -sSf http://localhost:9176/metrics | grep -q 'openvpn_up{.*} 1'")
'';
};
postfix = {
exporterConfig = {
enable = true;
@ -466,10 +491,12 @@ let
};
exporterTest = ''
wait_for_unit("prometheus-postfix-exporter.service")
wait_for_file("/var/lib/postfix/queue/public/showq")
wait_for_open_port(9154)
succeed(
"curl -sSf http://localhost:9154/metrics | grep -q 'postfix_smtpd_connects_total 0'"
)
succeed("curl -sSf http://localhost:9154/metrics | grep -q 'postfix_up{.*} 1'")
'';
};

View file

@ -0,0 +1,25 @@
import ./make-test-python.nix ({ pkgs, ... }: {
name = "rsyncd";
meta.maintainers = with pkgs.lib.maintainers; [ ehmry ];
nodes.machine.services.rsyncd = {
enable = true;
settings = {
global = {
"reverse lookup" = false;
"forward lookup" = false;
};
tmp = {
path = "/nix/store";
comment = "test module";
};
};
};
testScript = ''
start_all()
machine.wait_for_unit("rsyncd")
machine.succeed("rsync localhost::")
'';
})

View file

@ -2,13 +2,13 @@
stdenv.mkDerivation rec {
pname = "BSEQuencer";
version = "1.6.0";
version = "1.8.4";
src = fetchFromGitHub {
owner = "sjaehn";
repo = pname;
rev = "${version}";
sha256 = "0w21kzq695xy4i1r6xvvh7sad5m0rlmdgc7ykmrlzfsm1252dz80";
sha256 = "0hagnn104ybzdp13r95idw20fhmzif8p3kmiypnr20m6c64rdd29";
};
nativeBuildInputs = [ pkgconfig ];

View file

@ -16,7 +16,7 @@
, withDevices ? true, udisks2
, withDynamic ? true
, withHttpServer ? true
, withLibVlc ? false, vlc
, withLibVlc ? false, libvlc
, withStreams ? true
}:
@ -71,7 +71,7 @@ in mkDerivation {
++ lib.optional withMtp libmtp
++ lib.optional withMusicbrainz libmusicbrainz5
++ lib.optional withUdisks udisks2
++ lib.optional withLibVlc vlc;
++ lib.optional withLibVlc libvlc;
nativeBuildInputs = [ cmake pkgconfig qttools ];

View file

@ -1,3 +1,13 @@
From eb21fd64a19a0e10c4c3826fc71610fd5850fa2f Mon Sep 17 00:00:00 2001
From: Christoph Neidahl <christoph.neidahl@gmail.com>
Date: Sun, 13 Sep 2020 23:18:51 +0200
Subject: [PATCH 1/2] Drop baked-in build date for r13y
---
src/ct2util.d | 2 +-
src/ui/ui.d | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/ct2util.d b/src/ct2util.d
index 523cadc..e462b09 100644
--- a/src/ct2util.d
@ -24,3 +34,6 @@ index e418dda..21af408 100644
screen.cprint(4, 0, 1, headerColor, hdr);
screen.cprint(screen.width - 14, 0, 1, headerColor, "F12 = Help");
int c1 = audio.player.isPlaying ? 13 : 12;
--
2.25.4

View file

@ -0,0 +1,25 @@
From abc5e8786d41803300b56ef40c08db0d867eb01a Mon Sep 17 00:00:00 2001
From: Christoph Neidahl <christoph.neidahl@gmail.com>
Date: Sun, 13 Sep 2020 23:22:33 +0200
Subject: [PATCH 2/2] Prepend libSDL.dylib to macOS SDL loader
---
src/derelict/sdl/sdl.d | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/derelict/sdl/sdl.d b/src/derelict/sdl/sdl.d
index e31a52f..f7915b1 100644
--- a/src/derelict/sdl/sdl.d
+++ b/src/derelict/sdl/sdl.d
@@ -54,7 +54,7 @@ public:
super(
"sdl.dll",
"libSDL.so, libSDL.so.0, libSDL-1.2.so, libSDL-1.2.so.0",
- "@executable_path/../Frameworks/SDL.framework/SDL, /Library/Frameworks/SDL.framework/SDL, /System/Library/Frameworks/SDL.framework/SDL"
+ "@rpath/libSDL.dylib, @executable_path/../Frameworks/SDL.framework/SDL, /Library/Frameworks/SDL.framework/SDL, /System/Library/Frameworks/SDL.framework/SDL"
);
}
--
2.25.4

View file

@ -1,27 +1,32 @@
{ stdenv, lib, fetchFromGitHub, fetchpatch
, acme, ldc, patchelf
{ stdenv
, lib
, fetchFromGitHub
, acme
, ldc
, patchelf
, SDL
}:
stdenv.mkDerivation rec {
pname = "cheesecutter";
version = "unstable-2019-12-06";
version = "unstable-2020-04-03";
src = fetchFromGitHub {
owner = "theyamo";
repo = "CheeseCutter";
rev = "6b433c5512d693262742a93c8bfdfb353d4be853";
sha256 = "1szlcg456b208w1237581sg21x69mqlh8cr6v8yvbhxdz9swxnwy";
rev = "68d6518f0e6249a2a5d122fc80201578337c1277";
sha256 = "0xspzjhc6cp3m0yd0mwxncg8n1wklizamxvidrnn21jgj3mnaq2q";
};
nativeBuildInputs = [ acme ldc patchelf ];
patches = [
./0001-Drop-baked-in-build-date-for-r13y.patch
]
++ lib.optional stdenv.hostPlatform.isDarwin ./0002-Prepend-libSDL.dylib-to-macOS-SDL-loader.patch;
nativeBuildInputs = [ acme ldc ]
++ lib.optional (!stdenv.hostPlatform.isDarwin) patchelf;
buildInputs = [ SDL ];
patches = [
./0001-fix-impure-build-date-display.patch
];
makefile = "Makefile.ldc";
installPhase = ''
@ -33,15 +38,21 @@ stdenv.mkDerivation rec {
cp -r tunes/* $out/share/cheesecutter/example_tunes
'';
postFixup = ''
postFixup =
let
rpathSDL = lib.makeLibraryPath [ SDL ];
in
if stdenv.hostPlatform.isDarwin then ''
install_name_tool -add_rpath ${rpathSDL} $out/bin/ccutter
'' else ''
rpath=$(patchelf --print-rpath $out/bin/ccutter)
patchelf --set-rpath "$rpath:${lib.makeLibraryPath buildInputs}" $out/bin/ccutter
patchelf --set-rpath "$rpath:${rpathSDL}" $out/bin/ccutter
'';
meta = with lib; {
description = "A tracker program for composing music for the SID chip.";
description = "A tracker program for composing music for the SID chip";
homepage = "https://github.com/theyamo/CheeseCutter/";
license = licenses.gpl2;
license = licenses.gpl2Plus;
platforms = [ "x86_64-linux" "i686-linux" "x86_64-darwin" ];
maintainers = with maintainers; [ OPNA2608 ];
};

View file

@ -2,13 +2,13 @@
stdenv.mkDerivation rec {
pname = "geonkick";
version = "2.3.7";
version = "2.3.8";
src = fetchFromGitLab {
owner = "iurie-sw";
repo = pname;
rev = "v${version}";
sha256 = "1wdcbwiyy6i5agq5lffkyilyc8mv1cc4mp9h0nybn240vb2flqc2";
sha256 = "07809yy2q7dd6fcp0yndlg1vw2ca2zisnsplb3xrxvzdvrqlw910";
};
nativeBuildInputs = [ cmake pkg-config ];

View file

@ -18,13 +18,13 @@
stdenv.mkDerivation rec {
pname = "giada";
version = "0.16.3.1";
version = "0.16.4";
src = fetchFromGitHub {
owner = "monocasual";
repo = pname;
rev = "v${version}";
sha256 = "0z1jrkggdn630i3j59j30apaa9s242y1wiawqp4g1n9dkg3r9a1j";
sha256 = "0qyx0bvivlvly0vj5nnnbiks22xh13sqlw4mfgplq2lbbpgisigp";
};
configureFlags = [

View file

@ -5,13 +5,13 @@
stdenv.mkDerivation rec {
pname = "lsp-plugins";
version = "1.1.24";
version = "1.1.26";
src = fetchFromGitHub {
owner = "sadko4u";
repo = pname;
rev = "${pname}-${version}";
sha256 = "0rzgzkg6wvhjcf664i16nz4v30drgv80s34bhdflcjzx2x7ix5zk";
sha256 = "1apw8zh3a3il4smkjji6bih4vbsymj0hjs10fgkrd4nazqkjvgyd";
};
nativeBuildInputs = [ pkgconfig php makeWrapper ];
@ -58,6 +58,10 @@ stdenv.mkDerivation rec {
- Expander MidSide - Expander MidSide
- Expander Mono - Expander Mono
- Expander Stereo - Expander Stereo
- Crossover LeftRight x8 - Frequenzweiche LeftRight x8
- Crossover MidSide x8 - Frequenzweiche MidSide x8
- Crossover Mono x8 - Frequenzweiche Mono x8
- Crossover Stereo x8 - Frequenzweiche Stereo x8
- Gate LeftRight - Gate LeftRight
- Gate MidSide - Gate MidSide
- Gate Mono - Gate Mono
@ -81,6 +85,16 @@ stdenv.mkDerivation rec {
- Compressor Mono - Kompressor Mono
- Compressor Stereo - Kompressor Stereo
- Latency Meter - Latenzmessgerät
- Loudness Compensator Mono - Lautstärke Kompensator Mono
- Loudness Compensator Stereo - Lautstärke Kompensator Stereo
- Multiband Expander LeftRight x8 - Multi-band Expander LeftRight x8
- Multiband Expander MidSide x8 - Multi-band Expander MidSide x8
- Multiband Expander Mono x8 - Multi-band Expander Mono x8
- Multiband Expander Stereo x8 - Multi-band Expander Stereo x8
- Multiband Gate LeftRight x8 - Multi-band Gate LeftRight x8
- Multiband Gate MidSide x8 - Multi-band Gate MidSide x8
- Multiband Gate Mono x8 - Multi-band Gate Mono x8
- Multiband Gate Stereo x8 - Multi-band Gate Stereo x8
- Multiband Compressor LeftRight x8 - Multi-band Kompressor LeftRight x8
- Multiband Compressor MidSide x8 - Multi-band Kompressor MidSide x8
- Multiband Compressor Mono x8 - Multi-band Kompressor Mono x8
@ -96,12 +110,23 @@ stdenv.mkDerivation rec {
- Parametric Equalizer x32 Stereo - Parametrischer Entzerrer x32 Stereo
- Phase Detector - Phasendetektor
- Profiler Mono - Profiler Mono
- Profiler Stereo - Profiler Stereo
- Room Builder Mono - Raumbaumeister Mono
- Room Builder Stereo - Raumbaumeister Stereo
- Multi-Sampler x12 DirectOut - Schlagzeug x12 Direktausgabe
- Multi-Sampler x12 Stereo - Schlagzeug x12 Stereo
- Multi-Sampler x24 DirectOut - Schlagzeug x24 Direktausgabe
- Multi-Sampler x24 Stereo - Schlagzeug x24 Stereo
- Multi-Sampler x48 DirectOut - Schlagzeug x48 Direktausgabe
- Multi-Sampler x48 Stereo - Schlagzeug x48 Stereo
- Sidechain Multiband Expander LeftRight x8 - Sidechain Multi-band Expander LeftRight x8
- Sidechain Multiband Expander MidSide x8 - Sidechain Multi-band Expander MidSide x8
- Sidechain Multiband Expander Mono x8 - Sidechain Multi-band Expander Mono x8
- Sidechain Multiband Expander Stereo x8 - Sidechain Multi-band Expander Stereo x8
- Sidechain Multiband Gate LeftRight x8 - Sidechain Multi-band Gate LeftRight x8
- Sidechain Multiband Gate MidSide x8 - Sidechain Multi-band Gate MidSide x8
- Sidechain Multiband Gate Mono x8 - Sidechain Multi-band Gate Mono x8
- Sidechain Multiband Gate Stereo x8 - Sidechain Multi-band Gate Stereo x8
- Sidechain Multiband Compressor LeftRight x8 - Sidechain Multi-band Kompressor LeftRight x8
- Sidechain Multiband Compressor MidSide x8 - Sidechain Multi-band Kompressor MidSide x8
- Sidechain Multiband Compressor Mono x8 - Sidechain Multi-band Kompressor Mono x8
@ -132,6 +157,8 @@ stdenv.mkDerivation rec {
- Spectrum Analyzer x2 - Spektrumanalysator x2
- Spectrum Analyzer x4 - Spektrumanalysator x4
- Spectrum Analyzer x8 - Spektrumanalysator x8
- Surge Filter Mono - Sprungfilter Mono
- Surge Filter Stereo - Sprungfilter Stereo
- Trigger MIDI Mono - Triggersensor MIDI Mono
- Trigger MIDI Stereo - Triggersensor MIDI Stereo
- Trigger Mono - Triggersensor Mono

View file

@ -1,18 +1,18 @@
{ stdenv, fetchFromGitHub, glib, lilv, lv2, pkgconfig, serd, sord, sratom }:
{ stdenv, fetchFromGitHub, glib, libsndfile, lilv, lv2, pkgconfig, serd, sord, sratom }:
stdenv.mkDerivation {
stdenv.mkDerivation rec {
pname = "lv2bm";
version = "git-2015-11-29";
version = "1.1";
src = fetchFromGitHub {
owner = "moddevices";
repo = "lv2bm";
rev = "e844931503b7597f45da6d61ff506bb9fca2e9ca";
sha256 = "1rrz5sp04zjal6v34ldkl6fjj9xqidb8xm1iscjyljf6z4l516cx";
rev = "v${version}";
sha256 = "0vlppxfb9zbmffazs1kiyb79py66s8x9hihj36m2vz86zsq7ybl0";
};
nativeBuildInputs = [ pkgconfig ];
buildInputs = [ glib lilv lv2 serd sord sratom ];
buildInputs = [ glib libsndfile lilv lv2 serd sord sratom ];
installPhase = ''
make install PREFIX=$out

View file

@ -0,0 +1,63 @@
{ python3Packages
, lib
, fetchFromGitHub
, makeDesktopItem
, wrapGAppsHook
, gtk3
, gobject-introspection
, sox
, pulseaudio
}:
let
desktopItem = makeDesktopItem {
name = "lyrebird";
exec = "lyrebird";
icon = "${placeholder "out"}/share/lyrebird/icon.png";
desktopName = "Lyrebird";
genericName = "Voice Changer";
categories = "AudioVideo;Audio;";
};
in
python3Packages.buildPythonApplication rec {
pname = "lyrebird";
version = "1.1.0";
format = "other";
doCheck = false;
src = fetchFromGitHub {
owner = "chxrlt";
repo = "lyrebird";
rev = "v${version}";
sha256 = "0wmnww2wi8bb9m8jgc18n04gjia8pf9klmvij0w98xz11l6kxb13";
};
propagatedBuildInputs = with python3Packages; [ toml pygobject3 ];
nativeBuildInputs = [ wrapGAppsHook ];
buildInputs = [ gtk3 gobject-introspection sox ];
dontWrapGApps = true;
makeWrapperArgs = [
"--prefix 'PATH' ':' '${lib.makeBinPath [ sox pulseaudio ]}'"
"--prefix 'PYTHONPATH' ':' '${placeholder "out"}/share/lyrebird'"
"--run 'cd ${placeholder "out"}/share/lyrebird'"
''"''${gappsWrapperArgs[@]}"''
];
installPhase = ''
mkdir -p $out/{bin,share/{applications,lyrebird}}
cp -at $out/share/lyrebird/ app icon.png
cp -at $out/share/applications/ ${desktopItem}
install -Dm755 app.py $out/bin/lyrebird
'';
meta = with lib; {
description = "Simple and powerful voice changer for Linux, written in GTK 3";
homepage = "https://github.com/chxrlt/lyrebird";
license = licenses.mit;
maintainers = with maintainers; [ OPNA2608 ];
platforms = platforms.linux;
};
}

View file

@ -12,7 +12,7 @@ stdenv.mkDerivation rec {
meta = {
description = "Tools to work with opus encoded audio streams";
homepage = "http://www.opus-codec.org/";
homepage = "https://www.opus-codec.org/";
license = stdenv.lib.licenses.bsd2;
maintainers = with stdenv.lib.maintainers; [ ];
platforms = with stdenv.lib.platforms; unix;

View file

@ -15,7 +15,7 @@ stdenv.mkDerivation rec {
meta = with stdenv.lib; {
description = "High-level API for decoding and seeking in .opus files";
homepage = "http://www.opus-codec.org/";
homepage = "https://www.opus-codec.org/";
license = licenses.bsd3;
platforms = platforms.linux ++ platforms.darwin;
maintainers = with maintainers; [ ];

View file

@ -14,7 +14,7 @@ stdenv.mkDerivation {
buildInputs = [ alsaLib ];
meta = with stdenv.lib; {
homepage = "http://www.parabola.me.uk/alsa/pmidi.html";
homepage = "https://www.parabola.me.uk/alsa/pmidi.html";
description = "A straightforward command line program to play midi files through the ALSA sequencer";
maintainers = with maintainers; [ lheckemann ];
license = licenses.gpl2;

View file

@ -23,7 +23,7 @@ stdenv.mkDerivation rec {
meta = {
description = "Doing phonetics by computer";
homepage = "http://www.fon.hum.uva.nl/praat/";
homepage = "https://www.fon.hum.uva.nl/praat/";
license = stdenv.lib.licenses.gpl2Plus; # Has some 3rd-party code in it though
platforms = stdenv.lib.platforms.linux;
};

View file

@ -1,4 +1,5 @@
{ mkDerivation
{ stdenv
, mkDerivation
, lib
, fetchFromGitHub
, autoreconfHook
@ -39,12 +40,13 @@ mkDerivation rec {
"--enable-sdl"
];
fixupPhase = ''
fixupPhase = lib.optionalString (!stdenv.hostPlatform.isDarwin) ''
# NOTE: 2019-10-05: Upstream inserts the src path into the ELF rpath during the build, so we must strip it out
# upstream report: https://github.com/projectM-visualizer/projectm/issues/245
for entry in $out/bin/* ; do
patchelf --set-rpath "$(patchelf --print-rpath $entry | tr ':' '\n' | grep -v 'src/libprojectM' | tr '\n' ':')" "$entry"
done
'' + ''
wrapQtApp $out/bin/projectM-pulseaudio
rm $out/bin/projectM-unittest
'';

View file

@ -8,13 +8,13 @@
stdenv.mkDerivation rec {
pname = "pt2-clone";
version = "1.22";
version = "1.23";
src = fetchFromGitHub {
owner = "8bitbubsy";
repo = "pt2-clone";
rev = "v${version}";
sha256 = "1w6lbq4366bawy975glvjizk57zhvl562xhxwzn7p5hpm2bvw09b";
sha256 = "1vixpp0vqpawqviwl44wn8zf602zyyrgqnjzrlwjrmdzydx9c93y";
};
nativeBuildInputs = [ cmake ];

View file

@ -1,6 +1,8 @@
{ lib
, fetchFromGitHub
, genericUpdater
, substituteAll
, common-updater-scripts
, ffmpeg_3
, python3Packages
, sox
@ -8,13 +10,13 @@
python3Packages.buildPythonApplication rec {
pname = "r128gain";
version = "1.0.1";
version = "1.0.3";
src = fetchFromGitHub {
owner = "desbma";
repo = "r128gain";
rev = version;
sha256 = "0fnxis2g7mw8mb0cz9bws909lrndli7ml54nnzda49vc2fhbjwxr";
sha256 = "0w2i2szajv1vcmc96w0fczdr8xc28ijcf1gdg180f21gi6yh96sc";
};
patches = [
@ -33,6 +35,13 @@ python3Packages.buildPythonApplication rec {
# sandbox to be disabled.
doCheck = false;
passthru = {
updateScript = genericUpdater {
inherit pname version;
versionLister = "${common-updater-scripts}/bin/list-git-tags ${src.meta.homepage}";
};
};
meta = with lib; {
description = "Fast audio loudness scanner & tagger (ReplayGain v2 / R128)";
homepage = "https://github.com/desbma/r128gain";

View file

@ -1,5 +1,3 @@
diff --git i/r128gain/__init__.py w/r128gain/__init__.py
index 53fc3ef..f144e15 100755
--- i/r128gain/__init__.py
+++ w/r128gain/__init__.py
@@ -78,7 +78,7 @@ def get_ffmpeg_lib_versions(ffmpeg_path=None):
@ -14,7 +12,7 @@ index 53fc3ef..f144e15 100755
@@ -156,7 +156,7 @@ def get_r128_loudness(audio_filepaths, *, calc_peak=True, enable_ffmpeg_threadin
os.devnull,
**additional_ffmpeg_args,
f="null"),
f="null").global_args("-hide_banner", "-nostats"),
- cmd=ffmpeg_path or "ffmpeg")
+ cmd=ffmpeg_path or "@ffmpeg@/bin/ffmpeg")

View file

@ -2,16 +2,16 @@
rustPlatform.buildRustPackage rec {
pname = "spotify-tui";
version = "0.21.0";
version = "0.22.0";
src = fetchFromGitHub {
owner = "Rigellute";
repo = "spotify-tui";
rev = "v${version}";
sha256 = "0pvgq8r1bb7bdxm50hxl0n7ajplpzp1gnf6j55dn6xwck0syml9y";
sha256 = "0w1y37qh9n3936d59hvqzjz2878x2nwxqxc4s7mp4f9xqcfl0c5r";
};
cargoSha256 = "07v1qm5ky99j2lwbl00g80z0f8hfrpwgyqsm8fnda6y9s3vzzdgz";
cargoSha256 = "1ri054p08si95x1gh2bkh4fk50ja79c5izzjnkvs0yhfj1wzbghi";
nativeBuildInputs = stdenv.lib.optionals stdenv.isLinux [ pkgconfig python3 ];
buildInputs = [ ]

View file

@ -30,7 +30,7 @@
, withGstreamer ? true
, gst_all_1 ? null
, withVlc ? true
, vlc ? null
, libvlc ? null
}:
mkDerivation rec {
@ -75,7 +75,7 @@ mkDerivation rec {
gst-plugins-good
gst-plugins-ugly
])
++ lib.optional withVlc vlc;
++ lib.optional withVlc libvlc;
nativeBuildInputs = [ cmake ninja pkgconfig qttools ];

View file

@ -2,11 +2,11 @@
stdenv.mkDerivation rec {
pname = "ergo";
version = "3.3.1";
version = "3.3.3";
src = fetchurl {
url = "https://github.com/ergoplatform/ergo/releases/download/v${version}/ergo-${version}.jar";
sha256 = "1qr1vfb6mhm2hxl2ksydkhadm7phadn93lwm3f9zni01plk56bb5";
sha256 = "1lsqshpbc5p5qm8kic8a90xmvd2zx2s7jf613j9ng4h3hh75wbff";
};
nativeBuildInputs = [ makeWrapper ];

View file

@ -2,17 +2,17 @@
buildGoModule rec {
pname = "go-ethereum";
version = "1.9.21";
version = "1.9.22";
src = fetchFromGitHub {
owner = "ethereum";
repo = pname;
rev = "v${version}";
sha256 = "0mr5pw08jka11lzgl28555nb90cqxx9vlqd1plfmyic6rb5z11df";
sha256 = "08i31xil2lygfcn2igsvn4hpg8xnf8l6g914f78hgl4wj6v1dja9";
};
runVend = true;
vendorSha256 = "155hmny3543h02ryn1nnlpmvs0qvhd0lb66vmkhw5351m6gkbx7x";
vendorSha256 = "1qbg44cryiv9kvcak6qjrbmkc9bxyk5fybj62vdkskqfjvv86068";
doCheck = false;

View file

@ -19,9 +19,9 @@ let
sha256Hash = "sha256-qbxmR9g8DSKzcP09bJuc+am79BSXWG39UQxFEb1bZ88=";
};
latestVersion = { # canary & dev
version = "4.2.0.11"; # "Android Studio 4.2 Canary 11"
build = "202.6825553";
sha256Hash = "sha256-la3J0mgUxJA50l1PLr9FPMKI5QYkoBRriVyu3aVq7io=";
version = "4.2.0.13"; # "Android Studio 4.2 Canary 13"
build = "202.6863838";
sha256Hash = "sha256-avkRelP5/sDXW7pdVrOknmb3PtR6XQEmQXivZFljpLc=";
};
in {
# Attributes are named by their corresponding release channels

View file

@ -75,9 +75,9 @@ in stdenv.mkDerivation {
"${lib.getLib stdenv.cc.libc}/lib"
# Executable paths necessary for compilation (ld, as):
"${lib.getBin stdenv.cc.cc}"
"${lib.getBin stdenv.cc.bintools}"
"${lib.getBin stdenv.cc.bintools.bintools}"
"${lib.getBin stdenv.cc.cc}/bin"
"${lib.getBin stdenv.cc.bintools}/bin"
"${lib.getBin stdenv.cc.bintools.bintools}/bin"
]));
in ''
substituteInPlace lisp/emacs-lisp/comp.el --replace \

View file

@ -2,25 +2,25 @@
buildGoModule rec {
pname = "glow";
version = "0.2.0";
version = "1.0.0";
src = fetchFromGitHub {
owner = "charmbracelet";
repo = "glow";
rev = "v${version}";
sha256 = "0vhl8d7xxqqyl916nh8sgm1xdaf7xlc3r18464bd2av22q9yz68n";
sha256 = "0cgi7rz5al5smjsna9p2v5zxjn3lwpnhd38vpr1qhz8n0z37vss5";
};
vendorSha256 = "1c16s5xiqr36azh2w90wg14jlw67ca2flbgjijpz7qd0ypxyfqlk";
vendorSha256 = "180g6d9w3lfmxj4843kqvq4ikg8lwmwprgfxdgz1lzvjmbfjj3g9";
doCheck = false;
buildFlagsArray = [ "-ldflags=" "-X=main.Version=${version}" ];
meta = with lib; {
description = "Render markdown on the CLI";
description = "Render markdown on the CLI, with pizzazz!";
homepage = "https://github.com/charmbracelet/glow";
license = licenses.mit;
maintainers = with maintainers; [ ehmry filalex77 ];
maintainers = with maintainers; [ ehmry filalex77 penguwin ];
};
}

View file

@ -385,12 +385,12 @@ in
rider = buildRider rec {
name = "rider-${version}";
version = "2020.2.2"; /* updated by script */
version = "2020.2.3"; /* updated by script */
description = "A cross-platform .NET IDE based on the IntelliJ platform and ReSharper";
license = stdenv.lib.licenses.unfree;
src = fetchurl {
url = "https://download.jetbrains.com/rider/JetBrains.Rider-${version}.tar.gz";
sha256 = "1v3n4mg8b55ni72bdgsgiwyqcvp9zhqlkqshscwfjggv0iai9r6p"; /* updated by script */
sha256 = "01namzd29chj975w3awanlzf38hh30cfjzyljqfkp6y3djn0if1r"; /* updated by script */
};
wmClass = "jetbrains-rider";
update-channel = "Rider RELEASE";

View file

@ -4,17 +4,18 @@
let
unwrapped = mkDerivation rec {
pname = "neovim-qt-unwrapped";
version = "0.2.15";
version = "0.2.16.1";
src = fetchFromGitHub {
owner = "equalsraf";
repo = "neovim-qt";
rev = "v${version}";
sha256 = "097nykglqp4jyvla4yp32sc1f1hph4cqqhp6rm9ww7br8c0j54xl";
sha256 = "0x5brrim3f21bzdmh6wyrhrislwpx1248wbx56csvic6v78hzqny";
};
cmakeFlags = [
"-DUSE_SYSTEM_MSGPACK=1"
"-DENABLE_TESTS=0" # tests fail because xcb platform plugin is not found
];
buildInputs = [

View file

@ -0,0 +1,31 @@
{ stdenv, fetchurl, pkg-config, gtk2, pcre }:
stdenv.mkDerivation rec {
pname = "qxw";
version = "20200708";
src = fetchurl {
url = "https://www.quinapalus.com/qxw-${version}.tar.gz";
sha256 = "1si3ila7137c7x4mp3jv1q1mh3jp0p4khir1yz1rwy0mp3znwv7d";
};
nativeBuildInputs = [ pkg-config ];
buildInputs = [ gtk2 pcre ];
makeFlags = [ "DESTDIR=$(out)" ];
patchPhase = ''
sed -i 's/ `dpkg-buildflags[^`]*`//g;
/mkdir -p/d;
s/cp -a/install -D/;
s,/usr/games,/bin,' Makefile
'';
meta = with stdenv.lib; {
description = "A program to help create and publish crosswords";
homepage = "https://www.quinapalus.com/qxw.html";
license = licenses.gpl2;
maintainers = [ maintainers.tckmn ];
platforms = platforms.linux;
};
}

View file

@ -0,0 +1,67 @@
{ stdenv, callPackage, vimUtils, buildEnv, makeWrapper }:
let
macvim = callPackage ./macvim.nix { inherit stdenv; };
makeCustomizable = macvim: macvim // {
# configure expects the same args as vimUtils.vimrcFile.
# This is the same as the value given to neovim.override { configure = … }
# or the value of vim_configurable.customize { vimrcConfig = … }
#
# Note: Like neovim and vim_configurable, configuring macvim disables the
# sourcing of the user's vimrc. Use `customRC = "source $HOME/.vim/vimrc"`
# if you want to preserve that behavior.
configure = let
inherit (stdenv) lib;
doConfig = config: let
vimrcConfig = config // {
# always source the bundled system vimrc
beforePlugins = ''
source $VIM/vimrc
${config.beforePlugins or ""}
'';
};
in buildEnv {
name = macvim.name;
paths = [ macvim ];
pathsToLink = [
"/"
"/bin"
"/Applications/MacVim.app/Contents/MacOS"
"/Applications/MacVim.app/Contents/bin"
];
buildInputs = [ makeWrapper ];
# We need to do surgery on the resulting app. We can't just make a wrapper for vim because this
# is a GUI app. We need to copy the actual GUI executable image as AppKit uses the loaded image's
# path to locate the bundle. We can use symlinks for other executables and resources though.
postBuild = ''
# Replace the Contents/MacOS/MacVim symlink with the original file
target=$(readlink $out/Applications/MacVim.app/Contents/MacOS/MacVim)
rm $out/Applications/MacVim.app/Contents/MacOS/MacVim
cp -a -t $out/Applications/MacVim.app/Contents/MacOS "$target"
# Wrap the Vim binary for our vimrc
wrapProgram $out/Applications/MacVim.app/Contents/MacOS/Vim \
--add-flags "-u ${vimUtils.vimrcFile vimrcConfig}"
# Replace each symlink in bin/ with the original. Most of them point at other symlinks
# and we need those original symlinks to point into our new app bundle.
for prefix in bin Applications/MacVim.app/Contents/bin; do
for link in $out/$prefix/*; do
target=$(readlink "$link")
# don't copy binaries like vimtutor, but we do need mvim
[ -L "$target" ] || [ "$(basename "$target")" = mvim ] || continue;
rm "$link"
cp -a -t $out/$prefix "$target"
done
done
'';
meta = macvim.meta;
};
in lib.makeOverridable (lib.setFunctionArgs doConfig (lib.functionArgs vimUtils.vimrcFile));
override = f: makeCustomizable (macvim.override f);
overrideAttrs = f: makeCustomizable (macvim.overrideAttrs f);
};
in
makeCustomizable macvim
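
A minimal sketch (not part of this commit) of customizing the wrapped MacVim, assuming the configurable derivation is exposed as pkgs.macvim; the vimrc contents are illustrative:

  # Same arguments as vimUtils.vimrcFile / neovim.override { configure = ... }.
  (pkgs.macvim.configure {
    customRC = ''
      source $HOME/.vim/vimrc
      set number
    '';
  })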

View file

@ -27,13 +27,13 @@ in
stdenv.mkDerivation {
pname = "macvim";
version = "8.2.539";
version = "8.2.1719";
src = fetchFromGitHub {
owner = "macvim-dev";
repo = "macvim";
rev = "snapshot-163";
sha256 = "0ibc6h7zmk81dygkxd8a2rcq72zbqmr9kh64xhsm9h0p70505cdk";
rev = "snapshot-166";
sha256 = "1p51q59l1dl5lnf1ms960lm8zfg39p8xq0pdjw6wdyypjj3r8v3v";
};
enableParallelBuilding = true;
@ -68,8 +68,6 @@ stdenv.mkDerivation {
"--disable-sparkle"
];
makeFlags = ''PREFIX=$(out) CPPFLAGS="-Wno-error"'';
# Remove references to Sparkle.framework from the project.
# It's unused (we disabled it with --disable-sparkle) and this avoids
# copying the unnecessary several-megabyte framework into the result.
@ -85,7 +83,10 @@ stdenv.mkDerivation {
DEV_DIR=$(/usr/bin/xcode-select -print-path)/Platforms/MacOSX.platform/Developer
configureFlagsArray+=(
"--with-developer-dir=$DEV_DIR"
--with-developer-dir="$DEV_DIR"
LDFLAGS="-L${ncurses}/lib"
CPPFLAGS="-isystem ${ncurses.dev}/include"
CFLAGS="-Wno-error=implicit-function-declaration"
)
''
# For some reason having LD defined causes PSMTabBarControl to fail at link-time as it

View file

@ -1,5 +1,5 @@
diff --git a/src/MacVim/vimrc b/src/MacVim/vimrc
index 23a06bf..dfb10fe 100644
index af43549..dfb10fe 100644
--- a/src/MacVim/vimrc
+++ b/src/MacVim/vimrc
@@ -14,35 +14,5 @@ set backspace+=indent,eol,start
@ -29,22 +29,22 @@ index 23a06bf..dfb10fe 100644
-" or an installation from python.org:
-if exists("&pythonthreedll") && exists("&pythonthreehome") &&
- \ !filereadable(&pythonthreedll)
- if filereadable("/opt/local/Library/Frameworks/Python.framework/Versions/3.7/Python")
- " MacPorts python 3.7
- set pythonthreedll=/opt/local/Library/Frameworks/Python.framework/Versions/3.7/Python
- elseif filereadable("/Library/Frameworks/Python.framework/Versions/3.7/Python")
- if filereadable("/opt/local/Library/Frameworks/Python.framework/Versions/3.8/Python")
- " MacPorts python 3.8
- set pythonthreedll=/opt/local/Library/Frameworks/Python.framework/Versions/3.8/Python
- elseif filereadable("/Library/Frameworks/Python.framework/Versions/3.8/Python")
- " https://www.python.org/downloads/mac-osx/
- set pythonthreedll=/Library/Frameworks/Python.framework/Versions/3.7/Python
- set pythonthreedll=/Library/Frameworks/Python.framework/Versions/3.8/Python
- endif
-endif
-
+" Default cscopeprg to the Nix-installed path
+set cscopeprg=@CSCOPE@
diff --git a/src/Makefile b/src/Makefile
index 24c6934..d0f094e 100644
index fd2d5e1..37a6d6a 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -1407,7 +1407,7 @@ MACVIMGUI_SRC = gui.c gui_beval.c MacVim/gui_macvim.m MacVim/MMBackend.m \
@@ -1397,7 +1397,7 @@ MACVIMGUI_SRC = gui.c gui_beval.c MacVim/gui_macvim.m MacVim/MMBackend.m \
MacVim/MacVim.m
MACVIMGUI_OBJ = objects/gui.o objects/gui_beval.o \
objects/gui_macvim.o objects/MMBackend.o objects/MacVim.o
@ -54,10 +54,10 @@ index 24c6934..d0f094e 100644
MACVIMGUI_LIBS_DIR =
MACVIMGUI_LIBS1 = -framework Cocoa -framework Carbon
diff --git a/src/auto/configure b/src/auto/configure
index 730d6d5..0259112 100755
index 06257a5..68437df 100755
--- a/src/auto/configure
+++ b/src/auto/configure
@@ -5859,10 +5859,7 @@ $as_echo "not found" >&6; }
@@ -5872,10 +5872,7 @@ $as_echo "not found" >&6; }
for path in "${vi_cv_path_mzscheme_pfx}/lib" "${SCHEME_LIB}"; do
if test "X$path" != "X"; then
@ -69,7 +69,7 @@ index 730d6d5..0259112 100755
MZSCHEME_LIBS="${path}/libmzscheme3m.a"
MZSCHEME_CFLAGS="-DMZ_PRECISE_GC"
elif test -f "${path}/libracket3m.a"; then
@@ -6247,23 +6244,6 @@ $as_echo ">>> too old; need Perl version 5.003_01 or later <<<" >&6; }
@@ -6260,23 +6257,6 @@ $as_echo ">>> too old; need Perl version 5.003_01 or later <<<" >&6; }
fi
if test "x$MACOS_X" = "xyes"; then
@ -93,7 +93,7 @@ index 730d6d5..0259112 100755
PERL_LIBS=`echo "$PERL_LIBS" | sed -e 's/-arch\ ppc//' -e 's/-arch\ i386//' -e 's/-arch\ x86_64//'`
PERL_CFLAGS=`echo "$PERL_CFLAGS" | sed -e 's/-arch\ ppc//' -e 's/-arch\ i386//' -e 's/-arch\ x86_64//'`
fi
@@ -6486,13 +6466,7 @@ __:
@@ -6499,13 +6479,7 @@ __:
eof
eval "`cd ${PYTHON_CONFDIR} && make -f "${tmp_mkf}" __ | sed '/ directory /d'`"
rm -f -- "${tmp_mkf}"
@ -108,7 +108,7 @@ index 730d6d5..0259112 100755
vi_cv_path_python_plibs="-L${PYTHON_CONFDIR} -lpython${vi_cv_var_python_version}"
if test -n "${python_LINKFORSHARED}" && test -n "${python_PYTHONFRAMEWORKPREFIX}"; then
python_link_symbol=`echo ${python_LINKFORSHARED} | sed 's/\([^ \t][^ \t]*[ \t][ \t]*[^ \t][^ \t]*\)[ \t].*/\1/'`
@@ -6507,7 +6481,6 @@ eof
@@ -6520,7 +6494,6 @@ eof
fi
vi_cv_path_python_plibs="${vi_cv_path_python_plibs} ${python_BASEMODLIBS} ${python_LIBS} ${python_SYSLIBS} ${python_LINKFORSHARED}"
vi_cv_path_python_plibs=`echo $vi_cv_path_python_plibs | sed s/-ltermcap//`
@ -116,7 +116,7 @@ index 730d6d5..0259112 100755
fi
@@ -6586,13 +6559,6 @@ rm -f core conftest.err conftest.$ac_objext \
@@ -6599,13 +6572,6 @@ rm -f core conftest.err conftest.$ac_objext \
$as_echo "no" >&6; }
fi
@ -130,19 +130,19 @@ index 730d6d5..0259112 100755
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if compile and link flags for Python are sane" >&5
$as_echo_n "checking if compile and link flags for Python are sane... " >&6; }
cflags_save=$CFLAGS
@@ -7486,11 +7452,7 @@ $as_echo "$tclver - OK" >&6; };
@@ -7499,11 +7465,7 @@ $as_echo "$tclver - OK" >&6; };
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for location of Tcl include" >&5
$as_echo_n "checking for location of Tcl include... " >&6; }
- if test "x$MACOS_X" != "xyes"; then
tclinc="$tclloc/include $tclloc/include/tcl $tclloc/include/tcl$tclver /usr/local/include /usr/local/include/tcl$tclver /usr/include /usr/include/tcl$tclver"
- else
- tclinc="/System/Library/Frameworks/Tcl.framework/Headers"
- tclinc="$tclloc/include $tclloc/include/tcl $tclloc/include/tcl$tclver /System/Library/Frameworks/Tcl.framework/Headers `xcrun --show-sdk-path`/System/Library/Frameworks/Tcl.framework/Versions/Current/Headers"
- fi
TCL_INC=
for try in $tclinc; do
if test -f "$try/tcl.h"; then
@@ -7508,12 +7470,8 @@ $as_echo "<not found>" >&6; }
@@ -7521,13 +7483,8 @@ $as_echo "<not found>" >&6; }
if test -z "$SKIP_TCL"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for location of tclConfig.sh script" >&5
$as_echo_n "checking for location of tclConfig.sh script... " >&6; }
@ -150,12 +150,13 @@ index 730d6d5..0259112 100755
tclcnf=`echo $tclinc | sed s/include/lib/g`
tclcnf="$tclcnf `echo $tclinc | sed s/include/lib64/g`"
- else
- tclcnf="/System/Library/Frameworks/Tcl.framework"
- tclcnf=`echo $tclinc | sed s/include/lib/g`
- tclcnf="$tclcnf /System/Library/Frameworks/Tcl.framework `xcrun --show-sdk-path`/System/Library/Frameworks/Tcl.framework"
- fi
for try in $tclcnf; do
if test -f "$try/tclConfig.sh"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $try/tclConfig.sh" >&5
@@ -7703,10 +7661,6 @@ $as_echo "$rubyhdrdir" >&6; }
@@ -7717,10 +7674,6 @@ $as_echo "$rubyhdrdir" >&6; }
if test -f "$rubylibdir/$librubya"; then
librubyarg="$librubyarg"
RUBY_LIBS="$RUBY_LIBS -L$rubylibdir"
@ -167,10 +168,10 @@ index 730d6d5..0259112 100755
if test "X$librubyarg" != "X"; then
diff --git a/src/vim.h b/src/vim.h
index 87d1c92..8a7d5a5 100644
index bbc01ee..5a93591 100644
--- a/src/vim.h
+++ b/src/vim.h
@@ -250,17 +250,6 @@
@@ -244,17 +244,6 @@
# define SUN_SYSTEM
#endif

View file

@ -11,8 +11,8 @@ let
archive_fmt = if system == "x86_64-darwin" then "zip" else "tar.gz";
sha256 = {
x86_64-linux = "1kgvwcwkdvywsiyg86srfzcq6jcas6hyi9ds4qvndsnd64j0fgkn";
x86_64-darwin = "03jci05psxkknpjrrgjpdxsii2xyf5cfpkhrp5nnfafb5acfvs1x";
x86_64-linux = "103p0daf13zsqz2481pw6zpr6n0vaf57dq89i4djcm449i9c959i";
x86_64-darwin = "0bxggvi0wzsy801iylszqp8rv6kij6j2v05b6qyf6af7j3cmd1qf";
}.${system};
in
callPackage ./generic.nix rec {
@ -21,7 +21,7 @@ in
# Please backport all compatible updates to the stable release.
# This is important for the extension ecosystem.
version = "1.49.1";
version = "1.49.3";
pname = "vscode";
executableName = "code" + lib.optionalString isInsiders "-insiders";

View file

@ -11,8 +11,8 @@ let
archive_fmt = if system == "x86_64-darwin" then "zip" else "tar.gz";
sha256 = {
x86_64-linux = "10v13j1zg1bpgmr99vqhs1gwcipvnbkln0w6yphwn9440fw9fyp4";
x86_64-darwin = "1mgi2awrqsm11l1yb8rgmfrxvjfn9z3qvp5np76vgbaibq2ihh0k";
x86_64-linux = "19y25yfkls53w4qlmipfvjig7zykgwx1010ny58k339fv181vdyq";
x86_64-darwin = "1ak3pfvwdg51hcv2kyqpnhzkl7k23k5qk197sf6rv02kgwan7pxl";
}.${system};
sourceRoot = {
@ -27,7 +27,7 @@ in
# Please backport all compatible updates to the stable release.
# This is important for the extension ecosystem.
version = "1.49.1";
version = "1.49.3";
pname = "vscodium";
executableName = "codium";

View file

@ -10,6 +10,7 @@ let
else if stdenv.hostPlatform.system == "x86_64-linux" || stdenv.hostPlatform.system == "x86_64-darwin" then "x86-64"
else if stdenv.hostPlatform.system == "armv7l-linux" then "armv7l"
else if stdenv.hostPlatform.system == "aarch64-linux" then "aarch64"
else if stdenv.hostPlatform.system == "powerpc64le-linux" then "ppc64le"
else throw "ImageMagick is not supported on this platform.";
cfg = {

View file

@ -10,11 +10,12 @@ let
else if stdenv.hostPlatform.system == "x86_64-linux" || stdenv.hostPlatform.system == "x86_64-darwin" then "x86-64"
else if stdenv.hostPlatform.system == "armv7l-linux" then "armv7l"
else if stdenv.hostPlatform.system == "aarch64-linux" then "aarch64"
else if stdenv.hostPlatform.system == "powerpc64le-linux" then "ppc64le"
else throw "ImageMagick is not supported on this platform.";
cfg = {
version = "6.9.11-14";
sha256 = "0x51vf48g75cfp0mbwf3ckmlwa6v00592xx3gvrqzjzx7vlayjyg";
version = "6.9.11-29";
sha256 = "0adjdpi91ya0g3v7y503n95833m25aibkim5swg5lnwjrba407hg";
patches = [];
}
# Freeze version on mingw so we don't need to port the patch too often.

View file

@ -26,12 +26,15 @@
, exiv2
, ffmpeg
, flex
, graphviz
, imagemagick
, lcms2
, lensfun
, libgphoto2
, libkipi
, libksane
, liblqr1
, libqtav
, libusb1
, marble
, libGL
@ -39,22 +42,24 @@
, opencv3
, pcre
, threadweaver
, x265
# For panorama and focus stacking
, enblend-enfuse
, hugin
, gnumake
, breeze-icons
, oxygen
}:
mkDerivation rec {
pname = "digikam";
version = "6.4.0";
version = "7.1.0";
src = fetchurl {
url = "https://download.kde.org/stable/${pname}/${version}/${pname}-${version}.tar.xz";
sha256 = "0vwd97zkxv30y8x0z76s4fsj4w9ysgsmpjclp2h2bpava7zi4l3p";
url = "mirror://kde/stable/${pname}/${version}/${pname}-${version}.tar.xz";
sha256 = "1gmblnsm0aida3yynyddm6jdh59hx3w177hrhfar616z793ch0xi";
};
nativeBuildInputs = [ cmake doxygen extra-cmake-modules kdoctools wrapGAppsHook ];
@ -66,17 +71,21 @@ mkDerivation rec {
exiv2
ffmpeg
flex
graphviz
imagemagick
lcms2
lensfun
libgphoto2
libkipi
libksane
liblqr1
libqtav
libusb1
libGL
libGLU
opencv3
pcre
x265
qtbase
qtxmlpatterns
@ -94,6 +103,7 @@ mkDerivation rec {
kwidgetsaddons
kxmlgui
breeze-icons
marble
oxygen
threadweaver

Some files were not shown because too many files have changed in this diff.